index (int64, 0-10k) | blob_id (string, 40 chars) | step-1 (string, 13-984k chars) | step-2 (string, 6-1.23M chars, nullable) | step-3 (string, 15-1.34M chars, nullable) | step-4 (string, 30-1.34M chars, nullable) | step-5 (string, 64-1.2M chars, nullable) | step-ids (sequence, 1-5 items)
---|---|---|---|---|---|---|---
1,500 | 6960fc6d949512ffc783b085041f86cb791160a3 | <mask token>
class DjangoHandler(tornado.web.RequestHandler):
async def reroute(self):
http = tornado.httpclient.AsyncHTTPClient()
new_request = copy.deepcopy(self.request)
url_obj = copy.urlparse(new_request.url)
new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'
return await http.fetch(new_request)
<mask token>
<mask token>
<mask token>
| <mask token>
class DjangoHandler(tornado.web.RequestHandler):
async def reroute(self):
http = tornado.httpclient.AsyncHTTPClient()
new_request = copy.deepcopy(self.request)
url_obj = copy.urlparse(new_request.url)
new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'
return await http.fetch(new_request)
get = reroute
post = reroute
<mask token>
application.listen(80)
tornado.ioloop.IOLoop.current().start()
| <mask token>
class DjangoHandler(tornado.web.RequestHandler):
async def reroute(self):
http = tornado.httpclient.AsyncHTTPClient()
new_request = copy.deepcopy(self.request)
url_obj = copy.urlparse(new_request.url)
new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'
return await http.fetch(new_request)
get = reroute
post = reroute
application = tornado.web.Application([('/', DjangoHandler)])
application.listen(80)
tornado.ioloop.IOLoop.current().start()
| import tornado
import copy
class DjangoHandler(tornado.web.RequestHandler):
async def reroute(self):
http = tornado.httpclient.AsyncHTTPClient()
new_request = copy.deepcopy(self.request)
url_obj = copy.urlparse(new_request.url)
new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'
return await http.fetch(new_request)
get = reroute
post = reroute
application = tornado.web.Application([('/', DjangoHandler)])
application.listen(80)
tornado.ioloop.IOLoop.current().start()
| import tornado
import copy
class DjangoHandler(tornado.web.RequestHandler):
async def reroute(self):
http = tornado.httpclient.AsyncHTTPClient()
new_request = copy.deepcopy(self.request)
url_obj = copy.urlparse(new_request.url)
new_request.url = f"{url_obj.scheme}://localhost:9000{url_obj.path}"
return await http.fetch(new_request)
get = reroute
post = reroute
application = tornado.web.Application([
# (r'/chat', WebsocketChatHandler),
(r'/', DjangoHandler),
])
application.listen(80)
tornado.ioloop.IOLoop.current().start()
| [1, 3, 4, 5, 6] |
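A side note on this blob: every step calls `copy.urlparse`, which does not exist (`urlparse` lives in `urllib.parse`), and passes a deep-copied `HTTPServerRequest` to `fetch()`, which expects a URL string or `tornado.httpclient.HTTPRequest`. A minimal corrected sketch of the same proxy-to-Django pattern follows; port 9000 and the handler shape are the blob's own, and forwarding the method/body for POST is left out, as in the original:

```python
import urllib.parse

import tornado.httpclient
import tornado.ioloop
import tornado.web


class DjangoHandler(tornado.web.RequestHandler):
    async def reroute(self):
        http = tornado.httpclient.AsyncHTTPClient()
        # Rebuild the target URL instead of mutating the incoming request object.
        url_obj = urllib.parse.urlparse(self.request.full_url())
        response = await http.fetch(f'{url_obj.scheme}://localhost:9000{url_obj.path}')
        self.write(response.body)

    get = reroute
    post = reroute


if __name__ == '__main__':
    application = tornado.web.Application([(r'/', DjangoHandler)])
    application.listen(80)
    tornado.ioloop.IOLoop.current().start()
```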
1,501 | 7ad5e803afa42790e878bfb923eddcfde2d21928 | <mask token>
def add_owner_mce(m) ->MetadataChangeEventClass:
entity = m['Table']
schema = m['Schema']
dataset_name = f'{schema}.{entity}'
owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for
owner in m['Owner']]
changed_snapshot = DatasetSnapshotClass(urn=
f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'
, aspects=[])
changed_snapshot.aspects.append(OwnershipClass(owners))
mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)
return mce
def callback(err, msg):
print('ingested row')
if err:
print('error:', err)
<mask token>
| <mask token>
with open(source_file_path, 'r') as f:
for _i in f:
row = json.loads(_i.rstrip('\n'))
Email = row['Email']
row['Owner'] = [f'urn:li:corpuser:{Email}']
recs.append(row)
def add_owner_mce(m) ->MetadataChangeEventClass:
entity = m['Table']
schema = m['Schema']
dataset_name = f'{schema}.{entity}'
owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for
owner in m['Owner']]
changed_snapshot = DatasetSnapshotClass(urn=
f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'
, aspects=[])
changed_snapshot.aspects.append(OwnershipClass(owners))
mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)
return mce
def callback(err, msg):
print('ingested row')
if err:
print('error:', err)
<mask token>
for _i in range(num_recs):
print('sending data to datahub')
mce = add_owner_mce(recs[_i])
print(mce)
Restemitter.emit_mce(mce)
num_recs -= 1
| env = 'DEV'
platform = 'hive'
<mask token>
source_file_path = '/Users/snandi/Downloads/data/owner_data.json'
<mask token>
recs = []
with open(source_file_path, 'r') as f:
for _i in f:
row = json.loads(_i.rstrip('\n'))
Email = row['Email']
row['Owner'] = [f'urn:li:corpuser:{Email}']
recs.append(row)
def add_owner_mce(m) ->MetadataChangeEventClass:
entity = m['Table']
schema = m['Schema']
dataset_name = f'{schema}.{entity}'
owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for
owner in m['Owner']]
changed_snapshot = DatasetSnapshotClass(urn=
f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'
, aspects=[])
changed_snapshot.aspects.append(OwnershipClass(owners))
mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)
return mce
def callback(err, msg):
print('ingested row')
if err:
print('error:', err)
num_recs = len(recs)
Restemitter = DatahubRestEmitter('http://10.174.24.179:8080')
for _i in range(num_recs):
print('sending data to datahub')
mce = add_owner_mce(recs[_i])
print(mce)
Restemitter.emit_mce(mce)
num_recs -= 1
| env = 'DEV'
platform = 'hive'
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.ingestion.extractor.schema_util import *
from datahub.metadata.schema_classes import DatasetSnapshotClass, MetadataChangeEventClass, OwnerClass, OwnershipClass, OwnershipTypeClass
source_file_path = '/Users/snandi/Downloads/data/owner_data.json'
import json
recs = []
with open(source_file_path, 'r') as f:
for _i in f:
row = json.loads(_i.rstrip('\n'))
Email = row['Email']
row['Owner'] = [f'urn:li:corpuser:{Email}']
recs.append(row)
def add_owner_mce(m) ->MetadataChangeEventClass:
entity = m['Table']
schema = m['Schema']
dataset_name = f'{schema}.{entity}'
owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for
owner in m['Owner']]
changed_snapshot = DatasetSnapshotClass(urn=
f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'
, aspects=[])
changed_snapshot.aspects.append(OwnershipClass(owners))
mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)
return mce
def callback(err, msg):
print('ingested row')
if err:
print('error:', err)
num_recs = len(recs)
Restemitter = DatahubRestEmitter('http://10.174.24.179:8080')
for _i in range(num_recs):
print('sending data to datahub')
mce = add_owner_mce(recs[_i])
print(mce)
Restemitter.emit_mce(mce)
num_recs -= 1
|
env = 'DEV' ## this had to be in uppercase
platform = 'hive'
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.ingestion.extractor.schema_util import *
from datahub.metadata.schema_classes import (
DatasetSnapshotClass,
MetadataChangeEventClass,
OwnerClass,
OwnershipClass,
OwnershipTypeClass,
)
source_file_path = '/Users/snandi/Downloads/data/owner_data.json'
# created an emitter where the mce will be emitted, it will be DataHub's Kafka broker in docker (for PoC)
# emitter = DatahubKafkaEmitter(
# KafkaEmitterConfig.parse_obj(
# # This is the same config format as the standard Kafka sink's YAML.
# {
# "connection": {
# "bootstrap": "localhost:9002",
# "producer_config": {},
# "schema_registry_url": "localhost:8081",
# }
# }
# )
# )
# todo: 1. We have to make a living doc of table ownership 2. If we decide that to be google doc,
# then create an Oauth or service account to access the sheet programatically
import json
recs = []
with open(source_file_path, 'r') as f:
for _i in f:
row = json.loads(_i.rstrip('\n'))
Email= row['Email']
row['Owner'] = [f"urn:li:corpuser:{Email}"]
recs.append(row)
# recs = [{'schema_name': 'integrated_core', 'table_name': 'order_fact', 'owner': ["urn:li:corpuser:[email protected]"]}]
# Process messages
def add_owner_mce(m) -> MetadataChangeEventClass:
entity = m['Table']
schema = m['Schema']
dataset_name = f"{schema}.{entity}"
owners = [
OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER)
for owner in m['Owner']
]
changed_snapshot = DatasetSnapshotClass(
urn=f"urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})",
aspects=[], # we append to this list later on
)
changed_snapshot.aspects.append(OwnershipClass(owners))
mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)
return mce
def callback(err, msg):
print('ingested row')
if err:
# Handle the metadata emission error.
print("error:", err)
num_recs = len(recs)
# try REST emitter
Restemitter = DatahubRestEmitter("http://10.174.24.179:8080")
for _i in range(num_recs):
print('sending data to datahub')
mce = add_owner_mce(recs[_i])
print(mce)
# emit the mce to kafka
# emitter.emit_mce_async(mce, callback)
# emitter.flush()
# emit mce to REST
Restemitter.emit_mce(mce)
num_recs -= 1
| [2, 3, 4, 5, 6] |
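Two notes on the final step above: the `num_recs -= 1` inside `for _i in range(num_recs)` is a no-op because `range()` is evaluated once, so the loop reduces to iterating over `recs` directly. A compact, self-contained sketch of the same ownership emission, assuming the acryl-datahub package (older releases expose `emit_mce`) and using the example record from the blob's own comment; the GMS URL is illustrative:

```python
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.metadata.schema_classes import (
    DatasetSnapshotClass,
    MetadataChangeEventClass,
    OwnerClass,
    OwnershipClass,
    OwnershipTypeClass,
)

emitter = DatahubRestEmitter('http://localhost:8080')  # illustrative endpoint

# URN format: urn:li:dataset:(urn:li:dataPlatform:<platform>,<schema>.<table>,<env>)
urn = 'urn:li:dataset:(urn:li:dataPlatform:hive,integrated_core.order_fact,DEV)'
owners = [OwnerClass(owner='urn:li:corpuser:[email protected]',
                     type=OwnershipTypeClass.DATAOWNER)]
snapshot = DatasetSnapshotClass(urn=urn, aspects=[OwnershipClass(owners=owners)])
emitter.emit_mce(MetadataChangeEventClass(proposedSnapshot=snapshot))
```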
1,502 | 6239cb08509b8e84a88db95479af05845876d9b6 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Book', fields=[('name',
models.CharField(max_length=250)), ('slug', models.SlugField(
max_length=25, primary_key=True, serialize=False, unique=True)), (
'author', models.CharField(max_length=250)), ('was_buplished',
models.DateField())]), migrations.CreateModel(name='Alias', fields=
[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('alias', models.CharField(
max_length=250)), ('start', models.DateTimeField()), ('end', models
.DateTimeField(default=None)), ('target', models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]
| from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Book', fields=[('name',
models.CharField(max_length=250)), ('slug', models.SlugField(
max_length=25, primary_key=True, serialize=False, unique=True)), (
'author', models.CharField(max_length=250)), ('was_buplished',
models.DateField())]), migrations.CreateModel(name='Alias', fields=
[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('alias', models.CharField(
max_length=250)), ('start', models.DateTimeField()), ('end', models
.DateTimeField(default=None)), ('target', models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]
| # Generated by Django 3.1.6 on 2021-02-15 12:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('name', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=25, primary_key=True, serialize=False, unique=True)),
('author', models.CharField(max_length=250)),
('was_buplished', models.DateField()),
],
),
migrations.CreateModel(
name='Alias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=250)),
('start', models.DateTimeField()),
('end', models.DateTimeField(default=None)),
('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='alias.book')),
],
),
]
| [0, 1, 2, 3, 4] |
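For reference, a sketch of the models that would produce a migration like the one above, inferred from the `CreateModel` operations; the app label `alias` is taken from the `to='alias.book'` reference, and the `was_buplished` spelling is kept as in the migration:

```python
from django.db import models


class Book(models.Model):
    name = models.CharField(max_length=250)
    slug = models.SlugField(max_length=25, primary_key=True, unique=True)
    author = models.CharField(max_length=250)
    was_buplished = models.DateField()  # (sic) spelling matches the migration


class Alias(models.Model):
    alias = models.CharField(max_length=250)
    start = models.DateTimeField()
    end = models.DateTimeField(default=None)  # default=None as in the migration
    target = models.ForeignKey(Book, on_delete=models.PROTECT)
```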
1,503 | 99ddc00bf1d0141118748aa98bcc3e7b8a0ff29e | # Generic function for updating Weblogic system resources
def update_system_resources(clusterName):
print "Cluster name is " + clusterName
startTransaction()
create_JMSSystemResource("/", "DummyJMSModule")
delete_JMSModule("/JMSSystemResources", "DummyJMSModule")
endTransaction()
print "update_system_resources function has finished"
#*************************************** | null | null | null | null | [0] |
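Note that `startTransaction()`, `create_JMSSystemResource()`, `delete_JMSModule()` and `endTransaction()` are not built-in WLST commands, so this row presumably relies on site-specific helpers. A hedged sketch of what such helpers typically wrap in plain WLST (Jython 2.x, hence the print statements; `edit`, `startEdit`, `save`, `activate`, `getMBean` and the `DomainMBean` create/destroy calls are standard, the mapping to the wrapper names is assumed):

```python
def update_system_resources(clusterName):
    print "Cluster name is " + clusterName
    edit()
    startEdit()                                    # ~ startTransaction()
    cmo.createJMSSystemResource("DummyJMSModule")  # ~ create_JMSSystemResource()
    cmo.destroyJMSSystemResource(
        getMBean("/JMSSystemResources/DummyJMSModule"))  # ~ delete_JMSModule()
    save()
    activate()                                     # ~ endTransaction()
    print "update_system_resources function has finished"
```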
1,504 | 69eb62ba47a63cf007334c777709b0513d75f396 | <mask token>
| <mask token>
with open('dummyoutput.txt', 'r') as file_object:
data = file_object.readlines()
for line in data:
words = line.split(';')
for i in range(1, len(words), 4):
if db.get(words[i], 0) != 0:
cmd1 = db.get(words[i])
cmd2 = db.get(words[i + 2])
space = b(' ')
cmd = cmd1 + space + cmd2
print(cmd)
| <mask token>
db = dbm.open('resistorvalues', 'c')
with open('dummyoutput.txt', 'r') as file_object:
data = file_object.readlines()
for line in data:
words = line.split(';')
for i in range(1, len(words), 4):
if db.get(words[i], 0) != 0:
cmd1 = db.get(words[i])
cmd2 = db.get(words[i + 2])
space = b(' ')
cmd = cmd1 + space + cmd2
print(cmd)
| <mask token>
import dbm
db = dbm.open('resistorvalues', 'c')
with open('dummyoutput.txt', 'r') as file_object:
data = file_object.readlines()
for line in data:
words = line.split(';')
for i in range(1, len(words), 4):
if db.get(words[i], 0) != 0:
cmd1 = db.get(words[i])
cmd2 = db.get(words[i + 2])
space = b(' ')
cmd = cmd1 + space + cmd2
print(cmd)
| """
Looks up values in createresistorvaluesdbm.py.
Outputs string value ( cmd ).
"""
import dbm
# Open a DB. The c option opens in read/write mode and creates the file if needed.
db = dbm.open( 'resistorvalues', 'c' )
with open( "dummyoutput.txt", "r" ) as file_object:
#print (file_object.readline(6))
data = file_object.readlines()
# Go through serial string line by line
for line in data:
# parse on semi-colon
words = line.split( ";" )
#print (line.rsplit(";"))
# Ignore position information and pull out resistor values
# Note every fourth item to compensate for word pairs
for i in range( 1, len( words ), 4 ):
# print(words[i])
# the get method has 2 vlues lookup, and what to return is no match in this case is `0`
if db.get( words[ i ], 0 ) != 0:
# Direction, i.e. "f"
cmd1 = db.get( words[ i ] )
# Value, i.e. "10"
cmd2 = db.get( words[ i + 2 ] )
# Formatting space
space = b( ' ' )
cmd = cmd1 + space + cmd2
#print (cmd.decode('ascii'))
print ( cmd )
| [0, 1, 2, 3, 4] |
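The final step above carries a bug that every earlier step inherits: `b(' ')` is not a valid call in Python 3; the intent is the bytes literal `b' '`, since dbm lookups return bytes. A corrected sketch of the lookup loop, keeping the file and database names from the blob:

```python
import dbm

db = dbm.open('resistorvalues', 'c')
with open('dummyoutput.txt', 'r') as file_object:
    for line in file_object:
        words = line.split(';')
        # Every fourth item, compensating for word pairs (see the comments above).
        for i in range(1, len(words), 4):
            cmd1 = db.get(words[i])          # direction, e.g. b'f'
            if cmd1 is not None:
                cmd2 = db.get(words[i + 2])  # value, e.g. b'10'
                cmd = cmd1 + b' ' + cmd2
                print(cmd.decode('ascii'))
```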
1,505 | e36d2426fb8a268ab9ff4f3d6135aa72697e6326 | <mask token>
def get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):
handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)
return win32gui.GetClientRect(handle_of_hearthstone)
def countdown(n):
for i in np.arange(n, 0, -1):
print(i)
time.sleep(1)
<mask token>
def currentmouse():
return pg.position()
def get_pic():
return np.array(ImageGrab.grab(window))
def closewindow():
cv2.waitKey(0)
cv2.destroyAllWindows()
<mask token>
| <mask token>
def get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):
handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)
return win32gui.GetClientRect(handle_of_hearthstone)
def countdown(n):
for i in np.arange(n, 0, -1):
print(i)
time.sleep(1)
countdown(5)
<mask token>
window[:2] += corner
window[2:] += corner
<mask token>
def currentmouse():
return pg.position()
def get_pic():
return np.array(ImageGrab.grab(window))
def closewindow():
cv2.waitKey(0)
cv2.destroyAllWindows()
<mask token>
print(states)
<mask token>
while True:
pic = get_pic()
cv2.imshow('output', pic)
key = chr(cv2.waitKey(0))
cv2.destroyAllWindows()
if key == 'q':
break
elif key == 'd':
pass
countdown(5)
else:
count += 1
plt.imsave('./dataset/{}_{}.png'.format(key, count[0]), pic)
countdown(5)
np.save('count.npy', count)
if False:
countdown(5)
print(pg.position())
print(np.array(get_window()))
print(np.array(pg.position()) - corner)
| <mask token>
def get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):
handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)
return win32gui.GetClientRect(handle_of_hearthstone)
def countdown(n):
for i in np.arange(n, 0, -1):
print(i)
time.sleep(1)
countdown(5)
corner = pg.position()
window = np.array(get_window())
window[:2] += corner
window[2:] += corner
window = tuple(window)
def currentmouse():
return pg.position()
def get_pic():
return np.array(ImageGrab.grab(window))
def closewindow():
cv2.waitKey(0)
cv2.destroyAllWindows()
states_str = ['主界面', '选牌界面', '战斗界面', '收藏界面', '搜索界面', '手牌更换', '战斗结果']
states_num = [0, 1, 2, 3, 4, 5, 6]
states = pd.DataFrame(states_str, index=states_num)
print(states)
count = np.load('count.npy')
while True:
pic = get_pic()
cv2.imshow('output', pic)
key = chr(cv2.waitKey(0))
cv2.destroyAllWindows()
if key == 'q':
break
elif key == 'd':
pass
countdown(5)
else:
count += 1
plt.imsave('./dataset/{}_{}.png'.format(key, count[0]), pic)
countdown(5)
np.save('count.npy', count)
if False:
countdown(5)
print(pg.position())
print(np.array(get_window()))
print(np.array(pg.position()) - corner)
| import numpy as np
import cv2
import matplotlib.pyplot as plt
import win32gui, win32ui, win32con, win32api
import pyautogui as pg
from PIL import ImageGrab
import time
import pandas as pd
def get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):
handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)
return win32gui.GetClientRect(handle_of_hearthstone)
def countdown(n):
for i in np.arange(n, 0, -1):
print(i)
time.sleep(1)
countdown(5)
corner = pg.position()
window = np.array(get_window())
window[:2] += corner
window[2:] += corner
window = tuple(window)
def currentmouse():
return pg.position()
def get_pic():
return np.array(ImageGrab.grab(window))
def closewindow():
cv2.waitKey(0)
cv2.destroyAllWindows()
states_str = ['主界面', '选牌界面', '战斗界面', '收藏界面', '搜索界面', '手牌更换', '战斗结果']
states_num = [0, 1, 2, 3, 4, 5, 6]
states = pd.DataFrame(states_str, index=states_num)
print(states)
count = np.load('count.npy')
while True:
pic = get_pic()
cv2.imshow('output', pic)
key = chr(cv2.waitKey(0))
cv2.destroyAllWindows()
if key == 'q':
break
elif key == 'd':
pass
countdown(5)
else:
count += 1
plt.imsave('./dataset/{}_{}.png'.format(key, count[0]), pic)
countdown(5)
np.save('count.npy', count)
if False:
countdown(5)
print(pg.position())
print(np.array(get_window()))
print(np.array(pg.position()) - corner)
| #%%
import numpy as np
import cv2
import matplotlib.pyplot as plt
import win32gui,win32ui,win32con,win32api
import pyautogui as pg
from PIL import ImageGrab
import time
import pandas as pd
# %%
def get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):
handle_of_hearthstone=win32gui.FindWindow(lpClassName,lpWindowName)
return win32gui.GetClientRect(handle_of_hearthstone)
def countdown(n):
for i in np.arange(n,0,-1):
print(i)
time.sleep(1)
countdown(5)
corner=pg.position()
window=np.array(get_window())
window[:2]+=corner
window[2:]+=corner
window=tuple(window)
def currentmouse():
return pg.position()
def get_pic():
return np.array(ImageGrab.grab(window))
def closewindow():
cv2.waitKey(0)
cv2.destroyAllWindows()
#%%
states_str=['主界面','选牌界面','战斗界面','收藏界面','搜索界面','手牌更换','战斗结果']
states_num=[0,1,2,3,4,5,6]
states=pd.DataFrame(states_str,index=states_num)
print(states)
count=np.load('count.npy')
while(True):
pic=get_pic()
cv2.imshow('output',pic)
key=chr(cv2.waitKey(0))
cv2.destroyAllWindows()
if key=='q':#quit
break
elif key=='d':#discard
pass
countdown(5)
else:
count+=1
plt.imsave('./dataset/{}_{}.png'.format(key,count[0]),pic)
countdown(5)
np.save('count.npy',count)
#%% 收集按钮位置
if False:
countdown(5)
print(pg.position())
print(np.array(get_window()))
print(np.array(pg.position())-corner) | [5, 6, 7, 8, 9] |
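A note on the window-capture setup above: `GetClientRect()` returns coordinates relative to the window's own client area, which is why the script calibrates by adding the current mouse position (`corner`). A hedged alternative that avoids the manual calibration, using `GetWindowRect()` to obtain screen coordinates directly (DPI scaling caveats aside):

```python
import win32gui
from PIL import ImageGrab


def grab_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):
    hwnd = win32gui.FindWindow(lpClassName, lpWindowName)
    # GetWindowRect returns (left, top, right, bottom) in screen coordinates.
    left, top, right, bottom = win32gui.GetWindowRect(hwnd)
    return ImageGrab.grab((left, top, right, bottom))
```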
1,506 | d8fb5aeb5453b986cc698165749992e4a7677257 | <mask token>
def prepare_output_directory(config: ConfigSchema) ->None:
formatted = datetime.now().strftime(config.output_path_format)
output_path = Path(formatted)
output_path.mkdir(parents=True, exist_ok=False)
config.output_path = output_path.as_posix()
| <mask token>
def log_basic_info(logger: Logger, config: ConfigSchema):
logger.info('Experiment: {}'.format(config.experiment_name))
logger.info('- PyTorch version: {}'.format(torch.__version__))
logger.info('- Ignite version: {}'.format(ignite.__version__))
logger.info('\n')
logger.info('Configuration:')
for line in OmegaConf.to_yaml(config).split('\n'):
logger.info('\t' + line)
logger.info('\n')
if idist.get_world_size() > 1:
logger.info('\nDistributed setting:')
logger.info('\tbackend: {}'.format(idist.backend()))
logger.info('\tworld size: {}'.format(idist.get_world_size()))
logger.info('\n')
def prepare_output_directory(config: ConfigSchema) ->None:
formatted = datetime.now().strftime(config.output_path_format)
output_path = Path(formatted)
output_path.mkdir(parents=True, exist_ok=False)
config.output_path = output_path.as_posix()
| <mask token>
def log_metrics(logger: Logger, epoch: int, elapsed: float, tag: str,
metrics: Dict[str, float]):
logger.info('Epoch {} - elapsed: {:.5f} - {} metrics: {}'.format(epoch,
elapsed, tag, ', '.join(['{}: {}'.format(k, v) for k, v in metrics.
items()])))
def log_basic_info(logger: Logger, config: ConfigSchema):
logger.info('Experiment: {}'.format(config.experiment_name))
logger.info('- PyTorch version: {}'.format(torch.__version__))
logger.info('- Ignite version: {}'.format(ignite.__version__))
logger.info('\n')
logger.info('Configuration:')
for line in OmegaConf.to_yaml(config).split('\n'):
logger.info('\t' + line)
logger.info('\n')
if idist.get_world_size() > 1:
logger.info('\nDistributed setting:')
logger.info('\tbackend: {}'.format(idist.backend()))
logger.info('\tworld size: {}'.format(idist.get_world_size()))
logger.info('\n')
def prepare_output_directory(config: ConfigSchema) ->None:
formatted = datetime.now().strftime(config.output_path_format)
output_path = Path(formatted)
output_path.mkdir(parents=True, exist_ok=False)
config.output_path = output_path.as_posix()
| from datetime import datetime
from logging import Logger
from pathlib import Path
from typing import Dict
import ignite
import ignite.distributed as idist
import torch
from omegaconf import OmegaConf
from config_schema import ConfigSchema
def log_metrics(logger: Logger, epoch: int, elapsed: float, tag: str,
metrics: Dict[str, float]):
logger.info('Epoch {} - elapsed: {:.5f} - {} metrics: {}'.format(epoch,
elapsed, tag, ', '.join(['{}: {}'.format(k, v) for k, v in metrics.
items()])))
def log_basic_info(logger: Logger, config: ConfigSchema):
logger.info('Experiment: {}'.format(config.experiment_name))
logger.info('- PyTorch version: {}'.format(torch.__version__))
logger.info('- Ignite version: {}'.format(ignite.__version__))
logger.info('\n')
logger.info('Configuration:')
for line in OmegaConf.to_yaml(config).split('\n'):
logger.info('\t' + line)
logger.info('\n')
if idist.get_world_size() > 1:
logger.info('\nDistributed setting:')
logger.info('\tbackend: {}'.format(idist.backend()))
logger.info('\tworld size: {}'.format(idist.get_world_size()))
logger.info('\n')
def prepare_output_directory(config: ConfigSchema) ->None:
formatted = datetime.now().strftime(config.output_path_format)
output_path = Path(formatted)
output_path.mkdir(parents=True, exist_ok=False)
config.output_path = output_path.as_posix()
| from datetime import datetime
from logging import Logger
from pathlib import Path
from typing import Dict
import ignite
import ignite.distributed as idist
import torch
from omegaconf import OmegaConf
from config_schema import ConfigSchema
def log_metrics(
logger: Logger, epoch: int, elapsed: float, tag: str, metrics: Dict[str, float]
):
logger.info(
"Epoch {} - elapsed: {:.5f} - {} metrics: {}".format(
epoch,
elapsed,
tag,
", ".join(["{}: {}".format(k, v) for k, v in metrics.items()]),
)
)
def log_basic_info(logger: Logger, config: ConfigSchema):
logger.info("Experiment: {}".format(config.experiment_name))
logger.info("- PyTorch version: {}".format(torch.__version__))
logger.info("- Ignite version: {}".format(ignite.__version__))
logger.info("\n")
logger.info("Configuration:")
for line in OmegaConf.to_yaml(config).split("\n"):
logger.info("\t" + line)
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info("\tbackend: {}".format(idist.backend()))
logger.info("\tworld size: {}".format(idist.get_world_size()))
logger.info("\n")
def prepare_output_directory(config: ConfigSchema) -> None:
formatted = datetime.now().strftime(config.output_path_format)
output_path = Path(formatted)
# force always to use a new directory to avoid overwriting existing ones
output_path.mkdir(parents=True, exist_ok=False)
config.output_path = output_path.as_posix()
| [1, 2, 3, 4, 5] |
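A small usage sketch for the `prepare_output_directory()` helper above: `output_path_format` is interpreted as a strftime pattern, and `exist_ok=False` makes a rerun within the same timestamp fail rather than overwrite. The pattern string here is an assumed example, not something defined by the config schema:

```python
from datetime import datetime
from pathlib import Path

output_path_format = 'runs/%Y%m%d-%H%M%S'  # assumed example pattern
formatted = datetime.now().strftime(output_path_format)
Path(formatted).mkdir(parents=True, exist_ok=False)  # refuses to overwrite
print('writing outputs to', formatted)
```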
1,507 | 0baa133bd9eb8a162a82b23ba4d26cdd34f701c4 | <mask token>
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
client, service_one, api_user_active, sample_invite, mock_get_service,
mock_check_invite_token, mock_get_user_by_email,
mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):
expected_service = service_one['id']
expected_redirect_location = ('http://localhost/services/{}/dashboard'.
format(expected_service))
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
assert mock_accept_invite.call_count == 1
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_existing_user_with_no_permissions_accept_invite(client, mocker,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_get_service):
expected_service = service_one['id']
sample_invite['permissions'] = ''
expected_permissions = []
mocker.patch('app.invite_api_client.accept_invite', return_value=
sample_invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,
mocker, sample_invite, mock_get_service):
sample_invite['status'] = 'accepted'
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_existing_user_of_service_get_redirected_to_signin(client, mocker,
api_user_active, sample_invite, mock_get_service,
mock_get_user_by_email, mock_accept_invite):
sample_invite['email_address'] = api_user_active.email_address
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_accept_invite, mock_get_service):
expected_service = service_one['id']
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert mock_accept_invite.call_count == 1
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_new_user_accept_invite_calls_api_and_views_registration_page(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'Create an account'
email_in_page = page.find('main').find('p')
assert email_in_page.text.strip(
) == 'Your account will be created with this email: [email protected]'
form = page.find('form')
name = form.find('input', id='name')
password = form.find('input', id='password')
service = form.find('input', type='hidden', id='service')
email = form.find('input', type='hidden', id='email_address')
assert email
assert email.attrs['value'] == '[email protected]'
assert name
assert password
assert service
assert service.attrs['value'] == service_one['id']
<mask token>
def test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,
mocker, api_user_active, sample_invite, mock_get_user,
mock_accept_invite, mock_get_service):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert 'You’re signed in as [email protected].' in banner_contents
assert 'This invite is for another email address.' in banner_contents
assert 'Sign out and click the link again to accept this invite.' in banner_contents
assert mock_accept_invite.call_count == 0
<mask token>
| <mask token>
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
client, service_one, api_user_active, sample_invite, mock_get_service,
mock_check_invite_token, mock_get_user_by_email,
mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):
expected_service = service_one['id']
expected_redirect_location = ('http://localhost/services/{}/dashboard'.
format(expected_service))
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
assert mock_accept_invite.call_count == 1
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_existing_user_with_no_permissions_accept_invite(client, mocker,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_get_service):
expected_service = service_one['id']
sample_invite['permissions'] = ''
expected_permissions = []
mocker.patch('app.invite_api_client.accept_invite', return_value=
sample_invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,
mocker, sample_invite, mock_get_service):
sample_invite['status'] = 'accepted'
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_existing_user_of_service_get_redirected_to_signin(client, mocker,
api_user_active, sample_invite, mock_get_service,
mock_get_user_by_email, mock_accept_invite):
sample_invite['email_address'] = api_user_active.email_address
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_accept_invite, mock_get_service):
expected_service = service_one['id']
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert mock_accept_invite.call_count == 1
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_new_user_accept_invite_calls_api_and_views_registration_page(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'Create an account'
email_in_page = page.find('main').find('p')
assert email_in_page.text.strip(
) == 'Your account will be created with this email: [email protected]'
form = page.find('form')
name = form.find('input', id='name')
password = form.find('input', id='password')
service = form.find('input', type='hidden', id='service')
email = form.find('input', type='hidden', id='email_address')
assert email
assert email.attrs['value'] == '[email protected]'
assert name
assert password
assert service
assert service.attrs['value'] == service_one['id']
def test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(
client, service_one, mocker, mock_get_user, mock_get_service):
cancelled_invitation = create_sample_invite(mocker, service_one, status
='cancelled')
mock_check_token_invite(mocker, cancelled_invitation)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip(
) == 'The invitation you were sent has been cancelled'
<mask token>
def test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,
mocker, api_user_active, sample_invite, mock_get_user,
mock_accept_invite, mock_get_service):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert 'You’re signed in as [email protected].' in banner_contents
assert 'This invite is for another email address.' in banner_contents
assert 'Sign out and click the link again to accept this invite.' in banner_contents
assert mock_accept_invite.call_count == 0
def test_new_invited_user_verifies_and_added_to_service(client, service_one,
sample_invite, api_user_active, mock_check_invite_token,
mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,
mock_send_verify_code, mock_check_verify_code, mock_get_user,
mock_update_user, mock_add_user_to_service, mock_accept_invite,
mock_get_service, mock_get_service_templates,
mock_get_template_statistics, mock_get_jobs, mock_has_permissions,
mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
data = {'service': sample_invite['service'], 'email_address':
sample_invite['email_address'], 'from_user': sample_invite[
'from_user'], 'password': 'longpassword', 'mobile_number':
'+447890123456', 'name': 'Invited User'}
response = client.post(url_for('main.register_from_invite'), data=data)
response = client.post(url_for('main.verify'), data={'sms_code':
'12345'}, follow_redirects=True)
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
with client.session_transaction() as session:
new_user_id = session['user_id']
mock_add_user_to_service.assert_called_with(data['service'],
new_user_id, expected_permissions)
mock_accept_invite.assert_called_with(data['service'],
sample_invite['id'])
mock_check_verify_code.assert_called_once_with(new_user_id, '12345',
'sms')
assert service_one['id'] == session['service_id']
raw_html = response.data.decode('utf-8')
page = BeautifulSoup(raw_html, 'html.parser')
assert page.find('h1').text == 'Dashboard'
| <mask token>
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
client, service_one, api_user_active, sample_invite, mock_get_service,
mock_check_invite_token, mock_get_user_by_email,
mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):
expected_service = service_one['id']
expected_redirect_location = ('http://localhost/services/{}/dashboard'.
format(expected_service))
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
assert mock_accept_invite.call_count == 1
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_existing_user_with_no_permissions_accept_invite(client, mocker,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_get_service):
expected_service = service_one['id']
sample_invite['permissions'] = ''
expected_permissions = []
mocker.patch('app.invite_api_client.accept_invite', return_value=
sample_invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,
mocker, sample_invite, mock_get_service):
sample_invite['status'] = 'accepted'
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_existing_user_of_service_get_redirected_to_signin(client, mocker,
api_user_active, sample_invite, mock_get_service,
mock_get_user_by_email, mock_accept_invite):
sample_invite['email_address'] = api_user_active.email_address
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_accept_invite, mock_get_service):
expected_service = service_one['id']
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert mock_accept_invite.call_count == 1
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_new_user_accept_invite_calls_api_and_views_registration_page(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'Create an account'
email_in_page = page.find('main').find('p')
assert email_in_page.text.strip(
) == 'Your account will be created with this email: [email protected]'
form = page.find('form')
name = form.find('input', id='name')
password = form.find('input', id='password')
service = form.find('input', type='hidden', id='service')
email = form.find('input', type='hidden', id='email_address')
assert email
assert email.attrs['value'] == '[email protected]'
assert name
assert password
assert service
assert service.attrs['value'] == service_one['id']
def test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(
client, service_one, mocker, mock_get_user, mock_get_service):
cancelled_invitation = create_sample_invite(mocker, service_one, status
='cancelled')
mock_check_token_invite(mocker, cancelled_invitation)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip(
) == 'The invitation you were sent has been cancelled'
def test_new_user_accept_invite_completes_new_registration_redirects_to_verify(
client, service_one, sample_invite, api_user_active,
mock_check_invite_token, mock_dont_get_user_by_email,
mock_is_email_unique, mock_register_user, mock_send_verify_code,
mock_accept_invite, mock_get_users_by_service, mock_add_user_to_service,
mock_get_service):
expected_service = service_one['id']
expected_email = sample_invite['email_address']
expected_from_user = service_one['users'][0]
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
with client.session_transaction() as session:
assert response.status_code == 302
assert response.location == expected_redirect_location
invited_user = session.get('invited_user')
assert invited_user
assert expected_service == invited_user['service']
assert expected_email == invited_user['email_address']
assert expected_from_user == invited_user['from_user']
data = {'service': invited_user['service'], 'email_address':
invited_user['email_address'], 'from_user': invited_user[
'from_user'], 'password': 'longpassword', 'mobile_number':
'+447890123456', 'name': 'Invited User'}
expected_redirect_location = 'http://localhost/verify'
response = client.post(url_for('main.register_from_invite'), data=data)
assert response.status_code == 302
assert response.location == expected_redirect_location
mock_send_verify_code.assert_called_once_with(ANY, 'sms', data[
'mobile_number'])
mock_register_user.assert_called_with(data['name'], data[
'email_address'], data['mobile_number'], data['password'])
assert mock_accept_invite.call_count == 1
def test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,
mocker, api_user_active, sample_invite, mock_get_user,
mock_accept_invite, mock_get_service):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert 'You’re signed in as [email protected].' in banner_contents
assert 'This invite is for another email address.' in banner_contents
assert 'Sign out and click the link again to accept this invite.' in banner_contents
assert mock_accept_invite.call_count == 0
def test_new_invited_user_verifies_and_added_to_service(client, service_one,
sample_invite, api_user_active, mock_check_invite_token,
mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,
mock_send_verify_code, mock_check_verify_code, mock_get_user,
mock_update_user, mock_add_user_to_service, mock_accept_invite,
mock_get_service, mock_get_service_templates,
mock_get_template_statistics, mock_get_jobs, mock_has_permissions,
mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
data = {'service': sample_invite['service'], 'email_address':
sample_invite['email_address'], 'from_user': sample_invite[
'from_user'], 'password': 'longpassword', 'mobile_number':
'+447890123456', 'name': 'Invited User'}
response = client.post(url_for('main.register_from_invite'), data=data)
response = client.post(url_for('main.verify'), data={'sms_code':
'12345'}, follow_redirects=True)
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
with client.session_transaction() as session:
new_user_id = session['user_id']
mock_add_user_to_service.assert_called_with(data['service'],
new_user_id, expected_permissions)
mock_accept_invite.assert_called_with(data['service'],
sample_invite['id'])
mock_check_verify_code.assert_called_once_with(new_user_id, '12345',
'sms')
assert service_one['id'] == session['service_id']
raw_html = response.data.decode('utf-8')
page = BeautifulSoup(raw_html, 'html.parser')
assert page.find('h1').text == 'Dashboard'
| from flask import url_for
from bs4 import BeautifulSoup
from unittest.mock import ANY
import app
from app.notify_client.models import InvitedUser
from tests.conftest import sample_invite as create_sample_invite
from tests.conftest import mock_check_invite_token as mock_check_token_invite
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
client, service_one, api_user_active, sample_invite, mock_get_service,
mock_check_invite_token, mock_get_user_by_email,
mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):
expected_service = service_one['id']
expected_redirect_location = ('http://localhost/services/{}/dashboard'.
format(expected_service))
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
assert mock_accept_invite.call_count == 1
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_existing_user_with_no_permissions_accept_invite(client, mocker,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_get_service):
expected_service = service_one['id']
sample_invite['permissions'] = ''
expected_permissions = []
mocker.patch('app.invite_api_client.accept_invite', return_value=
sample_invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,
mocker, sample_invite, mock_get_service):
sample_invite['status'] = 'accepted'
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_existing_user_of_service_get_redirected_to_signin(client, mocker,
api_user_active, sample_invite, mock_get_service,
mock_get_user_by_email, mock_accept_invite):
sample_invite['email_address'] = api_user_active.email_address
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_accept_invite, mock_get_service):
expected_service = service_one['id']
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert mock_accept_invite.call_count == 1
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_new_user_accept_invite_calls_api_and_views_registration_page(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'Create an account'
email_in_page = page.find('main').find('p')
assert email_in_page.text.strip(
) == 'Your account will be created with this email: [email protected]'
form = page.find('form')
name = form.find('input', id='name')
password = form.find('input', id='password')
service = form.find('input', type='hidden', id='service')
email = form.find('input', type='hidden', id='email_address')
assert email
assert email.attrs['value'] == '[email protected]'
assert name
assert password
assert service
assert service.attrs['value'] == service_one['id']
def test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(
client, service_one, mocker, mock_get_user, mock_get_service):
cancelled_invitation = create_sample_invite(mocker, service_one, status
='cancelled')
mock_check_token_invite(mocker, cancelled_invitation)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip(
) == 'The invitation you were sent has been cancelled'
def test_new_user_accept_invite_completes_new_registration_redirects_to_verify(
client, service_one, sample_invite, api_user_active,
mock_check_invite_token, mock_dont_get_user_by_email,
mock_is_email_unique, mock_register_user, mock_send_verify_code,
mock_accept_invite, mock_get_users_by_service, mock_add_user_to_service,
mock_get_service):
expected_service = service_one['id']
expected_email = sample_invite['email_address']
expected_from_user = service_one['users'][0]
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
with client.session_transaction() as session:
assert response.status_code == 302
assert response.location == expected_redirect_location
invited_user = session.get('invited_user')
assert invited_user
assert expected_service == invited_user['service']
assert expected_email == invited_user['email_address']
assert expected_from_user == invited_user['from_user']
data = {'service': invited_user['service'], 'email_address':
invited_user['email_address'], 'from_user': invited_user[
'from_user'], 'password': 'longpassword', 'mobile_number':
'+447890123456', 'name': 'Invited User'}
expected_redirect_location = 'http://localhost/verify'
response = client.post(url_for('main.register_from_invite'), data=data)
assert response.status_code == 302
assert response.location == expected_redirect_location
mock_send_verify_code.assert_called_once_with(ANY, 'sms', data[
'mobile_number'])
mock_register_user.assert_called_with(data['name'], data[
'email_address'], data['mobile_number'], data['password'])
assert mock_accept_invite.call_count == 1
def test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,
mocker, api_user_active, sample_invite, mock_get_user,
mock_accept_invite, mock_get_service):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert 'You’re signed in as [email protected].' in banner_contents
assert 'This invite is for another email address.' in banner_contents
assert 'Sign out and click the link again to accept this invite.' in banner_contents
assert mock_accept_invite.call_count == 0
def test_new_invited_user_verifies_and_added_to_service(client, service_one,
sample_invite, api_user_active, mock_check_invite_token,
mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,
mock_send_verify_code, mock_check_verify_code, mock_get_user,
mock_update_user, mock_add_user_to_service, mock_accept_invite,
mock_get_service, mock_get_service_templates,
mock_get_template_statistics, mock_get_jobs, mock_has_permissions,
mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
data = {'service': sample_invite['service'], 'email_address':
sample_invite['email_address'], 'from_user': sample_invite[
'from_user'], 'password': 'longpassword', 'mobile_number':
'+447890123456', 'name': 'Invited User'}
response = client.post(url_for('main.register_from_invite'), data=data)
response = client.post(url_for('main.verify'), data={'sms_code':
'12345'}, follow_redirects=True)
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
with client.session_transaction() as session:
new_user_id = session['user_id']
mock_add_user_to_service.assert_called_with(data['service'],
new_user_id, expected_permissions)
mock_accept_invite.assert_called_with(data['service'],
sample_invite['id'])
mock_check_verify_code.assert_called_once_with(new_user_id, '12345',
'sms')
assert service_one['id'] == session['service_id']
raw_html = response.data.decode('utf-8')
page = BeautifulSoup(raw_html, 'html.parser')
assert page.find('h1').text == 'Dashboard'
| from flask import url_for
from bs4 import BeautifulSoup
from unittest.mock import ANY
import app
from app.notify_client.models import InvitedUser
from tests.conftest import sample_invite as create_sample_invite
from tests.conftest import mock_check_invite_token as mock_check_token_invite
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
client,
service_one,
api_user_active,
sample_invite,
mock_get_service,
mock_check_invite_token,
mock_get_user_by_email,
mock_get_users_by_service,
mock_accept_invite,
mock_add_user_to_service,
):
expected_service = service_one['id']
expected_redirect_location = 'http://localhost/services/{}/dashboard'.format(expected_service)
expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
assert mock_accept_invite.call_count == 1
mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_existing_user_with_no_permissions_accept_invite(
client,
mocker,
service_one,
api_user_active,
sample_invite,
mock_check_invite_token,
mock_get_user_by_email,
mock_get_users_by_service,
mock_add_user_to_service,
mock_get_service,
):
expected_service = service_one['id']
sample_invite['permissions'] = ''
expected_permissions = []
mocker.patch('app.invite_api_client.accept_invite', return_value=sample_invite)
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)
assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(
client,
mocker,
sample_invite,
mock_get_service,
):
sample_invite['status'] = 'accepted'
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (
page.h1.string,
page.select('main p')[0].text.strip(),
) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.',
)
def test_existing_user_of_service_get_redirected_to_signin(
client,
mocker,
api_user_active,
sample_invite,
mock_get_service,
mock_get_user_by_email,
mock_accept_invite,
):
sample_invite['email_address'] = api_user_active.email_address
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (
page.h1.string,
page.select('main p')[0].text.strip(),
) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.',
)
assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(
client,
service_one,
api_user_active,
sample_invite,
mock_check_invite_token,
mock_get_user_by_email,
mock_get_users_by_service,
mock_add_user_to_service,
mock_accept_invite,
mock_get_service,
):
expected_service = service_one['id']
expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)
assert mock_accept_invite.call_count == 1
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (
page.h1.string,
page.select('main p')[0].text.strip(),
) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.',
)
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(
client,
service_one,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_add_user_to_service,
mock_get_users_by_service,
mock_get_service,
):
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_new_user_accept_invite_calls_api_and_views_registration_page(
client,
service_one,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_add_user_to_service,
mock_get_users_by_service,
mock_get_service,
):
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'Create an account'
email_in_page = page.find('main').find('p')
assert email_in_page.text.strip() == 'Your account will be created with this email: [email protected]' # noqa
form = page.find('form')
name = form.find('input', id='name')
password = form.find('input', id='password')
service = form.find('input', type='hidden', id='service')
email = form.find('input', type='hidden', id='email_address')
assert email
assert email.attrs['value'] == '[email protected]'
assert name
assert password
assert service
assert service.attrs['value'] == service_one['id']
def test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(
client,
service_one,
mocker,
mock_get_user,
mock_get_service,
):
cancelled_invitation = create_sample_invite(mocker, service_one, status='cancelled')
mock_check_token_invite(mocker, cancelled_invitation)
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'The invitation you were sent has been cancelled'
def test_new_user_accept_invite_completes_new_registration_redirects_to_verify(
client,
service_one,
sample_invite,
api_user_active,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_is_email_unique,
mock_register_user,
mock_send_verify_code,
mock_accept_invite,
mock_get_users_by_service,
mock_add_user_to_service,
mock_get_service,
):
expected_service = service_one['id']
expected_email = sample_invite['email_address']
expected_from_user = service_one['users'][0]
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
with client.session_transaction() as session:
assert response.status_code == 302
assert response.location == expected_redirect_location
invited_user = session.get('invited_user')
assert invited_user
assert expected_service == invited_user['service']
assert expected_email == invited_user['email_address']
assert expected_from_user == invited_user['from_user']
data = {'service': invited_user['service'],
'email_address': invited_user['email_address'],
'from_user': invited_user['from_user'],
'password': 'longpassword',
'mobile_number': '+447890123456',
'name': 'Invited User'
}
expected_redirect_location = 'http://localhost/verify'
response = client.post(url_for('main.register_from_invite'), data=data)
assert response.status_code == 302
assert response.location == expected_redirect_location
mock_send_verify_code.assert_called_once_with(ANY, 'sms', data['mobile_number'])
mock_register_user.assert_called_with(data['name'],
data['email_address'],
data['mobile_number'],
data['password'])
assert mock_accept_invite.call_count == 1
def test_signed_in_existing_user_cannot_use_anothers_invite(
logged_in_client,
mocker,
api_user_active,
sample_invite,
mock_get_user,
mock_accept_invite,
mock_get_service,
):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert "You’re signed in as [email protected]." in banner_contents
assert "This invite is for another email address." in banner_contents
assert "Sign out and click the link again to accept this invite." in banner_contents
assert mock_accept_invite.call_count == 0
def test_new_invited_user_verifies_and_added_to_service(
client,
service_one,
sample_invite,
api_user_active,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_is_email_unique,
mock_register_user,
mock_send_verify_code,
mock_check_verify_code,
mock_get_user,
mock_update_user,
mock_add_user_to_service,
mock_accept_invite,
mock_get_service,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_jobs,
mock_has_permissions,
mock_get_users_by_service,
mock_get_detailed_service,
mock_get_usage,
):
# visit accept token page
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
data = {'service': sample_invite['service'],
'email_address': sample_invite['email_address'],
'from_user': sample_invite['from_user'],
'password': 'longpassword',
'mobile_number': '+447890123456',
'name': 'Invited User'
}
# get redirected to register from invite
response = client.post(url_for('main.register_from_invite'), data=data)
# that sends user on to verify
response = client.post(url_for('main.verify'), data={'sms_code': '12345'}, follow_redirects=True)
# when they post codes back to admin user should be added to
# service and sent on to dash board
expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
with client.session_transaction() as session:
new_user_id = session['user_id']
mock_add_user_to_service.assert_called_with(data['service'], new_user_id, expected_permissions)
mock_accept_invite.assert_called_with(data['service'], sample_invite['id'])
mock_check_verify_code.assert_called_once_with(new_user_id, '12345', 'sms')
assert service_one['id'] == session['service_id']
raw_html = response.data.decode('utf-8')
page = BeautifulSoup(raw_html, 'html.parser')
assert page.find('h1').text == 'Dashboard'
| [
8,
10,
11,
12,
13
] |
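A minimal sketch of the Flask test-client pattern the tests above rely on (test client, session_transaction(), BeautifulSoup assertions). The tiny app, route, and session key are hypothetical stand-ins for illustration, not part of the row.

from bs4 import BeautifulSoup
from flask import Flask

app = Flask(__name__)
app.secret_key = 'test-only'  # hypothetical key; session_transaction() needs one

@app.route('/')
def dashboard():
    return '<h1>Dashboard</h1>'

def test_dashboard_heading():
    client = app.test_client()
    with client.session_transaction() as sess:
        sess['user_id'] = '42'  # seed session state before making the request
    response = client.get('/', follow_redirects=True)
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.string == 'Dashboard'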
1,508 | 319af5232c043d77a9d63ab1efa62d857da6db23 | <mask token>
| <mask token>
def cLineGraph(j_file):
data = []
with open(j_file) as f:
for line in f:
data.append(json.loads(line))
data = data[0]
in_other = 0
in_picture = 1
in_text = 2
values = []
time = []
x_coords = []
x_times = []
page_turns = []
pages = []
pic = []
text = []
p = 1
t0 = 0
first = 0
for i in range(0, len(data)):
if data[i].get('type') == 'Picture':
pic = data[i]
if data[i].get('type') == 'Text':
text = data[i]
if first == 0:
page_turns.append(0)
else:
page_turns.append(data[i + 1].get('timestamp') - t0)
pages.append(p)
p = p + 1
if data[i].get('type') == 'SampleGaze' or data[i].get('type'
) == 'SampleFixation':
if first == 0:
t0 = data[i].get('timestamp')
first = 1
time.append(data[i].get('timestamp') - t0)
x = data[i].get('x')
y = data[i].get('y')
if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'
) and y > pic.get('pt'):
values.append(in_picture)
elif x < text.get('tr') and x > text.get('tl') and y < text.get(
'tb') and y > text.get('tt'):
values.append(in_text)
x_coords.append(x)
x_times.append(data[i].get('timestamp') - t0)
else:
values.append(in_other)
d = []
v = values[0]
vs = []
ts = []
vs.append(v)
ts.append(time[0])
for i in range(1, len(values)):
if values[i] == v:
vs.append(v)
ts.append(time[i])
else:
d.append([ts, vs])
vs = []
ts = []
v = values[i]
vs.append(v)
ts.append(time[i])
for i in range(0, len(x_times)):
x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5
for plot in d:
if plot[1][0] == 0:
plt.plot(plot[0], plot[1], 'k', linewidth=10)
elif plot[1][0] == 1:
plt.plot(plot[0], plot[1], 'b', linewidth=10)
elif plot[1][0] == 2:
plt.plot(plot[0], plot[1], 'g', linewidth=10)
plt.axis([0, time[-1], -0.5, 2.5])
plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')
plt.xticks(page_turns, pages, size='small')
plt.xlabel('Page')
plt.ylabel('Eye Location on Page')
plt.savefig('linegraph' + j_file[11:-5] + '.png')
| import matplotlib
import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox
from matplotlib.path import Path
import json
def cLineGraph(j_file):
data = []
with open(j_file) as f:
for line in f:
data.append(json.loads(line))
data = data[0]
in_other = 0
in_picture = 1
in_text = 2
values = []
time = []
x_coords = []
x_times = []
page_turns = []
pages = []
pic = []
text = []
p = 1
t0 = 0
first = 0
for i in range(0, len(data)):
if data[i].get('type') == 'Picture':
pic = data[i]
if data[i].get('type') == 'Text':
text = data[i]
if first == 0:
page_turns.append(0)
else:
page_turns.append(data[i + 1].get('timestamp') - t0)
pages.append(p)
p = p + 1
if data[i].get('type') == 'SampleGaze' or data[i].get('type'
) == 'SampleFixation':
if first == 0:
t0 = data[i].get('timestamp')
first = 1
time.append(data[i].get('timestamp') - t0)
x = data[i].get('x')
y = data[i].get('y')
if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'
) and y > pic.get('pt'):
values.append(in_picture)
elif x < text.get('tr') and x > text.get('tl') and y < text.get(
'tb') and y > text.get('tt'):
values.append(in_text)
x_coords.append(x)
x_times.append(data[i].get('timestamp') - t0)
else:
values.append(in_other)
d = []
v = values[0]
vs = []
ts = []
vs.append(v)
ts.append(time[0])
for i in range(1, len(values)):
if values[i] == v:
vs.append(v)
ts.append(time[i])
else:
d.append([ts, vs])
vs = []
ts = []
v = values[i]
vs.append(v)
ts.append(time[i])
for i in range(0, len(x_times)):
x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5
for plot in d:
if plot[1][0] == 0:
plt.plot(plot[0], plot[1], 'k', linewidth=10)
elif plot[1][0] == 1:
plt.plot(plot[0], plot[1], 'b', linewidth=10)
elif plot[1][0] == 2:
plt.plot(plot[0], plot[1], 'g', linewidth=10)
plt.axis([0, time[-1], -0.5, 2.5])
plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')
plt.xticks(page_turns, pages, size='small')
plt.xlabel('Page')
plt.ylabel('Eye Location on Page')
plt.savefig('linegraph' + j_file[11:-5] + '.png')
| import matplotlib
import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox
from matplotlib.path import Path
import json
def cLineGraph(j_file):
data = []
with open(j_file) as f:
for line in f:
data.append(json.loads(line))
data = data[0]
in_other = 0
in_picture = 1
in_text = 2
values = []
time = []
x_coords = []
x_times = []
page_turns = []
pages = []
pic = []
text = []
p = 1
t0 = 0
first = 0
for i in range(0, len(data)):
if data[i].get('type') == 'Picture':
pic = data[i]
#print(pic, i)
if data[i].get('type') == 'Text':
text = data[i]
if first == 0:
page_turns.append(0)
else:
page_turns.append(data[i+1].get('timestamp') - t0)
pages.append(p)
p = p + 1
#print(text, i)
if data[i].get('type') == 'SampleGaze' or data[i].get('type') == 'SampleFixation':
#if data[i].get('type') == 'SampleFixation': # comment out line above and use this one for only fixation data
if first == 0:
t0 = data[i].get('timestamp')
first = 1
time.append(data[i].get('timestamp') - t0)
x = data[i].get('x')
y = data[i].get('y')
if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb') and y > pic.get('pt'):
values.append(in_picture)
elif x < text.get('tr') and x > text.get('tl') and y < text.get('tb') and y > text.get('tt'):
values.append(in_text)
x_coords.append(x)
x_times.append(data[i].get('timestamp') - t0)
else:
values.append(in_other)
d = []
v = values[0]
vs = []
ts = []
vs.append(v)
ts.append(time[0])
for i in range(1, len(values)):
if values[i] == v:
vs.append(v)
ts.append(time[i])
else:
d.append([ts, vs])
vs = []
ts = []
v = values[i]
vs.append(v)
ts.append(time[i])
for i in range(0, len(x_times)):
x_coords[i] = ((1/1920.0)*(x_coords[i])) + 1.5
for plot in d:
if plot[1][0] == 0: # other
plt.plot(plot[0], plot[1], 'k', linewidth=10)
elif plot[1][0] == 1: # picture
plt.plot(plot[0], plot[1], 'b', linewidth=10)
        elif plot[1][0] == 2: # text
plt.plot(plot[0], plot[1], 'g', linewidth=10)
# THESE TWO LINES IMPLEMENT THE READING POINT PLOT FUNCTIONALITY
#plt.plot(x_times, x_coords, 'go')
#plt.plot(x_times, x_coords, 'g')
plt.axis([0, time[-1], -0.5, 2.5])
plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')
plt.xticks(page_turns, pages, size='small')
plt.xlabel('Page')
plt.ylabel('Eye Location on Page')
plt.savefig('linegraph' + j_file[11:-5] + '.png') | null | [
0,
1,
2,
3
] |
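The core of cLineGraph above is splitting the parallel (time, value) series into runs of constant value so each run can be drawn in its own colour. A compact, equivalent sketch of that step using itertools.groupby — an illustration, not code from the row:

from itertools import groupby

def run_segments(times, values):
    # Split parallel lists into (times, values) chunks of constant value.
    segments, i = [], 0
    for _, group in groupby(values):
        run = list(group)
        segments.append((times[i:i + len(run)], run))
        i += len(run)
    return segments

# run_segments([0, 1, 2, 3], [1, 1, 2, 2]) -> [([0, 1], [1, 1]), ([2, 3], [2, 2])]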
1,509 | 4d87c3f70809bbd488159f0b55131af903c7e7b4 | <mask token>
| print('hello guys')
print('hello everyone')
| print ("hello guys")
print ("hello everyone") | null | null | [
0,
1,
2
] |
1,510 | 71f9d9d7973809654db3ea613073f2d431f2d65f | <mask token>
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
        Test that EmailUser.objects._create_user without email raises a
        ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
<mask token>
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
| <mask token>
try:
from unittest.mock import patch
except ImportError:
from mock import patch
<mask token>
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
<mask token>
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
        Test that EmailUser.objects._create_user without email raises a
        ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
| <mask token>
try:
from unittest.mock import patch
except ImportError:
from mock import patch
<mask token>
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
<mask token>
fake_now = datetime(2015, 9, 10)
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
        Test that EmailUser.objects._create_user without email raises a
        ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
| from __future__ import unicode_literals, absolute_import
from datetime import datetime
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import pytest
from django.test import TestCase
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
from django.utils import timezone
from custom_email_user.models import EmailUser
from custom_email_user.managers import EmailUserManager
fake_now = datetime(2015, 9, 10)
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
        Test that EmailUser.objects._create_user without email raises a
        ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from datetime import datetime
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import pytest
from django.test import TestCase
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
from django.utils import timezone
from custom_email_user.models import EmailUser
from custom_email_user.managers import EmailUserManager
fake_now = datetime(2015, 9, 10)
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
        Test that EmailUser.objects._create_user without email raises a
        ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(
self, mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(
self.email, self.password, False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(
self, mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(
self.email, self.password, True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(
self.email, self.password, True, True)
| [
7,
9,
10,
11,
12
] |
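The tests above lean on unittest.mock's patch.object to swap a manager method for a mock and then assert on its call. A minimal illustration with a toy class; Greeter and send_welcome are invented for the example:

from unittest.mock import patch

class Greeter:
    def greet(self, name):
        return 'hello ' + name

def send_welcome(greeter):
    return greeter.greet('world')

# patch.object replaces Greeter.greet with a MagicMock for the block's duration
with patch.object(Greeter, 'greet', return_value='hi') as mock_greet:
    assert send_welcome(Greeter()) == 'hi'
    mock_greet.assert_called_once_with('world')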
1,511 | 05bd95966d72dd40b9b828932b0bf70e40ddb573 | '''
Created on 14 November 2015
@author: federico
'''
import paho.mqtt.client as mosquitto
import json
import urllib,urllib2
import datetime
import threading
import time
from pygame import mixer
from datetime import timedelta
#ALARM SOUND PATH
alarm_path="/home/pi/SmartBed/Smart_Bed/src/Rooster.wav"
#DWEET&FREEBOARD
thing_name='smart_bed_status'
url_freeboard="https://dweet.io:443/dweet/for/smart_bed_values"
url_status="https://dweet.io:443/get/latest/dweet/for/smart_bed_status"
url_freeboard_qos="https://dweet.io:443/dweet/for/smart_bed_qos"
url_freeboard_sleep_time="https://dweet.io:443/dweet/for/smart_bed_sleep_time"
#THINGSPEAK
url_thingspeak="https://api.thingspeak.com/update"
channel_id="68285"
api_read="XXXXXXXXXXXXXXX"
api_write="ZZZZZZZZZZZZZZZ"
#CONSTANTS
soglia=10 #detection threshold ("soglia" is Italian for threshold)
broker_ip="127.0.0.1"
smart_alarm_threshold=10 #threshold for the smart alarm: how much movement is needed to trigger it
sensor_freq=2 #seconds
sensor_MAXVAL=255 #g
#LOCK VARIABLE: prevents two threads from changing the status variable at the same time
alarm_clock_lock=0
#queue
q=[]
nsamples=10
#status of the system
status=0
mov_tot=0.1
alarm_sensibility=5 #seconds
def on_connect(client,userdata,rc):
print ("connected with result code"+str(rc))
client.subscribe("smart_bed/values", 0)
def on_message(client,userdata,msg):
print "Raspberry receive data Topic:",msg.topic+'\nMessage:'+str(msg.payload)
jsonfile=json.loads(msg.payload)
queue_insert(jsonfile)
def queue_insert(jsonfile):
x=int(jsonfile["e"]["v"]["x"])
y=int(jsonfile["e"]["v"]["y"])
z=int(jsonfile["e"]["v"]["z"])
valore=transform_function(x, y, z)
if(valore>soglia):
q.append(valore-soglia)
print "Value appended in the queue"+str(valore-soglia)
else:
q.append(0)
print "0 appended"
#SENDING DATA TO FREEBOARD LIVE VIEW
values={}
values["x"]=x
values["y"]=y
values["z"]=z
data = urllib.urlencode(values)
req = urllib2.Request(url_freeboard, data)
urllib2.urlopen(req)
def send_data_c(coda):
global mov_tot
somma=0
conta=0
valore=0
for l in coda:
if l!=0:
somma=somma+l
conta=conta+1
if somma!=0:
valore=float(somma)/conta
mov_tot=mov_tot+valore
print "I'm ready to send"+ str(valore)+" to thingspeak"
#sending data to thingspeak movement
params = urllib.urlencode({'api_key': api_write, 'field1': "%.2f" % valore})
req=urllib2.Request(url_thingspeak,params)
urllib2.urlopen(req)
def transform_function(x,y,z):
#PARAMETERS TO SET IN CASE OF SPECIAL INTEREST IN ONE DIRECTION
a=1
b=1
c=1
valore=a*x+b*y+z*c
return valore
def process_data(ore,minu,init_time):
global mov_tot
while(status==1):
if len(q)==nsamples:
coda=q[:]
tr=threading.Thread(target=send_data_c,args=(coda,))
tr.start()
del q[:]
#LAST DATA IN THE QUEUE
if len(q)!=0:
coda=q
tr=threading.Thread(target=send_data_c,args=(coda,))
tr.start()
del q[:]
#LAST STATISTICS
i=datetime.datetime.now()
#sleep time in minutes
b=i-init_time
sleep_time=b.seconds/60
print "Passed seconds from the start"+str(b.seconds)
print "Total movement"+str(mov_tot)
#MYFUNCTION TO QUALITY OF SLEEP
qos=-((100*sensor_freq*nsamples*15/(sensor_MAXVAL*3*b.seconds)))*mov_tot+100
#LAST DATA TO FREEBOARD
data = urllib.urlencode({'qos': "%.0f" %qos})
req = urllib2.Request(url_freeboard_qos, data)
urllib2.urlopen(req)
data = urllib.urlencode({'sleep_time':sleep_time})
req = urllib2.Request(url_freeboard_sleep_time, data)
urllib2.urlopen(req)
    #LAST DATA TO THINGSPEAK. A WHILE LOOP IS NEEDED BECAUSE THINGSPEAK ACCEPTS UPDATES ONLY EVERY 15s
resp='0'
times=0
while resp=='0':
time.sleep(times)
params = urllib.urlencode({'api_key': api_write, 'field2': "%.1f" % sleep_time})
req=urllib2.Request(url_thingspeak,params)
risp=urllib2.urlopen(req)
resp=risp.read()
times=times+5
resp='0'
times=0
while(resp=='0'):
time.sleep(times)
params = urllib.urlencode({'api_key': api_write, 'field3': "%.1f" % qos})
req=urllib2.Request(url_thingspeak,params)
risp=urllib2.urlopen(req)
resp=risp.read()
times=times+5
#needed for next measurement
mov_tot=0.1
def alarmclock(h,m):
global alarm_clock_lock
while(status==1):
i=datetime.datetime.now()
if (i.hour==h) & (i.minute==m):
if alarm_clock_lock==0:
#LOCK
alarm_clock_lock=1
print "ALARM FROM BASIC ALARMCLOCK"
sound_clock()
#UNLOCK
alarm_clock_lock=0
        #alarm sensitivity (seconds between checks)
time.sleep(alarm_sensibility)
def sound_clock():
mixer.init()
mixer.music.load(alarm_path)
while(status==1):
mixer.music.play()
time.sleep(4)
def smart_alarm(a_h,a_m,ore,minu,smart_min):
    #not ideal, but signals cannot be handled from a child thread
    time_to_wait=abs(a_h-ore)*3600+abs(a_m-abs((minu-smart_min)%60))*60
    print "seconds to sleep: "+str(time_to_wait)
time.sleep(time_to_wait)
global mov_tot
initial_mov=mov_tot
while(status==1):
print "mov_tot"+ str(mov_tot)
print "initial_mov"+str(initial_mov)
if((mov_tot-initial_mov)>smart_alarm_threshold):
global alarm_clock_lock
#LOCK
if alarm_clock_lock==0:
alarm_clock_lock=1
print "ALARM FROM SMART CLOCK"
sound_clock()
#UNLOCK
alarm_clock_lock=0
time.sleep(5)
if __name__ == '__main__':
client=mosquitto.Mosquitto("Raspberry")
client.on_connect=on_connect
client.on_message = on_message
client.connect(broker_ip, port=1883, keepalive=60, bind_address="")
client.loop_start()
while(True):
req=urllib2.Request(url_status)
resp=urllib2.urlopen(req)
dweet=resp.read()
dweet2=json.loads(dweet)
stat=dweet2["with"][0]["content"]["status"]
if (stat==1) & (status==0):
status=1
print "System is switched ON"
ore=dweet2["with"][0]["content"]["alarm_hour"]
minu=dweet2["with"][0]["content"]["alarm_min"]
smart_min=dweet2["with"][0]["content"]["smart_alarm"]
init_time=datetime.datetime.now()
actual_hour=init_time.hour
actual_min=init_time.minute
t=threading.Thread(target=process_data,args=(actual_hour,actual_min,init_time))
t.daemon=True
t.start()
l=threading.Thread(target=alarmclock,args=(ore,minu,))
l.daemon=True
l.start()
if(smart_min!=0):
h=threading.Thread(target=smart_alarm,args=(actual_hour,actual_min,ore,minu,smart_min,))
h.daemon=True
h.start()
diz={}
diz["status"]=1
val=client.publish("smart_bed",json.dumps(diz) , qos=1)
elif (stat==0) & (status==1):
diz={}
diz["status"]=0
val=client.publish("smart_bed",json.dumps(diz) , qos=1)
status=0
print "System is switched OFF"
time.sleep(2)
client.loop_stop()
| null | null | null | null | [
0
] |
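The row above drives MQTT through the legacy paho.mqtt.client-as-mosquitto alias and Python 2 prints. A minimal publish/subscribe sketch against the paho-mqtt 1.x API (an assumption; 2.x changed the Client constructor), reusing the row's broker address and topics:

import json
import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    print('connected with result code ' + str(rc))
    client.subscribe('smart_bed/values', qos=0)

def on_message(client, userdata, msg):
    print(msg.topic, json.loads(msg.payload))

client = mqtt.Client('Raspberry')  # paho-mqtt 1.x constructor signature
client.on_connect = on_connect
client.on_message = on_message
client.connect('127.0.0.1', 1883, 60)
client.loop_start()  # runs the network loop in a background thread
client.publish('smart_bed', json.dumps({'status': 1}), qos=1)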
1,512 | 076d9f0c14a8070993039bbda2ffe4d52c8d2273 | <mask token>
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
<mask token>
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
<mask token>
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
):
x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -
prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,
prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
prob_neg_given_no_cond):
return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *
prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))
<mask token>
| <mask token>
def calculated_weights(x):
return sum(cs < x)
<mask token>
len(x[x == 1]) / len(x)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
<mask token>
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel('frequency')
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
<mask token>
fig.suptitle('Histograms for Y1 and Y2')
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
<mask token>
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
):
x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -
prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,
prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
prob_neg_given_no_cond):
return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *
prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))
<mask token>
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel('Probability of condition (%)')
plt.ylabel('Probability of condition if tested positive (%)')
| <mask token>
x = np.random.choice(2, 200, p=[0.1, 0.9])
x = np.random.sample(size=200)
weights = [0.1, 0.9]
cs = np.cumsum(weights)
def calculated_weights(x):
return sum(cs < x)
vectorized_calculated_weights = np.vectorize(calculated_weights)
x = vectorized_calculated_weights(x)
len(x[x == 1]) / len(x)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
y1 = np.array([t200().mean() for i in range(100)])
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel('frequency')
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
y2 = np.array([t1000().mean() for i in range(100)])
fig, ax = plt.subplots(2)
fig.suptitle('Histograms for Y1 and Y2')
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
x = np.random.choice(2, 10000, p=[0.99, 0.01])
y0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])
y1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
p = len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
):
x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -
prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,
prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
prob_neg_given_no_cond):
return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *
prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))
x = np.linspace(0.001, 0.1, 100)
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel('Probability of condition (%)')
plt.ylabel('Probability of condition if tested positive (%)')
| <mask token>
import numpy as np
import matplotlib.pyplot as plt
x = np.random.choice(2, 200, p=[0.1, 0.9])
x = np.random.sample(size=200)
weights = [0.1, 0.9]
cs = np.cumsum(weights)
def calculated_weights(x):
return sum(cs < x)
vectorized_calculated_weights = np.vectorize(calculated_weights)
x = vectorized_calculated_weights(x)
len(x[x == 1]) / len(x)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
y1 = np.array([t200().mean() for i in range(100)])
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel('frequency')
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
y2 = np.array([t1000().mean() for i in range(100)])
fig, ax = plt.subplots(2)
fig.suptitle('Histograms for Y1 and Y2')
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
x = np.random.choice(2, 10000, p=[0.99, 0.01])
y0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])
y1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
p = len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
):
x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -
prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,
prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
prob_neg_given_no_cond):
return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *
prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))
x = np.linspace(0.001, 0.1, 100)
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel('Probability of condition (%)')
plt.ylabel('Probability of condition if tested positive (%)')
| # -*- coding: utf-8 -*-
"""
CST 383, measles simulation homework
# Here's a question. Suppose 1% of people have measles, that the
# test for measles if 98% accurate if you do have measles, and 98%
# accurate if you don't have measles. Then what is the probability
# that you have measles, given that you have tested positive for them?
#
# Try guessing an answer before you start on this assignment.
#
# In this homework we will use simulation to estimate the answer,
# and we'll also compute the answer using Bayes' Law. There
# are three parts below:
# 1. Warm up by simulating some coin flips.
# 2. Use simulation to answer the question above.
# 3. Use Bayes' Law to answer the question without simulation.
"""
import numpy as np
import matplotlib.pyplot as plt
# Instructions:
# Problems start with #@ and then give a number. Enter your
# Python code after each problem. Do not use any variables
# in your answer except for the ones that the problem says
# you can assume are defined.
#
# Part 1: warmup
#
#@ 1
# Simulate flipping a coin 200 times that has a 90% chance of
# landing heads. Store your result in a NumPy array x of length
# 200 that contains only 0 or 1, where 1 represents heads.
# Use np.random.choice().
# (assignment to x)
x = np.random.choice(2, 200, p=[0.1, 0.9])
#@ 2
# Repeat the problem above, but this time use np.random.sample(),
# which gives values between 0 and 1. Obviously you will need to do
# further processing to turn the output of sample() into your
# array x. This will take a little thought.
# (assignment to x)
x = np.random.sample(size=200)
weights = [0.1, 0.9]
cs = np.cumsum(weights)
def calculated_weights(x):
return sum(cs < x)
vectorized_calculated_weights = np.vectorize(calculated_weights)
x = vectorized_calculated_weights(x)
#@ 3
# compute the fraction of values in array x that are 1.
# (expression)
len(x[x == 1]) / len(x)
#@ 4
# Flip the weighted coin of problem 1 200 times, compute the fraction
# of values that are 1, and repeat this entire process 100 times to
# get an array of length 100. Assign this array to variable y1.
# (assignment to y1)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
y1 = np.array([t200().mean() for i in range(100)])  # one draw per trial; mean of a 0/1 array = fraction of 1's
#@ 5
# plot a histogram of y1 using matplotlib
# (produce a plot)
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel("frequency")
#@ 6
# compute a NumPy array y2 that is just like y1, except that in creating y2
# we do 1000 coin flips in each experiment, not 200.
# (assignment to y2)
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
y2 = np.array([t1000().mean() for i in range(100)])  # one draw of 1000 flips per trial
#@ 7
# plot histograms for y1 and y2, with the histogram for y1 above
# the plot for y2. Our lecture notes show how to do this; see
# the 'multiple subplots' slide. Use matplotlib. In both histograms,
# let the x axis values range from 0.85 to 0.95. Please study
# the two histograms and think about why they are different.
# Assume y1 and y2 are defined.
# (produce a plot)
fig, ax = plt.subplots(2)
fig.suptitle("Histograms for Y1 and Y2")
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
#
# Part 2 - simulate the answer to the question
#
#@ 8
# Simulate the overall occurrence of measles among 10,000 people,
# based on the assumption that each person has a 1% (0.01) chance of
# having measles.
# Compute a NumPy array x of length 10,000, where each value is
# either 0 or 1. Each of the 10,000 values should be found by
# "flipping a 0/1 coin" that is weighted 99% to 0. Approximately
# 99% of the values in x should be 0, and the others should be one.
# (assignment to x)
x = np.random.choice(2, 10000, p=[0.99, 0.01])
#@ 9
# Simulate the measles test results on the people without measles,
# based on the assumption that the measles test gives the right
# answer about 95% of the time on people without measles.
# Create an array y0, which is as long as the number of 0's in
# array x, by flipping a 0/1 coin that is weighted 95% to 0.
# Assume x is defined.
# (assignment to y0)
y0 = np.random.choice(2, len(x[x==0]), p=[0.95, 0.05])
#@ 10
# Simulate the measles test results on the people with measles,
# based on the assumption that the measles test gives the right
# answer about 98% of the time on people with measles.
# Create an array y1, which is as long as the number of 1's in
# array x, by flipping a 0/1 coin that is weighted 98% to 1.
# Assume x is defined.
# (assignment to y1)
y1 = np.random.choice(2, len(x[x==1]), p=[0.02, 0.98])
#@ 11
# Collect the measles-free people among those who tested positive.
# Compute a vector pos_no_meas that is all 0's, and is as long as the
# number of 1's in y0.
# Assume y0 is defined.
# (assignment to pos_no_meas)
pos_no_meas = np.zeros(len(y0[y0==1]))
#@ 12
# Collect the measles-infected people among those who tested positive.
# Compute a vector pos_with_meas that is all 1's, and is as long as
# the number of 1's in y1.
# Assume y1 is defined.
# (assignment to pos_with_meas)
pos_with_meas = np.ones(len(y1[y1==1]))
#@ 13
# Collect information about all people who tested positive.
# Concatenate arrays pos_no_meas and pos_with_meas, and assign
# the result to array 'tested_pos'. A 0 in in this array means
# no measles; a 1 means measles.
# Assume pos_no_meas and pos_with_meas are defined.
# (assignment to tested_pos)
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
#@ 14
# Estimate the probability of having measles if you've tested
# positive for measles. Compute the fraction of values in
# tested_positive that are 1, and assign the result to
# variable 'p'.
# Assume tested_pos is defined.
# (assignment to p)
p = len(tested_pos[tested_pos == 1]) / len(tested_pos)
#@ 15
# Package up your code into a function 'prob_cond_given_pos'. This
# function will return the probability of having a condition, based
# on certain probabilities.
# The function should have the following parameters:
# prob_cond - probability of a condition (above you used 0.01)
# prob_pos_given_cond - probability of testing positive given condition (you used 0.98)
# prob_neg_given_no_cond - probability of testing negative given no condition (you used 0.95)
# The function must return the probability of having the condition.
#
# Your function should return a slightly different value every time.
# When you run prob_cond_given_pos(0.01, 0.98, 0.95), you should get an answer
# similar to the value of p you just computed.
#
# Here is the output from tests I ran with my code:
# test 1:
# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(1000)]).mean()
# output: 0.8180582615720287
# test 2:
# np.array([prob_cond_given_pos(0.3, 0.8, 0.7) for i in range(1000)]).mean()
# output: 0.5334712339397902
# test 3:
# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(100)]).std()
# output: 0.00550051982001144
#
## I provided the function header. You should fill out the function body,
# including the return statement.
# (define a function)
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):
x = np.random.choice(2, 10000, p=[1-prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x==0]), p=[prob_neg_given_no_cond, 1-prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x==1]), p=[1-prob_pos_given_cond, prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0==1]))
pos_with_meas = np.ones(len(y1[y1==1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
#
# Part 3 - compute the answer using Bayes' Law
#
#@ 16
# Write a function 'prob_cond_given_pos_bayes'. This function
# will take the same parameters as prob_cond_given_pos, but will
# use Bayes' Law to compute the result.
#
# Here is some output from my code:
# test1:
# prob_cond_given_pos_bayes(0.5, 0.9, 0.8)
# output: 0.8181...
# test 2:
# prob_cond_given_pos_bayes(0.3, 0.8, 0.7)
# output: 0.5333...
#
# I provided the function header. You should fill out the function body,
# including the return statement.
# (define a function)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):
return (prob_pos_given_cond*prob_cond) / ((prob_pos_given_cond*prob_cond)+(1-prob_neg_given_no_cond)*(1-prob_cond))
#@ 17
# How does the probability of having a condition given you
# tested positive for it change based on how rare the
# condition is?
# Produce a histogram showing the probability of having measles
# given you tested positive for measles. Compute
# prob_cond_given_pos_bayes(x, 0.98, 0.95) for x ranging
# from 0.001 to 0.10 (x is the probability of having the
# condition). Use at least 100 values of x.
# Plot the results as a scatter plot, with x on the x axis
# and probability on the y axis. Label the x and y axes
# appropriately. Use matplotlib.
# Assume function prob_cond_given_pos_bayes() is defined.
# (produce a plot)
#x = np.arange(0.001, 0.1, ((0.1-0.001)/100))
x = np.linspace(0.001, 0.1, 100)
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel("Probability of condition (%)")
plt.ylabel("Probability of condition if tested positive (%)") | [
4,
6,
7,
8,
9
] |
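For the headline question in the row above (1% prevalence, a test that is 98% accurate both ways), Bayes' Law gives only about a one-in-three chance of actually having measles after a positive test. A quick check, using the row's own formula:

def bayes_posterior(prior, sensitivity, specificity):
    # P(condition | positive test) by Bayes' Law
    return (sensitivity * prior) / (
        sensitivity * prior + (1 - specificity) * (1 - prior))

print(bayes_posterior(0.01, 0.98, 0.98))  # ~0.331
print(bayes_posterior(0.01, 0.98, 0.95))  # ~0.165, with the 95% specificity the row uses later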
1,513 | c23bd136991bfb41f153321420c2fcfba0c843f4 | <mask token>
class TaskSolver:
<mask token>
<mask token>
<mask token>
def task_calculate_cosin_similarity(self, word1, word2, print_to_screen
=True):
sim = 0
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.
W2V_DICT[word2])) / 2
if print_to_screen:
print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1,
word2, sim))
return sim
def test_with_visim_400_data_set(self):
visim_400_df = pd.read_csv(os.path.abspath(
'./Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t')
rs, sim1_arr, sim2_arr = [], [], []
for index, row in visim_400_df.iterrows():
word_1, word_2 = row['Word1'], row['Word2']
sim_1, sim_2 = row['Sim1'], row['Sim2']
if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:
sim = self.task_calculate_cosin_similarity(word_1, word_2, True
)
rs.append(sim)
sim1_arr.append(sim_1)
sim2_arr.append(sim_2)
print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))
print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))
def task_k_nearest_words(self, k, word):
k = int(k)
if word not in self.W2V_DICT:
print("Word '{}' not in vocab".format(word))
return
sims = []
for key in self.W2V_DICT:
if key != word:
sims.append({'key': key, 'sim': self.
task_calculate_cosin_similarity(key, word, False)})
        k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[:k]
print("{} từ tương đồng nhất với từ '{}' là:".format(k, word))
for w in k_list:
print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get
('sim')))
return k_list
def task_synonym_antonym_classification(self):
self.prepare_data()
self.train_synonym_antonym_classification()
self.test_synonym_antonym_classification()
<mask token>
<mask token>
def train_synonym_antonym_classification(self):
X_train, Y_train = pickle.load(open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))
unique, counts = np.unique(Y_train, return_counts=True)
label_count = dict(zip(unique, counts))
clf = MLPClassifier()
clf.fit(X_train, Y_train)
pickle.dump(clf, open('./main/model/svm.model', 'wb+'))
return clf
def prepare_data(self):
X, Y = [], []
for file in [
'./Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',
'./Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:
f = open(file, 'r', encoding='utf8')
for index, line in enumerate(f):
line_arr = line.split()
if len(line_arr) < 2:
continue
word1, word2 = line_arr[0], line_arr[1]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X.append(vec)
if os.path.basename(f.name) == 'Antonym_vietnamese.txt':
Y.append(-1)
else:
Y.append(1)
X, Y = np.array(X), np.array(Y)
pickle.dump((X.astype(np.float64), Y), open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))
<mask token>
<mask token>
| <mask token>
class TaskSolver:
<mask token>
def __init__(self):
pass
<mask token>
def task_calculate_cosin_similarity(self, word1, word2, print_to_screen
=True):
sim = 0
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.
W2V_DICT[word2])) / 2
if print_to_screen:
print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1,
word2, sim))
return sim
def test_with_visim_400_data_set(self):
visim_400_df = pd.read_csv(os.path.abspath(
'./Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t')
rs, sim1_arr, sim2_arr = [], [], []
for index, row in visim_400_df.iterrows():
word_1, word_2 = row['Word1'], row['Word2']
sim_1, sim_2 = row['Sim1'], row['Sim2']
if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:
sim = self.task_calculate_cosin_similarity(word_1, word_2, True
)
rs.append(sim)
sim1_arr.append(sim_1)
sim2_arr.append(sim_2)
print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))
print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))
def task_k_nearest_words(self, k, word):
k = int(k)
if word not in self.W2V_DICT:
print("Word '{}' not in vocab".format(word))
return
sims = []
for key in self.W2V_DICT:
if key != word:
sims.append({'key': key, 'sim': self.
task_calculate_cosin_similarity(key, word, False)})
        k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[:k]
print("{} từ tương đồng nhất với từ '{}' là:".format(k, word))
for w in k_list:
print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get
('sim')))
return k_list
def task_synonym_antonym_classification(self):
self.prepare_data()
self.train_synonym_antonym_classification()
self.test_synonym_antonym_classification()
def test_synonym_antonym_classification(self):
clf = pickle.load(open('./main/model/svm.model', 'rb'))
X_test, Y_test = [], []
for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']:
f = open(file, 'r', encoding='utf8')
for index, line in enumerate(f):
line_arr = line.split()
if index == 0:
continue
word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X_test.append(vec)
if relation == 'SYN':
Y_test.append(1)
elif relation == 'ANT':
Y_test.append(-1)
pred = clf.predict(X_test)
print('Test date: {}'.format(date.today()))
print('Precision: {}'.format(precision_score(Y_test, pred)))
print('Recall: {}'.format(recall_score(Y_test, pred)))
print('F1: {}'.format(f1_score(Y_test, pred)))
log = (
"""
Test date: {}
Precision: {}
Recall: {}
F1: {}
----------------------------------------
"""
.format(date.today(), precision_score(Y_test, pred),
recall_score(Y_test, pred), f1_score(Y_test, pred)))
log_f = open('./main/log', 'a+')
log_f.write(log)
log_f.close()
def gen_vec_for_synonym_antonym_pair(self, word1, word2):
np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.
W2V_DICT[word2])
return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 *
np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0)
def train_synonym_antonym_classification(self):
X_train, Y_train = pickle.load(open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))
unique, counts = np.unique(Y_train, return_counts=True)
label_count = dict(zip(unique, counts))
clf = MLPClassifier()
clf.fit(X_train, Y_train)
pickle.dump(clf, open('./main/model/svm.model', 'wb+'))
return clf
def prepare_data(self):
X, Y = [], []
for file in [
'./Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',
'./Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:
f = open(file, 'r', encoding='utf8')
for index, line in enumerate(f):
line_arr = line.split()
if len(line_arr) < 2:
continue
word1, word2 = line_arr[0], line_arr[1]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X.append(vec)
if os.path.basename(f.name) == 'Antonym_vietnamese.txt':
Y.append(-1)
else:
Y.append(1)
X, Y = np.array(X), np.array(Y)
pickle.dump((X.astype(np.float64), Y), open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))
def gen_w2v_dict(self):
        with open('./main/dataset/w2v/w2v-dict.json', 'a+') as f:
            f.seek(0)  # 'a+' opens at end-of-file; rewind so a cached dict is detected
            if f.read(1):
f.seek(0)
self.W2V_DICT = json.load(f)
if not self.W2V_DICT:
with open('./Word-Similarity/word2vec/W2V_150.txt', 'r',
encoding='utf8') as f:
for index, line in enumerate(f):
line_arr = line.split()
if index > 1:
self.W2V_DICT.update({line_arr[0]: np.array(
line_arr[1:]).astype(float).tolist()})
f = open('./main/dataset/w2v/w2v-dict.json', 'w+')
f.write(json.dumps(self.W2V_DICT))
f.close()
<mask token>
| <mask token>
class TaskSolver:
<mask token>
def __init__(self):
pass
def solve(self, task_name, **kwargs):
self.gen_w2v_dict()
if task_name == 'k-nearest-words':
self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word'))
elif task_name == 'synonym-antonym-classification':
self.task_synonym_antonym_classification()
elif task_name == 'test-cosin-similarity-with-visim-400-dataset':
self.test_with_visim_400_data_set()
def task_calculate_cosin_similarity(self, word1, word2, print_to_screen
=True):
sim = 0
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.
W2V_DICT[word2])) / 2
if print_to_screen:
print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1,
word2, sim))
return sim
def test_with_visim_400_data_set(self):
visim_400_df = pd.read_csv(os.path.abspath(
'./Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t')
rs, sim1_arr, sim2_arr = [], [], []
for index, row in visim_400_df.iterrows():
word_1, word_2 = row['Word1'], row['Word2']
sim_1, sim_2 = row['Sim1'], row['Sim2']
if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:
sim = self.task_calculate_cosin_similarity(word_1, word_2, True
)
rs.append(sim)
sim1_arr.append(sim_1)
sim2_arr.append(sim_2)
print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))
print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))
def task_k_nearest_words(self, k, word):
k = int(k)
if word not in self.W2V_DICT:
print("Word '{}' not in vocab".format(word))
return
sims = []
for key in self.W2V_DICT:
if key != word:
sims.append({'key': key, 'sim': self.
task_calculate_cosin_similarity(key, word, False)})
        k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[:k]
print("{} từ tương đồng nhất với từ '{}' là:".format(k, word))
for w in k_list:
print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get
('sim')))
return k_list
def task_synonym_antonym_classification(self):
self.prepare_data()
self.train_synonym_antonym_classification()
self.test_synonym_antonym_classification()
def test_synonym_antonym_classification(self):
clf = pickle.load(open('./main/model/svm.model', 'rb'))
X_test, Y_test = [], []
for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']:
f = open(file, 'r', encoding='utf8')
for index, line in enumerate(f):
line_arr = line.split()
if index == 0:
continue
word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X_test.append(vec)
if relation == 'SYN':
Y_test.append(1)
elif relation == 'ANT':
Y_test.append(-1)
pred = clf.predict(X_test)
print('Test date: {}'.format(date.today()))
print('Precision: {}'.format(precision_score(Y_test, pred)))
print('Recall: {}'.format(recall_score(Y_test, pred)))
print('F1: {}'.format(f1_score(Y_test, pred)))
log = (
"""
Test date: {}
Precision: {}
Recall: {}
F1: {}
----------------------------------------
"""
.format(date.today(), precision_score(Y_test, pred),
recall_score(Y_test, pred), f1_score(Y_test, pred)))
log_f = open('./main/log', 'a+')
log_f.write(log)
log_f.close()
def gen_vec_for_synonym_antonym_pair(self, word1, word2):
np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.
W2V_DICT[word2])
return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 *
np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0)
def train_synonym_antonym_classification(self):
X_train, Y_train = pickle.load(open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))
unique, counts = np.unique(Y_train, return_counts=True)
label_count = dict(zip(unique, counts))
clf = MLPClassifier()
clf.fit(X_train, Y_train)
pickle.dump(clf, open('./main/model/svm.model', 'wb+'))
return clf
def prepare_data(self):
X, Y = [], []
for file in [
'./Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',
'./Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:
f = open(file, 'r', encoding='utf8')
for index, line in enumerate(f):
line_arr = line.split()
if len(line_arr) < 2:
continue
word1, word2 = line_arr[0], line_arr[1]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X.append(vec)
if os.path.basename(f.name) == 'Antonym_vietnamese.txt':
Y.append(-1)
else:
Y.append(1)
X, Y = np.array(X), np.array(Y)
pickle.dump((X.astype(np.float64), Y), open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))
def gen_w2v_dict(self):
        with open('./main/dataset/w2v/w2v-dict.json', 'a+') as f:
            f.seek(0)  # 'a+' opens at end-of-file; rewind so a cached dict is detected
            if f.read(1):
f.seek(0)
self.W2V_DICT = json.load(f)
if not self.W2V_DICT:
with open('./Word-Similarity/word2vec/W2V_150.txt', 'r',
encoding='utf8') as f:
for index, line in enumerate(f):
line_arr = line.split()
if index > 1:
self.W2V_DICT.update({line_arr[0]: np.array(
line_arr[1:]).astype(float).tolist()})
f = open('./main/dataset/w2v/w2v-dict.json', 'w+')
f.write(json.dumps(self.W2V_DICT))
f.close()
<mask token>
| import os, pickle, json, ast
import pandas as pd
from scipy import spatial
import numpy as np
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.svm import LinearSVC
from sklearn.metrics import precision_score, recall_score, f1_score
from datetime import date
from sklearn.neural_network import MLPClassifier
class TaskSolver:
W2V_DICT = dict()
def __init__(self):
pass
def solve(self, task_name, **kwargs):
self.gen_w2v_dict()
if task_name == 'k-nearest-words':
self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word'))
elif task_name == 'synonym-antonym-classification':
self.task_synonym_antonym_classification()
elif task_name == 'test-cosin-similarity-with-visim-400-dataset':
self.test_with_visim_400_data_set()
def task_calculate_cosin_similarity(self, word1, word2, print_to_screen
=True):
sim = 0
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.
W2V_DICT[word2])) / 2
if print_to_screen:
print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1,
word2, sim))
return sim
def test_with_visim_400_data_set(self):
visim_400_df = pd.read_csv(os.path.abspath(
'./Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t')
rs, sim1_arr, sim2_arr = [], [], []
for index, row in visim_400_df.iterrows():
word_1, word_2 = row['Word1'], row['Word2']
sim_1, sim_2 = row['Sim1'], row['Sim2']
if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:
sim = self.task_calculate_cosin_similarity(word_1, word_2, True
)
rs.append(sim)
sim1_arr.append(sim_1)
sim2_arr.append(sim_2)
print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))
print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))
def task_k_nearest_words(self, k, word):
k = int(k)
if word not in self.W2V_DICT:
print("Word '{}' not in vocab".format(word))
return
sims = []
for key in self.W2V_DICT:
if key != word:
sims.append({'key': key, 'sim': self.
task_calculate_cosin_similarity(key, word, False)})
        k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[:k]
print("{} từ tương đồng nhất với từ '{}' là:".format(k, word))
for w in k_list:
print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get
('sim')))
return k_list
def task_synonym_antonym_classification(self):
self.prepare_data()
self.train_synonym_antonym_classification()
self.test_synonym_antonym_classification()
def test_synonym_antonym_classification(self):
clf = pickle.load(open('./main/model/svm.model', 'rb'))
X_test, Y_test = [], []
for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']:
f = open(file, 'r', encoding='utf8')
for index, line in enumerate(f):
line_arr = line.split()
if index == 0:
continue
word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X_test.append(vec)
if relation == 'SYN':
Y_test.append(1)
elif relation == 'ANT':
Y_test.append(-1)
pred = clf.predict(X_test)
print('Test date: {}'.format(date.today()))
print('Precision: {}'.format(precision_score(Y_test, pred)))
print('Recall: {}'.format(recall_score(Y_test, pred)))
print('F1: {}'.format(f1_score(Y_test, pred)))
log = (
"""
Test date: {}
Precision: {}
Recall: {}
F1: {}
----------------------------------------
"""
.format(date.today(), precision_score(Y_test, pred),
recall_score(Y_test, pred), f1_score(Y_test, pred)))
log_f = open('./main/log', 'a+')
log_f.write(log)
log_f.close()
def gen_vec_for_synonym_antonym_pair(self, word1, word2):
np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.
W2V_DICT[word2])
return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 *
np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0)
def train_synonym_antonym_classification(self):
X_train, Y_train = pickle.load(open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))
unique, counts = np.unique(Y_train, return_counts=True)
label_count = dict(zip(unique, counts))
clf = MLPClassifier()
clf.fit(X_train, Y_train)
pickle.dump(clf, open('./main/model/svm.model', 'wb+'))
return clf
def prepare_data(self):
X, Y = [], []
for file in [
'./Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',
'./Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:
f = open(file, 'r', encoding='utf8')
for index, line in enumerate(f):
line_arr = line.split()
if len(line_arr) < 2:
continue
word1, word2 = line_arr[0], line_arr[1]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X.append(vec)
if os.path.basename(f.name) == 'Antonym_vietnamese.txt':
Y.append(-1)
else:
Y.append(1)
X, Y = np.array(X), np.array(Y)
pickle.dump((X.astype(np.float64), Y), open(
'./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))
def gen_w2v_dict(self):
        with open('./main/dataset/w2v/w2v-dict.json', 'a+') as f:
            f.seek(0)  # 'a+' opens at end-of-file; rewind so a cached dict is detected
            if f.read(1):
f.seek(0)
self.W2V_DICT = json.load(f)
if not self.W2V_DICT:
with open('./Word-Similarity/word2vec/W2V_150.txt', 'r',
encoding='utf8') as f:
for index, line in enumerate(f):
line_arr = line.split()
if index > 1:
self.W2V_DICT.update({line_arr[0]: np.array(
line_arr[1:]).astype(float).tolist()})
f = open('./main/dataset/w2v/w2v-dict.json', 'w+')
f.write(json.dumps(self.W2V_DICT))
f.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Helper')
parser.add_argument('--task', required=True, metavar='path', help=
"""
Task name:
0 => Cosin Similarity
1 => Test Cosine Similarity with Visim-400 dataset
2 => K Nearest Words
3 => Synonym Antonym Classification
"""
)
parser.add_argument('--word', metavar='path', help=
"Target word used in 'K Nearest Words' task")
parser.add_argument('--k', metavar='path', help=
"Number of 'Nearest Words' used in 'K Nearest Words' task")
parser.add_argument('--word1', metavar='path', help=
"Source word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task"
)
parser.add_argument('--word2', metavar='path', help=
"Target word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task"
)
args = parser.parse_args()
task = args.task
k = args.k
word = args.word
word1 = args.word1
word2 = args.word2
switcher = {'0': 'calculate-cosin-similarity', '1':
'test-cosin-similarity-with-visim-400-dataset', '2':
'k-nearest-words', '3': 'synonym-antonym-classification', '4':
'predict-synonym-antonym'}
task_name = switcher.get(task, 'Invalid task')
task_solver = TaskSolver()
task_solver.solve(task_name, k=k, word=word, word1=word1, word2=word2)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, pickle, json, ast
import pandas as pd
from scipy import spatial
import numpy as np
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.svm import LinearSVC
from sklearn.metrics import precision_score, recall_score, f1_score
from datetime import date
from sklearn.neural_network import MLPClassifier
class TaskSolver:
W2V_DICT = dict()
def __init__(self):
pass
def solve(self, task_name, **kwargs):
self.gen_w2v_dict()
if task_name == 'k-nearest-words':
self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word'))
elif task_name == 'synonym-antonym-classification':
self.task_synonym_antonym_classification()
elif task_name == 'test-cosin-similarity-with-visim-400-dataset':
self.test_with_visim_400_data_set()
def task_calculate_cosin_similarity(self, word1, word2, print_to_screen=True):
sim = 0
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.W2V_DICT[word2])) / 2
if (print_to_screen): print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1, word2, sim))
return sim
def test_with_visim_400_data_set(self):
visim_400_df = pd.read_csv(
os.path.abspath('./Word-Similarity/datasets/ViSim-400/Visim-400.txt'),
sep="\t")
rs, sim1_arr, sim2_arr = [], [], []
for index, row in visim_400_df.iterrows():
word_1, word_2 = row['Word1'], row['Word2']
sim_1, sim_2 = row['Sim1'], row['Sim2']
if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:
sim = self.task_calculate_cosin_similarity(word_1, word_2, True)
rs.append(sim)
sim1_arr.append(sim_1)
sim2_arr.append(sim_2)
print("Hệ số tương đồng Pearson là: ", stats.pearsonr(rs, sim1_arr))
print("Hệ số tương đồng Spearman là: ", stats.spearmanr(rs, sim1_arr))
def task_k_nearest_words(self, k, word):
k = int(k)
if word not in self.W2V_DICT:
print("Word '{}' not in vocab".format(word))
return
sims = []
for key in self.W2V_DICT:
if key != word:
sims.append({
'key': key,
'sim': self.task_calculate_cosin_similarity(key, word, False)
})
        k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[:k]
print("{} từ tương đồng nhất với từ '{}' là:".format(k, word))
for w in k_list:
print("Từ {} có độ tương đồng là {}".format(w.get('key'), w.get('sim')))
return k_list
def task_synonym_antonym_classification(self):
self.prepare_data()
self.train_synonym_antonym_classification()
self.test_synonym_antonym_classification()
def test_synonym_antonym_classification(self):
clf = pickle.load(open('./main/model/svm.model', 'rb'))
X_test, Y_test = [], []
for file in [
'./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt'
]:
f = open(file, 'r', encoding="utf8")
for index, line in enumerate(f):
line_arr = line.split()
if index == 0: continue
word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X_test.append(vec)
if relation == 'SYN': Y_test.append(1)
elif relation == 'ANT': Y_test.append(-1)
pred = clf.predict(X_test)
print("Test date: {}".format(date.today()))
print("Precision: {}".format(precision_score(Y_test, pred)))
print("Recall: {}".format(recall_score(Y_test, pred)))
print("F1: {}".format(f1_score(Y_test, pred)))
log = """
Test date: {}
Precision: {}
Recall: {}
F1: {}
\n
----------------------------------------
""".format(
date.today(),
precision_score(Y_test, pred),
recall_score(Y_test, pred),
f1_score(Y_test, pred))
log_f = open('./main/log', 'a+')
log_f.write(log)
log_f.close()
def gen_vec_for_synonym_antonym_pair(self, word1, word2):
np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.W2V_DICT[word2])
return np.concatenate((
np_vec1,
np_vec2,
np_vec1 + np_vec2,
np_vec1 * np_vec2,
np.absolute(np_vec1 - np_vec2),
# np.array([self.task_calculate_cosin_similarity(word1, word2, False)])
), axis=0)
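    # Assuming the W2V_150.txt vectors are 150-dimensional (as the filename
    # suggests), the concatenation above yields a 750-d pair feature
    # (v1, v2, v1+v2, v1*v2, |v1-v2|), the input the MLPClassifier trains on.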
def train_synonym_antonym_classification(self):
X_train, Y_train = pickle.load(open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))
unique, counts = np.unique(Y_train, return_counts=True)
label_count = dict(zip(unique, counts))
clf = MLPClassifier()
clf.fit(X_train, Y_train)
pickle.dump(clf, open('./main/model/svm.model', 'wb+'))
return clf
def prepare_data(self):
X, Y = [], []
for file in [
'./Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',
'./Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt'
]:
f = open(file, 'r', encoding="utf8")
for index, line in enumerate(f):
line_arr = line.split()
if len(line_arr) < 2: continue
word1, word2 = line_arr[0], line_arr[1]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X.append(vec)
if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1)
else: Y.append(1)
X, Y = np.array(X), np.array(Y)
pickle.dump(
( X.astype(np.float64), Y ),
open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+')
)
def gen_w2v_dict(self):
        with open('./main/dataset/w2v/w2v-dict.json', 'a+') as f:
            f.seek(0)  # 'a+' opens at end-of-file; rewind so a cached dict is detected
            if f.read(1):
f.seek(0)
self.W2V_DICT = json.load(f)
if not self.W2V_DICT:
with open('./Word-Similarity/word2vec/W2V_150.txt', 'r', encoding="utf8") as f:
for index, line in enumerate(f):
line_arr = line.split()
if index > 1:
self.W2V_DICT.update({line_arr[0]: np.array(line_arr[1:]).astype(float).tolist()})
f = open("./main/dataset/w2v/w2v-dict.json","w+")
f.write(json.dumps(self.W2V_DICT))
f.close()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Helper")
parser.add_argument(
"--task",
required=True,
metavar="path",
help="""
Task name:
0 => Cosin Similarity
1 => Test Cosine Similarity with Visim-400 dataset
2 => K Nearest Words
3 => Synonym Antonym Classification
""",
)
parser.add_argument(
"--word",
metavar="path",
help="Target word used in 'K Nearest Words' task",
)
parser.add_argument(
"--k",
metavar="path",
help="Number of 'Nearest Words' used in 'K Nearest Words' task",
)
parser.add_argument(
"--word1",
metavar="path",
help="Source word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task",
)
parser.add_argument(
"--word2",
metavar="path",
help="Target word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task",
)
args = parser.parse_args()
task = args.task
k = args.k
word = args.word
word1 = args.word1
word2 = args.word2
switcher = {
'0': 'calculate-cosin-similarity',
'1': 'test-cosin-similarity-with-visim-400-dataset',
'2': 'k-nearest-words',
'3': 'synonym-antonym-classification',
'4': 'predict-synonym-antonym'
}
task_name = switcher.get(task, "Invalid task")
task_solver = TaskSolver()
task_solver.solve(
task_name,
k=k,
word=word,
word1=word1,
word2=word2
)
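# Example invocations (a sketch; the script filename `main.py` and the sample
# word are assumptions, and any token present in W2V_150.txt's vocab works):
#   python main.py --task 1                     # Pearson/Spearman vs. ViSim-400
#   python main.py --task 2 --k 10 --word nhà   # 10 nearest neighbours of "nhà"
#   python main.py --task 3                     # train + test the SYN/ANT classifier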
| [
7,
11,
12,
15,
16
] |
1,514 | 9fa534664056a8cf9e9a64ccc7d6dd4de2ec0936 | <mask token>
class Trainer(object):
<mask token>
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
<mask token>
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
<mask token>
<mask token>
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
| <mask token>
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled
):
for i in range(self.step + 1, self.final_steps + 1):
self.step = i
tprint('------------- TRAIN step : {} -------------'.
format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name=
'Training time during profiling', format=':.6f')
timer.start()
with Nvtx('step #{}'.format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint('lr: {:06f}'.format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if (self.ckpt_path and self.save_steps and i % self.
save_steps == 0):
self.save()
tprint('Training has been done.')
except StopIteration:
tprint('Training has been done. (by n_epochs)')
except KeyboardInterrupt:
tprint('Training has been canceled.')
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx('data load', enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx('forward'):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx('backward'):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx('weight update'):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
| <mask token>
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled
):
for i in range(self.step + 1, self.final_steps + 1):
self.step = i
tprint('------------- TRAIN step : {} -------------'.
format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name=
'Training time during profiling', format=':.6f')
timer.start()
with Nvtx('step #{}'.format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint('lr: {:06f}'.format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if (self.ckpt_path and self.save_steps and i % self.
save_steps == 0):
self.save()
tprint('Training has been done.')
except StopIteration:
tprint('Training has been done. (by n_epochs)')
except KeyboardInterrupt:
tprint('Training has been canceled.')
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx('data load', enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx('forward'):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx('backward'):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx('weight update'):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
| import abc
import glob
import pathlib
import numpy as np
import torch
from tensorboardX import SummaryWriter
import time
import os
import matplotlib.pyplot as plt
from torch import nn
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.fp16 import cast_model_to_half
import torch.cuda.profiler as profiler
from fastspeech.utils.time import TimeElapsed
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled
):
for i in range(self.step + 1, self.final_steps + 1):
self.step = i
tprint('------------- TRAIN step : {} -------------'.
format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name=
'Training time during profiling', format=':.6f')
timer.start()
with Nvtx('step #{}'.format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint('lr: {:06f}'.format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if (self.ckpt_path and self.save_steps and i % self.
save_steps == 0):
self.save()
tprint('Training has been done.')
except StopIteration:
tprint('Training has been done. (by n_epochs)')
except KeyboardInterrupt:
tprint('Training has been canceled.')
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx('data load', enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx('forward'):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx('backward'):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx('weight update'):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
| # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import glob
import pathlib
import numpy as np
import torch
from tensorboardX import SummaryWriter
import time
import os
import matplotlib.pyplot as plt
from torch import nn
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.fp16 import cast_model_to_half
import torch.cuda.profiler as profiler
from fastspeech.utils.time import TimeElapsed
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
# model
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(
self.model_name, num_param))
# optimizer
self.optimizer = optimizer_fn(model)
# lr scheduler
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
# automatic mixed precision
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model,
self.optimizer,
opt_level='O1')
# profile
if nvprof_iter_start and nvprof_iter_end is not None and pyprof_enabled:
from apex import pyprof
pyprof.nvtx.init()
# data parallel
self.model = nn.DataParallel(self.model)
# set seed
if seed is None:
seed = np.random.randint(2**16)
np.random.seed(seed)
torch.manual_seed(seed)
# data loader
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
# logging
if log_path:
# tensorboard log path : {log_path}/YYYYMMDD-HHMMMSS
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
# checkpoint path
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
# load checkpoint
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled):
for i in range(self.step+1, self.final_steps + 1):
self.step = i
tprint("------------- TRAIN step : {} -------------".format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name="Training time during profiling", format=":.6f")
timer.start()
with Nvtx("step #{}".format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint("lr: {:06f}".format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if self.ckpt_path and self.save_steps and i % self.save_steps == 0:
self.save()
tprint("Training has been done.")
except StopIteration: # done by n_epochs
tprint("Training has been done. (by n_epochs)")
except KeyboardInterrupt:
tprint("Training has been canceled.")
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx("data load", enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx("forward"):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx("backward"):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx("weight update"):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {
'step': self.step,
'model': self.model.state_dict(),
'optim': self.optimizer.state_dict(),
}
torch.save(state_dict, self.ckpt_path +
'/checkpoint_{:06d}.pt'.format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(
self.model_name, self.step))
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
# load the latest created file.
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint('[Load] Checkpoint \'{}\'. Step={}'.format(
latest_file, self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))
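    # Note: state_dict() in save() is taken from the nn.DataParallel wrapper,
    # so checkpoint keys carry a 'module.' prefix; they reload cleanly only
    # into a model wrapped the same way, as __init__ does before calling load().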
def console_log(self, tag, loss, meta):
# console logging
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar(
'{}/loss'.format(tag), loss, global_step=self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
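# Minimal sketch of a concrete subclass (hypothetical names, not part of the
# original file), showing the contract loss() must satisfy: return a scalar
# loss tensor plus a dict of floats for console_log/tensorboard_log to render.
class L2Trainer(Trainer):
    def loss(self, inputs, model):
        # Assumes the data loader yields (input, target) pairs that are already
        # on the right device; device transfer is omitted for brevity.
        x, target = inputs
        pred = model(x)
        loss = torch.nn.functional.mse_loss(pred, target)
        return loss, {'mse': loss.item()}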
| [
8,
12,
13,
14,
15
] |
1,515 | e07bd4cd13209bff8bc1119a619a2954abd52592 | <mask token>
class SequenceHeuristic(object):
<mask token>
<mask token>
| <mask token>
class SequenceHeuristic(object):
def __init__(self, minChanges, minDuration, noMotionDelay):
self._minChanges = minChanges
self._minDuration = minDuration
self._noMotionDelay = noMotionDelay
self._duration = 0
<mask token>
| <mask token>
class SequenceHeuristic(object):
def __init__(self, minChanges, minDuration, noMotionDelay):
self._minChanges = minChanges
self._minDuration = minDuration
self._noMotionDelay = noMotionDelay
self._duration = 0
def isValid(self, image, data):
numOfChanges = data['numOfChanges']
if numOfChanges >= self._minChanges:
self._duration += 1
if self._duration >= self._minDuration:
return True
elif self._duration > 0:
self._duration -= 1
elif self._noMotionDelay:
time.sleep(self._noMotionDelay / 1000.0)
return False
| import time
class SequenceHeuristic(object):
def __init__(self, minChanges, minDuration, noMotionDelay):
self._minChanges = minChanges
self._minDuration = minDuration
self._noMotionDelay = noMotionDelay
self._duration = 0
def isValid(self, image, data):
numOfChanges = data['numOfChanges']
if numOfChanges >= self._minChanges:
self._duration += 1
if self._duration >= self._minDuration:
return True
elif self._duration > 0:
self._duration -= 1
elif self._noMotionDelay:
time.sleep(self._noMotionDelay / 1000.0)
return False
| import time
class SequenceHeuristic(object):
def __init__(self, minChanges, minDuration, noMotionDelay):
self._minChanges = minChanges
self._minDuration = minDuration
self._noMotionDelay = noMotionDelay
self._duration = 0
def isValid(self, image, data):
numOfChanges = data['numOfChanges']
if numOfChanges >= self._minChanges:
self._duration += 1
if self._duration >= self._minDuration:
return True
else:
if self._duration > 0: # No sleep if duration is in effect
self._duration -= 1
else:
if self._noMotionDelay:
time.sleep(self._noMotionDelay/1000.0)
return False
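# Tiny driver sketch (hypothetical frame-change counts, not part of the
# original): isValid() fires only after minDuration consecutive motion frames,
# and a quiet frame decays the streak by one instead of resetting it.
if __name__ == '__main__':
    h = SequenceHeuristic(minChanges=10, minDuration=3, noMotionDelay=0)
    for changes in (12, 15, 11, 2):
        print(h.isValid(None, {'numOfChanges': changes}))
    # prints: False, False, True, False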
| [
1,
2,
3,
4,
5
] |
1,516 | 8e8c72362dfb1587150aadaa6b8a0aeb77c3641a | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('blog', '0005_auto_20200111_1513')]
operations = [migrations.AlterField(model_name='post', name='photo',
field=models.TextField(default='https://medium.com/'))]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('blog', '0005_auto_20200111_1513')]
operations = [migrations.AlterField(model_name='post', name='photo',
field=models.TextField(default='https://medium.com/'))]
| # Generated by Django 3.0.1 on 2020-01-11 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20200111_1513'),
]
operations = [
migrations.AlterField(
model_name='post',
name='photo',
field=models.TextField(default='https://medium.com/'),
),
]
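# Usage sketch (assumption: this migration file is numbered 0006; its own
# filename is not shown here, only the 0005 dependency above):
#   python manage.py migrate blog          # apply forward
#   python manage.py migrate blog 0005     # roll this AlterField back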
| [
0,
1,
2,
3,
4
] |
1,517 | b7007778ea9dfac3af8c31d66d32d8157dc0d69b | <mask token>
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,
_buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,
learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf
.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt != None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
<mask token>
| <mask token>
tf.reset_default_graph()
<mask token>
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,
_buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,
learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf
.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt != None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
def main():
vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
model = createModel(sess, True, vocab_sizeen, vocab_sizech)
model.batch_size = 1
conversation_history = []
while True:
prompt = '请输入:'
sentence = input(prompt)
conversation_history.append(sentence)
conversation_history = conversation_history[-convo_hist_limit:]  # keep only the last convo_hist_limit turns
token_ids = list(reversed(datautil.sentence_to_ids(' '.join(
conversation_history), vocaben, normalize_digits=True, Isch
=True)))
bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]
[0] > len(token_ids)])
encoder_inputs, decoder_inputs, target_weights = model.get_batch({
bucket_id: [(token_ids, [])]}, bucket_id)
_, _, output_logits = model.step(sess, encoder_inputs,
decoder_inputs, target_weights, bucket_id, True)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits
]
if datautil.EOS_ID in outputs:
outputs = outputs[:outputs.index(datautil.EOS_ID)]
convo_output = ' '.join(datautil.ids2texts(outputs,
rev_vocabch))
conversation_history.append(convo_output)
else:
print('cannot translate!')
if __name__ == '__main__':
main()
| <mask token>
_buckets = []
convo_hist_limit = 1
max_source_length = 1
max_target_length = 2
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.reset_default_graph()
max_train_data_size = 0
data_dir = 'datacn/'
dropout = 1.0
grad_clip = 5.0
batch_size = 60
hidden_size = 14
num_layers = 2
learning_rate = 0.5
lr_decay_factor = 0.99
checkpoint_dir = 'data/checkpoints/'
hidden_size = 100
checkpoint_dir = 'fanyichina/checkpoints/'
data_dir = 'fanyichina'
_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,
_buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,
learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf
.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt is not None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
def main():
vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
model = createModel(sess, True, vocab_sizeen, vocab_sizech)
model.batch_size = 1
conversation_history = []
while True:
prompt = '请输入:'
sentence = input(prompt)
conversation_history.append(sentence)
conversation_history = conversation_history[-convo_hist_limit:]  # keep only the last convo_hist_limit turns
token_ids = list(reversed(datautil.sentence_to_ids(' '.join(
conversation_history), vocaben, normalize_digits=True, Isch
=True)))
bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]
[0] > len(token_ids)])
encoder_inputs, decoder_inputs, target_weights = model.get_batch({
bucket_id: [(token_ids, [])]}, bucket_id)
_, _, output_logits = model.step(sess, encoder_inputs,
decoder_inputs, target_weights, bucket_id, True)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits
]
if datautil.EOS_ID in outputs:
outputs = outputs[:outputs.index(datautil.EOS_ID)]
convo_output = ' '.join(datautil.ids2texts(outputs,
rev_vocabch))
conversation_history.append(convo_output)
else:
print('cannot translate!')
if __name__ == '__main__':
main()
| import os
import numpy as np
import tensorflow as tf
from translate import datautil
import seq2seq_model
_buckets = []
convo_hist_limit = 1
max_source_length = 1
max_target_length = 2
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.reset_default_graph()
max_train_data_size = 0
data_dir = 'datacn/'
dropout = 1.0
grad_clip = 5.0
batch_size = 60
hidden_size = 14
num_layers = 2
learning_rate = 0.5
lr_decay_factor = 0.99
checkpoint_dir = 'data/checkpoints/'
hidden_size = 100
checkpoint_dir = 'fanyichina/checkpoints/'
data_dir = 'fanyichina'
_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,
_buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,
learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf
.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt is not None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
def main():
vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
model = createModel(sess, True, vocab_sizeen, vocab_sizech)
model.batch_size = 1
conversation_history = []
while True:
prompt = '请输入:'
sentence = input(prompt)
conversation_history.append(sentence)
conversation_history = conversation_history[-convo_hist_limit:]  # keep only the last convo_hist_limit turns
token_ids = list(reversed(datautil.sentence_to_ids(' '.join(
conversation_history), vocaben, normalize_digits=True, Isch
=True)))
bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]
[0] > len(token_ids)])
encoder_inputs, decoder_inputs, target_weights = model.get_batch({
bucket_id: [(token_ids, [])]}, bucket_id)
_, _, output_logits = model.step(sess, encoder_inputs,
decoder_inputs, target_weights, bucket_id, True)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits
]
if datautil.EOS_ID in outputs:
outputs = outputs[:outputs.index(datautil.EOS_ID)]
convo_output = ' '.join(datautil.ids2texts(outputs,
rev_vocabch))
conversation_history.append(convo_output)
else:
print('cannot translate!')
if __name__ == '__main__':
main()
| # -*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
from translate import datautil
import seq2seq_model
_buckets = []
convo_hist_limit = 1
max_source_length = 1
max_target_length = 2
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.reset_default_graph()
max_train_data_size = 0
data_dir = 'datacn/'
dropout = 1.0
grad_clip = 5.0
batch_size = 60
hidden_size = 14
num_layers = 2
learning_rate = 0.5
lr_decay_factor = 0.99
checkpoint_dir = 'data/checkpoints/'
hidden_size = 100
checkpoint_dir = 'fanyichina/checkpoints/'
data_dir = 'fanyichina'
_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(
os.path.join(datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(
os.path.join(datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size, _buckets, hidden_size, num_layers, dropout,
grad_clip, batch_size, learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt is not None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
def main():
vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
model = createModel(sess, True, vocab_sizeen, vocab_sizech)
model.batch_size = 1
conversation_history = []
while True:
prompt = '请输入:'
sentence = input(prompt)
conversation_history.append(sentence)
conversation_history = conversation_history[-convo_hist_limit:]  # keep only the last convo_hist_limit turns
token_ids = list(reversed(datautil.sentence_to_ids(
" ".join(conversation_history), vocaben, normalize_digits=True, Isch=True)))
bucket_id = min([b for b in range(len(_buckets))
if _buckets[b][0] > len(token_ids)])
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
_, _, output_logits = model.step(
sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, True)
outputs = [int(np.argmax(logit, axis=1))
for logit in output_logits]
if datautil.EOS_ID in outputs:
outputs = outputs[:outputs.index(datautil.EOS_ID)]
convo_output = " ".join(
datautil.ids2texts(outputs, rev_vocabch))
conversation_history.append(convo_output)
else:
print('cannot translate!')
if __name__ == '__main__':
main()
| [
2,
4,
5,
6,
7
] |
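The bucket lookup inside main() above picks the smallest bucket whose source length exceeds the tokenized input; a standalone sketch of that rule:

# Same selection as in main(); raises ValueError for inputs of 60+ tokens.
_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]
token_ids = list(range(25))  # a 25-token input sentence
bucket_id = min(b for b in range(len(_buckets)) if _buckets[b][0] > len(token_ids))
print(bucket_id, _buckets[bucket_id])  # 1 (40, 40)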
1,518 | 943e8be7a9ee4e494c0a42e1368555f3df3de897 | <mask token>
def aug_fn(image):
data = {'image': image}
aug_data = transforms(**data)
aug_img = aug_data['image']
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
<mask token>
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True,
name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride,
name=name + '_0_conv', kernel_initializer='he_uniform',
bias_initializer='he_uniform', kernel_regularizer=tf.keras.
regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name +
'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name +
'_1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=
name + '_2_conv', kernel_initializer='he_uniform', bias_initializer
='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv',
kernel_initializer='he_uniform', bias_initializer='he_uniform',
kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name +
'_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name=
'conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name=
'conv1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=
'pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name=
'predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.
trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
<mask token>
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS -
LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
<mask token>
| <mask token>
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes),
dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {'image': image}
aug_data = transforms(**data)
aug_img = aug_data['image']
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation:
dataset = dataset.map(partial(process_data), num_parallel_calls=
AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True,
name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride,
name=name + '_0_conv', kernel_initializer='he_uniform',
bias_initializer='he_uniform', kernel_regularizer=tf.keras.
regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name +
'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name +
'_1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=
name + '_2_conv', kernel_initializer='he_uniform', bias_initializer
='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv',
kernel_initializer='he_uniform', bias_initializer='he_uniform',
kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name +
'_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name=
'conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name=
'conv1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=
'pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name=
'predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.
trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS -
LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
<mask token>
| <mask token>
def preprocess_image(images):
image = tf.io.read_file(images)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
return image
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes),
dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {'image': image}
aug_data = transforms(**data)
aug_img = aug_data['image']
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation:
dataset = dataset.map(partial(process_data), num_parallel_calls=
AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True,
name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride,
name=name + '_0_conv', kernel_initializer='he_uniform',
bias_initializer='he_uniform', kernel_regularizer=tf.keras.
regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name +
'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name +
'_1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=
name + '_2_conv', kernel_initializer='he_uniform', bias_initializer
='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv',
kernel_initializer='he_uniform', bias_initializer='he_uniform',
kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name +
'_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name=
'conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name=
'conv1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=
'pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name=
'predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.
trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS -
LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
<mask token>
| <mask token>
def preprocess_image(images):
image = tf.io.read_file(images)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
return image
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes),
dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {'image': image}
aug_data = transforms(**data)
aug_img = aug_data['image']
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation:
dataset = dataset.map(partial(process_data), num_parallel_calls=
AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True,
name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride,
name=name + '_0_conv', kernel_initializer='he_uniform',
bias_initializer='he_uniform', kernel_regularizer=tf.keras.
regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name +
'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name +
'_1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=
name + '_2_conv', kernel_initializer='he_uniform', bias_initializer
='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv',
kernel_initializer='he_uniform', bias_initializer='he_uniform',
kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name +
'_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name=
'conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name=
'conv1_conv', kernel_initializer='he_uniform', bias_initializer=
'he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=
'pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name=
'predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.
trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS -
LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
def tf_data_visualize(augmentation_element, name):
row, col, idx = 5, 4, 0
row = min(row, BATCH_SIZE // col)
for image, label in augmentation_element:
print(image.shape, label.shape)
image = image / 255.0
plt.figure(figsize=(15, int(15 * row / col)))
for j in range(row * col):
plt.subplot(row, col, j + 1)
plt.axis('off')
plt.imshow(image[j,])
plt.show()
idx += 1
if idx == 3:
break
<mask token>
| import pathlib, random, cv2
import tensorflow as tf
import numpy as np
import tensorflow.keras.backend as K
import albumentations as A
from matplotlib import pyplot as plt
from functools import partial
from sklearn.model_selection import train_test_split
# GPU setup
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 1:
try:
print("Activate Multi GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
except RuntimeError as e:
print(e)
else:
try:
print("Activate Sigle GPU")
tf.config.experimental.set_memory_growth(gpus[0], True)
strategy = tf.distribute.experimental.CentralStorageStrategy()
except RuntimeError as e:
print(e)
def preprocess_image(images):
image = tf.io.read_file(images)
image = tf.image.decode_jpeg(image, channels=3)
# image = tf.cast(image, tf.float32) / 255.0
# image = (tf.cast(image, tf.float32) / 127.5) - 1
# image = tf.image.per_image_standardization(image)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
return image
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes), dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {"image":image}
aug_data = transforms(**data)
aug_img = aug_data["image"]
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
# aug_img = tf.keras.applications.resnet.preprocess_input(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation:
dataset = dataset.map(partial(process_data), num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride, name=name+'_0_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name+'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name='predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
def tf_data_visualize(augmentation_element, name):
row, col, idx = 5, 4, 0
row = min(row, BATCH_SIZE // col)
for (image, label) in augmentation_element:
print(image.shape, label.shape)
image = image / 255.0
plt.figure(figsize=(15, int(15 * row / col)))
for j in range(row * col):
plt.subplot(row, col, j + 1)
plt.axis('off')
plt.imshow(image[j, ])
# plt.savefig(f'{SAVED_PATH}/{LOG_TIME}/{name}_{idx}.jpg')
plt.show()
idx += 1
if idx == 3:
break
if __name__ == "__main__":
# hyper parameters
AUTOTUNE = tf.data.experimental.AUTOTUNE
IMG_SIZE = 224
INPUT_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
BATCH_SIZE = 32
EPOCHS = 1000
# learning rate scheduler
LR_START = 0.001
LR_MAX = 0.005 * strategy.num_replicas_in_sync
LR_MIN = 0.001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8
# early stopping
PATIENCE = 3
EARLY_STOPPING = True
minimum_loss = float(2147000000)
total_images, total_labels, CLASSES = get_dataset('/home/v100/tf_workspace/datasets/natural_images/natural_images')
n_classes = len(CLASSES)
train_images, valid_images, train_labels, valid_labels = train_test_split(total_images, total_labels, test_size=.3, shuffle=True, random_state=777)
TRAIN_STEPS_PER_EPOCH = int(tf.math.ceil(len(train_images) / BATCH_SIZE).numpy())
VALID_STEP_PER_EPOCH = int(tf.math.ceil(len(valid_images) / BATCH_SIZE).numpy())
cost_fn = tf.keras.losses.CategoricalCrossentropy()
# optimizer = tf.keras.optimizers.Adam(learning_rate=lrfn)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
inputs = tf.keras.Input(shape=(INPUT_SHAPE))
model = ResNet50()
model(inputs=inputs)
model.summary()
# tf.keras.utils.plot_model(model, show_shapes=True)
train_acc = tf.metrics.CategoricalAccuracy()
train_loss = tf.metrics.CategoricalCrossentropy()
val_acc = tf.metrics.CategoricalAccuracy()
val_loss = tf.metrics.CategoricalCrossentropy()
transforms = A.Compose([
# A.Resize(IMG_SIZE, IMG_SIZE, 3, p=1),
A.OneOf([
A.HorizontalFlip(p=0.6),
A.VerticalFlip(p=0.6),
], p=0.7),
# A.Cutout(num_holes=15, max_h_size=4, max_w_size=4, fill_value=[0, 0, 0], p=0.7),
A.OneOf([
A.RandomRotate90(p=0.6),
A.ShiftScaleRotate(p=0.6, border_mode=1)
], p=0.7),
# A.RandomBrightness(limit=0.1, p=0.5),
# A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=0.5),
# A.RandomContrast(limit=0.2, p=0.5),
])
# tf_data_visualize(make_tf_data(train_images, train_labels, True), 'train')
stateful_matrices = ['train_acc', 'train_loss', 'valid_acc', 'valid_loss']
print()
print('Learning started. It takes sometime.')
for epoch in range(EPOCHS):
print("Current Learning Rate : ", optimizer._decayed_lr('float32').numpy())
tf.print("Epoch {}/{}".format(epoch + 1, EPOCHS))
prog_bar = tf.keras.utils.Progbar(target=TRAIN_STEPS_PER_EPOCH, stateful_metrics=stateful_matrices)
train_acc.reset_states()
train_loss.reset_states()
val_acc.reset_states()
val_loss.reset_states()
for idx, (images, labels) in enumerate(make_tf_data(train_images, train_labels, True)):
train(model, images, labels)
values=[('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy())]
prog_bar.update(idx, values=values)
if idx+1 >= TRAIN_STEPS_PER_EPOCH:
break
for idx, (images, labels) in enumerate(make_tf_data(valid_images, valid_labels, True)):
validation(model, images, labels)
if idx+1 >= VALID_STEP_PER_EPOCH:
break
values = [('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy()), ('valid_loss', val_loss.result().numpy()), ('valid_acc', val_acc.result().numpy())]
prog_bar.update(TRAIN_STEPS_PER_EPOCH, values=values, finalize=True)
if EARLY_STOPPING:
tmp_loss = (val_loss.result().numpy())
if tmp_loss < minimum_loss:
minimum_loss = tmp_loss
PATIENCE = 3
else:
PATIENCE -= 1
if PATIENCE == 0:
break
print('Learning Finished')
model.save('/home/v100/tf_workspace/model/resnet50_adam_he_l2_aug.h5') | [
7,
10,
11,
12,
16
] |
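The lrfn() schedule above warms up linearly to LR_MAX and then decays exponentially toward LR_MIN; a self-contained sketch using the same constants with a single replica (so LR_MAX = 0.005):

LR_START, LR_MAX, LR_MIN = 0.001, 0.005, 0.001
LR_RAMPUP_EPOCHS, LR_SUSTAIN_EPOCHS, LR_EXP_DECAY = 5, 0, 0.8

def lr_for(epoch):
    # Linear ramp, optional plateau, then exponential decay, as in lrfn().
    if epoch < LR_RAMPUP_EPOCHS:
        return (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    if epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        return LR_MAX
    return (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN

print([round(lr_for(e), 5) for e in (0, 5, 10)])  # [0.001, 0.005, 0.00231]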
1,519 | a238175c94764137bfc8fac1ce67436016b1591a | <mask token>
| <mask token>
@admin.register(OrderModel)
class OrderAdmin(admin.ModelAdmin):
<mask token>
| <mask token>
@admin.register(OrderModel)
class OrderAdmin(admin.ModelAdmin):
list_display = ['first_name', 'phone']
| from django.contrib import admin
from orders.models import OrderModel
@admin.register(OrderModel)
class OrderAdmin(admin.ModelAdmin):
list_display = ['first_name', 'phone']
| null | [
0,
1,
2,
3
] |
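A hypothetical sketch of the model the admin above registers; only the two fields named in list_display are grounded in the snippet:

from django.db import models

class OrderModel(models.Model):
    first_name = models.CharField(max_length=100)  # shown in the changelist
    phone = models.CharField(max_length=20)        # shown in the changelist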
1,520 | 00531c5a7fdcd24204b0546c081bbe7d63d0a6b2 | <mask token>
| <mask token>
print('Your name is ' + name + ', you are ' + age +
' years old, and your username is ' + reddit)
| name = input('What is your name? ')
age = input('How old are you? ')
reddit = input('What is your reddit username? ')
print('Your name is ' + name + ', you are ' + age +
' years old, and your username is ' + reddit)
| # Create a program that will ask the users name, age, and reddit username.
# Have it tell them the information back, in the format:
#
# Your name is (blank), you are (blank) years old, and your username is (blank)
#
# For extra credit, have the program log this information in a file to be accessed later.
#
name = input("What is your name? ")
age = input("How old are you? ")
reddit = input("What is your reddit username? ")
print("Your name is " + name + ", you are " + age + " years old, and your username is " + reddit)
| null | [
0,
1,
2,
3
] |
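One way to do the "extra credit" file logging the comments above ask for, appended after the three input() calls; the log file name is an arbitrary choice:

# Append one line per run so earlier entries are kept.
with open('users.log', 'a') as log:
    log.write('{}, {}, {}\n'.format(name, age, reddit))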
1,521 | 3024359710148bfbb15677973555f214b1f878b7 | <mask token>
| <mask token>
for classifier in classifiers:
classifier.fit(training_data[:1500], validation_data[:1500])
expected = validation_data[681:]
predicted = classifier.predict(training_data[681:])
print('Classification report for classifier %s:\n%s\n' % (classifier,
metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected,
predicted))
| <mask token>
my_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter
=',', dtype='str')
training_data = my_data[:, 0:6]
validation_data = my_data[:, 6]
classifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.
DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(
max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),
AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=
100), KNeighborsClassifier(3), KNeighborsClassifier(5),
KNeighborsClassifier(7)]
for classifier in classifiers:
classifier.fit(training_data[:1500], validation_data[:1500])
expected = validation_data[681:]
predicted = classifier.predict(training_data[681:])
print('Classification report for classifier %s:\n%s\n' % (classifier,
metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected,
predicted))
| from sklearn import svm, metrics, tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
my_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter
=',', dtype='str')
training_data = my_data[:, 0:6]
validation_data = my_data[:, 6]
classifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.
DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(
max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),
AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=
100), KNeighborsClassifier(3), KNeighborsClassifier(5),
KNeighborsClassifier(7)]
for classifier in classifiers:
classifier.fit(training_data[:1500], validation_data[:1500])
expected = validation_data[681:]
predicted = classifier.predict(training_data[681:])
print('Classification report for classifier %s:\n%s\n' % (classifier,
metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected,
predicted))
| from sklearn import svm, metrics, tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
my_data = np.loadtxt('edited_data/dataset_regression_edited.csv',delimiter=',', dtype='str')
training_data = my_data[:, 0:6]
validation_data = my_data[:, 6]
classifiers = [
tree.DecisionTreeClassifier(max_depth=5),
tree.DecisionTreeClassifier(max_depth=8),
tree.DecisionTreeClassifier(max_depth=10),
svm.SVC(kernel='linear'),
svm.SVC(kernel='rbf'),
AdaBoostClassifier(n_estimators=50),
AdaBoostClassifier(n_estimators=100),
KNeighborsClassifier(3),
KNeighborsClassifier(5),
KNeighborsClassifier(7)
]
for classifier in classifiers:
classifier.fit(training_data[:1500], validation_data[:1500])
expected = validation_data[681:]
predicted = classifier.predict(training_data[681:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
| [
0,
1,
2,
3,
4
] |
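The snippets above fit on rows [:1500] but score on rows [681:], so the train and test slices overlap; a sketch of the same evaluation on a disjoint split, assuming the same CSV layout:

from sklearn import metrics, svm
from sklearn.model_selection import train_test_split
import numpy as np

my_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter=',', dtype='str')
X_train, X_test, y_train, y_test = train_test_split(
    my_data[:, 0:6], my_data[:, 6], test_size=0.3, random_state=0)
clf = svm.SVC(kernel='linear').fit(X_train, y_train)
print(metrics.classification_report(y_test, clf.predict(X_test)))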
1,522 | 20fe9b68e65f6f017897bfa8e99d0c21ba1617fb | print(input()in[str(i**i+i)for i in range(11)])
num = int(input())
suma = 0
x = 0
while(suma < num):
x += 1
suma = x**x + x
print(True if suma == num else False)
| null | null | null | null | [
0
] |
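Both programs above test membership in the sequence i**i + i; its first eleven terms for reference:

print([i**i + i for i in range(11)])
# [1, 2, 6, 30, 260, 3130, 46662, 823550, 16777224, 387420498, 10000000010]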
1,523 | cf2bbe332237bd849df62be099f1719eaf1f2082 | <mask token>
class Fire(pyglet.sprite.Sprite):
<mask token>
<mask token>
def update(self):
self.rotation += self.rotate_speed
self.x += self.velocity_x
self.check_bounds()
def remote_update(self, x, rotation):
self.rotation = rotation
self.x = x
self.check_bounds()
def distance(self, point_1=(0, 0), point_2=(0, 0)):
"""Returns the distance between two points"""
return math.sqrt((point_1[0] - point_2[0]) ** 2 + (point_1[1] -
point_2[1]) ** 2)
def collides_with(self, other_object):
collision_distance = (self.image.width * 0.5 * self.scale +
other_object.image.width * 0.5 * other_object.scale)
actual_distance = self.distance(self.position, other_object.position)
return actual_distance <= collision_distance
| <mask token>
class Fire(pyglet.sprite.Sprite):
def __init__(self, *args, **kwargs):
super(Fire, self).__init__(*args, img=fireball, **kwargs)
self.rotation = 45
self.rotate_speed = 5
self.velocity_x = 5
<mask token>
def update(self):
self.rotation += self.rotate_speed
self.x += self.velocity_x
self.check_bounds()
def remote_update(self, x, rotation):
self.rotation = rotation
self.x = x
self.check_bounds()
def distance(self, point_1=(0, 0), point_2=(0, 0)):
"""Returns the distance between two points"""
return math.sqrt((point_1[0] - point_2[0]) ** 2 + (point_1[1] -
point_2[1]) ** 2)
def collides_with(self, other_object):
collision_distance = (self.image.width * 0.5 * self.scale +
other_object.image.width * 0.5 * other_object.scale)
actual_distance = self.distance(self.position, other_object.position)
return actual_distance <= collision_distance
| <mask token>
class Fire(pyglet.sprite.Sprite):
def __init__(self, *args, **kwargs):
super(Fire, self).__init__(*args, img=fireball, **kwargs)
self.rotation = 45
self.rotate_speed = 5
self.velocity_x = 5
def check_bounds(self):
max_x = 1000 + self.image.width / 2
if self.x > max_x:
self.x = -self.image.width / 2
def update(self):
self.rotation += self.rotate_speed
self.x += self.velocity_x
self.check_bounds()
def remote_update(self, x, rotation):
self.rotation = rotation
self.x = x
self.check_bounds()
def distance(self, point_1=(0, 0), point_2=(0, 0)):
"""Returns the distance between two points"""
return math.sqrt((point_1[0] - point_2[0]) ** 2 + (point_1[1] -
point_2[1]) ** 2)
def collides_with(self, other_object):
collision_distance = (self.image.width * 0.5 * self.scale +
other_object.image.width * 0.5 * other_object.scale)
actual_distance = self.distance(self.position, other_object.position)
return actual_distance <= collision_distance
| import pyglet
import math
from lvl1_resources import fireball
class Fire(pyglet.sprite.Sprite):
def __init__(self, *args, **kwargs):
super(Fire, self).__init__(*args, img=fireball, **kwargs)
self.rotation = 45
self.rotate_speed = 5
self.velocity_x = 5
def check_bounds(self):
max_x = 1000 + self.image.width / 2
if self.x > max_x:
self.x = -self.image.width / 2
def update(self):
self.rotation += self.rotate_speed
self.x += self.velocity_x
self.check_bounds()
def remote_update(self, x, rotation):
self.rotation = rotation
self.x = x
self.check_bounds()
def distance(self, point_1=(0, 0), point_2=(0, 0)):
"""Returns the distance between two points"""
return math.sqrt((point_1[0] - point_2[0]) ** 2 + (point_1[1] -
point_2[1]) ** 2)
def collides_with(self, other_object):
collision_distance = (self.image.width * 0.5 * self.scale +
other_object.image.width * 0.5 * other_object.scale)
actual_distance = self.distance(self.position, other_object.position)
return actual_distance <= collision_distance
| import pyglet
import math
from lvl1_resources import fireball
class Fire(pyglet.sprite.Sprite):
def __init__( self, *args, **kwargs):
super(Fire, self).__init__(img= fireball, *args, **kwargs)
self.rotation= 45
self.rotate_speed= 5
self.velocity_x= 5
def check_bounds(self):
max_x= 1000 + self.image.width/2
if self.x > max_x:
self.x= -self.image.width/2
def update(self):
self.rotation += self.rotate_speed
self.x += self.velocity_x
self.check_bounds()
def remote_update(self, x, rotation):
self.rotation= rotation
self.x= x
self.check_bounds()
def distance(self, point_1=(0, 0), point_2=(0, 0)):
"""Returns the distance between two points"""
return math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)
def collides_with(self, other_object):
collision_distance = self.image.width*0.5*self.scale \
+ other_object.image.width*0.5*other_object.scale
actual_distance = self.distance(self.position, other_object.position)
return (actual_distance <= collision_distance)
| [
5,
6,
7,
8,
9
] |
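collides_with() above treats both sprites as circles whose radii come from the scaled image widths; a standalone sketch of that test without pyglet:

import math

def collides(pos_a, radius_a, pos_b, radius_b):
    # Circles overlap when the centre distance is at most the radius sum.
    dx, dy = pos_a[0] - pos_b[0], pos_a[1] - pos_b[1]
    return math.hypot(dx, dy) <= radius_a + radius_b

print(collides((0, 0), 16, (20, 0), 16))  # True: distance 20 <= 32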
1,524 | 94f5fa411f8a41985caaf4eb7ab1cb4e45439405 | <mask token>
@MultiSerializer.register(lambda x: True)
class PickleSerializer(BaseSerializer):
<mask token>
<mask token>
<mask token>
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, Exception))
class ExceptionSerializer(BaseSerializer):
"""
Exception serialization.
"""
signature = '_e'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int,
float)))
class BasicSerializer(BaseSerializer):
"""
Basic serialization of simple python types.
"""
signature = '_b'
def serialize(self, data):
return data
def deserialize(self, data):
return data
class Encoder(object):
"""
Handles how args and kwargs are encoded over zmq ports.
By default zerorpc does not support passing kwargs to remote methods.
This class is used to fix that, so args and kwargs are combined into a
single args payload that is then deconstructed on the remote side.
"""
_default_serializer = PickleSerializer
def __init__(self, serializer=None):
if serializer is None:
serializer = self._default_serializer()
self.serializer = serializer
def encode(self, *args, **kwargs):
"""
Encode args and kwargs as a single serialized payload.
Parameters
----------
args : *Any
kwargs : **Any
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
return self.serializer.serialize(args), self.serializer.serialize(
kwargs)
def decode(self, *payload):
"""
Decode encoded args and kwargs.
Parameters
----------
payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
if not payload:
return (), {}
args, kwargs = payload
return self.serializer.deserialize(args), self.serializer.deserialize(
kwargs)
| <mask token>
class MultiSerializer(BaseSerializer):
<mask token>
<mask token>
@classmethod
def register(cls, claim_func):
"""
Decorator for registering a callable to serialize certain types.
Parameters
----------
claim_func : Callable[Any, bool]
Returns
-------
Callable[[T], T]
"""
def _deco(serializer):
cls._registered.insert(0, (claim_func, serializer))
return serializer
return _deco
def __init__(self):
self._serializers = {}
self._claims = []
for claim_func, serializerCls in self._registered:
assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.'
assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format(
serializerCls.signature)
serializer = serializerCls()
self._claims.append((serializerCls.signature, claim_func))
self._serializers[serializerCls.signature] = serializer
def serialize(self, data):
if isinstance(data, (list, tuple, set)):
return type(data)(self.serialize(x) for x in data)
elif isinstance(data, MutableMapping):
return type(data)({self.serialize(k): self.serialize(v) for k,
v in data.items()})
for name, claim_func in self._claims:
if claim_func(data):
return name, self._serializers[name].serialize(data)
raise ValueError('No serializer found for {!r}'.format(data))
def deserialize(self, payload):
if not payload:
return payload
if isinstance(payload, (tuple, list)) and len(payload
) == 2 and payload[0] in self._serializers.keys():
signature, data = payload
if signature not in self._serializers:
raise ValueError('No deserializer found for {!r}'.format(data))
return self._serializers[signature].deserialize(data)
if isinstance(payload, (list, tuple, set)):
return type(payload)(self.deserialize(x) for x in payload)
elif isinstance(payload, MutableMapping):
return type(payload)({self.deserialize(k): self.deserialize(v) for
k, v in payload.items()})
else:
raise NotImplementedError
@MultiSerializer.register(lambda x: True)
class PickleSerializer(BaseSerializer):
"""
Pickle serialization of python objects over the zmq ports.
"""
signature = '_p'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, Exception))
class ExceptionSerializer(BaseSerializer):
"""
Exception serialization.
"""
signature = '_e'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int,
float)))
class BasicSerializer(BaseSerializer):
"""
Basic serialization of simple python types.
"""
signature = '_b'
def serialize(self, data):
return data
def deserialize(self, data):
return data
class Encoder(object):
"""
Handles how args and kwargs are encoded over zmq ports.
By default zerorpc does not support passing kwargs to remote methods.
This class is used to fix that, so args and kwargs are combined into a
single args payload that is then deconstructed on the remote side.
"""
_default_serializer = PickleSerializer
def __init__(self, serializer=None):
if serializer is None:
serializer = self._default_serializer()
self.serializer = serializer
def encode(self, *args, **kwargs):
"""
Encode args and kwargs as a single serialized payload.
Parameters
----------
args : *Any
kwargs : **Any
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
return self.serializer.serialize(args), self.serializer.serialize(
kwargs)
def decode(self, *payload):
"""
Decode encoded args and kwargs.
Parameters
----------
payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
if not payload:
return (), {}
args, kwargs = payload
return self.serializer.deserialize(args), self.serializer.deserialize(
kwargs)
| <mask token>
class BaseSerializer(Generic[T]):
<mask token>
signature = None
@abc.abstractmethod
def serialize(self, data):
"""
Serialize a python object to transport over zmq.
Parameters
----------
data : T
Returns
-------
Any
"""
raise NotImplementedError
@abc.abstractmethod
def deserialize(self, data):
"""
Deserialize a python object. Counter of `serialize`.
Parameters
----------
data : Any
Returns
-------
T
"""
raise NotImplementedError
class MultiSerializer(BaseSerializer):
"""
Serializer with multiple sub-serializers that can register methods to claim
certain python objects.
All serialized objects (besides list, tuples, sets, dicts) are represented
as a tuple of (serializer.signature, serialized_value). This is so data
can be properly decoded on the remote side.
Register new sub-serializers using the register decorator:
@MultiSerializer.register(lambda x: isinstance(x, MyCls))
class MyClsSerializer(BaseSerializer):
...
"""
_registered = []
@classmethod
def register(cls, claim_func):
"""
Decorator for registering a callable to serialize certain types.
Parameters
----------
claim_func : Callable[Any, bool]
Returns
-------
Callable[[T], T]
"""
def _deco(serializer):
cls._registered.insert(0, (claim_func, serializer))
return serializer
return _deco
def __init__(self):
self._serializers = {}
self._claims = []
for claim_func, serializerCls in self._registered:
assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.'
assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format(
serializerCls.signature)
serializer = serializerCls()
self._claims.append((serializerCls.signature, claim_func))
self._serializers[serializerCls.signature] = serializer
def serialize(self, data):
if isinstance(data, (list, tuple, set)):
return type(data)(self.serialize(x) for x in data)
elif isinstance(data, MutableMapping):
return type(data)({self.serialize(k): self.serialize(v) for k,
v in data.items()})
for name, claim_func in self._claims:
if claim_func(data):
return name, self._serializers[name].serialize(data)
raise ValueError('No serializer found for {!r}'.format(data))
def deserialize(self, payload):
if not payload:
return payload
if isinstance(payload, (tuple, list)) and len(payload
) == 2 and payload[0] in self._serializers.keys():
signature, data = payload
if signature not in self._serializers:
raise ValueError('No deserializer found for {!r}'.format(data))
return self._serializers[signature].deserialize(data)
if isinstance(payload, (list, tuple, set)):
return type(payload)(self.deserialize(x) for x in payload)
elif isinstance(payload, MutableMapping):
return type(payload)({self.deserialize(k): self.deserialize(v) for
k, v in payload.items()})
else:
raise NotImplementedError
@MultiSerializer.register(lambda x: True)
class PickleSerializer(BaseSerializer):
"""
Pickle serialization of python objects over the zmq ports.
"""
signature = '_p'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, Exception))
class ExceptionSerializer(BaseSerializer):
"""
Exception serialization.
"""
signature = '_e'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int,
float)))
class BasicSerializer(BaseSerializer):
"""
Basic serialization of simple python types.
"""
signature = '_b'
def serialize(self, data):
return data
def deserialize(self, data):
return data
class Encoder(object):
"""
Handles how args and kwargs are encoded over zmq ports.
By default zerorpc does not support passing kwargs to remote methods.
    This class is used to fix that so args and kwargs are combined into a
single args payload that is then deconstructed on the remote side.
"""
_default_serializer = PickleSerializer
def __init__(self, serializer=None):
if serializer is None:
serializer = self._default_serializer()
self.serializer = serializer
def encode(self, *args, **kwargs):
"""
Encode args and kwargs as a single serialized payload.
Parameters
----------
args : *Any
kwargs : **Any
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
return self.serializer.serialize(args), self.serializer.serialize(
kwargs)
def decode(self, *payload):
"""
Decode encoded args and kwargs.
Parameters
----------
payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
if not payload:
return (), {}
args, kwargs = payload
return self.serializer.deserialize(args), self.serializer.deserialize(
kwargs)
| <mask token>
class BaseSerializer(Generic[T]):
"""
The serializer is responsible for converting complex python data types
into primitive types that can be sent over zmq ports via msgpack.
"""
signature = None
@abc.abstractmethod
def serialize(self, data):
"""
Serialize a python object to transport over zmq.
Parameters
----------
data : T
Returns
-------
Any
"""
raise NotImplementedError
@abc.abstractmethod
def deserialize(self, data):
"""
Deserialize a python object. Counter of `serialize`.
Parameters
----------
data : Any
Returns
-------
T
"""
        raise NotImplementedError
class MultiSerializer(BaseSerializer):
"""
    Serializer with multiple sub-serializers that can register methods to claim
certain python objects.
All serialized objects (besides list, tuples, sets, dicts) are represented
as a tuple of (serializer.signature, serialized_value). This is so data
can be properly decoded on the remote side.
Register new sub-serializers using the register decorator:
    @MultiSerializer.register(lambda x: isinstance(x, MyCls))
class MyClsSerializer(BaseSerializer):
...
"""
_registered = []
@classmethod
def register(cls, claim_func):
"""
Decorator for registering a callable to serialize certain types.
Parameters
----------
        claim_func : Callable[[Any], bool]
Returns
-------
Callable[[T], T]
"""
def _deco(serializer):
cls._registered.insert(0, (claim_func, serializer))
return serializer
return _deco
def __init__(self):
self._serializers = {}
self._claims = []
for claim_func, serializerCls in self._registered:
assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.'
assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format(
serializerCls.signature)
serializer = serializerCls()
self._claims.append((serializerCls.signature, claim_func))
self._serializers[serializerCls.signature] = serializer
def serialize(self, data):
if isinstance(data, (list, tuple, set)):
return type(data)(self.serialize(x) for x in data)
elif isinstance(data, MutableMapping):
return type(data)({self.serialize(k): self.serialize(v) for k,
v in data.items()})
for name, claim_func in self._claims:
if claim_func(data):
return name, self._serializers[name].serialize(data)
raise ValueError('No serializer found for {!r}'.format(data))
def deserialize(self, payload):
if not payload:
return payload
if isinstance(payload, (tuple, list)) and len(payload
) == 2 and payload[0] in self._serializers.keys():
signature, data = payload
if signature not in self._serializers:
raise ValueError('No deserializer found for {!r}'.format(data))
return self._serializers[signature].deserialize(data)
if isinstance(payload, (list, tuple, set)):
return type(payload)(self.deserialize(x) for x in payload)
elif isinstance(payload, MutableMapping):
return type(payload)({self.deserialize(k): self.deserialize(v) for
k, v in payload.items()})
else:
raise NotImplementedError
@MultiSerializer.register(lambda x: True)
class PickleSerializer(BaseSerializer):
"""
Pickle serialization of python objects over the zmq ports.
"""
signature = '_p'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, Exception))
class ExceptionSerializer(BaseSerializer):
"""
Exception serialization.
"""
signature = '_e'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int,
float)))
class BasicSerializer(BaseSerializer):
"""
Basic serialization of simple python types.
"""
signature = '_b'
def serialize(self, data):
return data
def deserialize(self, data):
return data
class Encoder(object):
"""
Handles how args and kwargs are encoded over zmq ports.
By default zerorpc does not support passing kwargs to remote methods.
    This class is used to fix that so args and kwargs are combined into a
single args payload that is then deconstructed on the remote side.
"""
_default_serializer = PickleSerializer
def __init__(self, serializer=None):
if serializer is None:
serializer = self._default_serializer()
self.serializer = serializer
def encode(self, *args, **kwargs):
"""
Encode args and kwargs as a single serialized payload.
Parameters
----------
args : *Any
kwargs : **Any
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
return self.serializer.serialize(args), self.serializer.serialize(
kwargs)
def decode(self, *payload):
"""
Decode encoded args and kwargs.
Parameters
----------
payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
if not payload:
return (), {}
args, kwargs = payload
return self.serializer.deserialize(args), self.serializer.deserialize(
kwargs)
| import abc
try:
import cPickle as pickle
except ImportError:
import pickle
from typing import *
T = TypeVar('T')
class BaseSerializer(Generic[T]):
"""
The serializer is responsible for converting complex python data types
into primitive types that can be sent over zmq ports via msgpack.
"""
# Used within the `MultiSerializer` to embed which serializer to use for
# round-trip data serialization.
signature = None # type: str
@abc.abstractmethod
def serialize(self, data):
"""
Serialize a python object to transport over zmq.
Parameters
----------
data : T
Returns
-------
Any
"""
raise NotImplementedError
@abc.abstractmethod
def deserialize(self, data):
"""
Deserialize a python object. Counter of `serialize`.
Parameters
----------
data : Any
Returns
-------
T
"""
        raise NotImplementedError
class MultiSerializer(BaseSerializer):
"""
    Serializer with multiple sub-serializers that can register methods to claim
certain python objects.
All serialized objects (besides list, tuples, sets, dicts) are represented
as a tuple of (serializer.signature, serialized_value). This is so data
can be properly decoded on the remote side.
Register new sub-serializers using the register decorator:
    @MultiSerializer.register(lambda x: isinstance(x, MyCls))
class MyClsSerializer(BaseSerializer):
...
"""
_registered = []
@classmethod
def register(cls, claim_func):
"""
Decorator for registering a callable to serialize certain types.
Parameters
----------
        claim_func : Callable[[Any], bool]
Returns
-------
Callable[[T], T]
"""
def _deco(serializer):
cls._registered.insert(0, (claim_func, serializer))
return serializer
return _deco
def __init__(self):
self._serializers = {} # type: Dict[str, BaseSerializer]
self._claims = [] # type: List[Tuple[str, Callable[[Any], bool]]]
for claim_func, serializerCls in self._registered:
assert serializerCls.signature is not None, \
'Populate the serializer.signature attribute.'
assert serializerCls.signature not in self._serializers, \
'Existing serializer with signature ' \
'{!r}'.format(serializerCls.signature)
serializer = serializerCls()
self._claims.append((serializerCls.signature, claim_func))
self._serializers[serializerCls.signature] = serializer
def serialize(self, data):
if isinstance(data, (list, tuple, set)):
return type(data)(self.serialize(x) for x in data)
elif isinstance(data, MutableMapping):
return type(data)({self.serialize(k): self.serialize(v)
for k, v in data.items()})
for name, claim_func in self._claims:
if claim_func(data):
return name, self._serializers[name].serialize(data)
raise ValueError('No serializer found for {!r}'.format(data))
def deserialize(self, payload):
if not payload:
return payload
if isinstance(payload, (tuple, list)) \
and len(payload) == 2 \
and payload[0] in self._serializers.keys():
signature, data = payload
if signature not in self._serializers:
raise ValueError('No deserializer found for {!r}'.format(data))
return self._serializers[signature].deserialize(data)
if isinstance(payload, (list, tuple, set)):
return type(payload)(self.deserialize(x) for x in payload)
elif isinstance(payload, MutableMapping):
return type(payload)({self.deserialize(k): self.deserialize(v)
for k, v in payload.items()})
else:
raise NotImplementedError
@MultiSerializer.register(lambda x: True)
class PickleSerializer(BaseSerializer):
"""
Pickle serialization of python objects over the zmq ports.
"""
signature = '_p'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(lambda x: isinstance(x, Exception))
class ExceptionSerializer(BaseSerializer):
"""
Exception serialization.
"""
signature = '_e'
def serialize(self, data):
return pickle.dumps(data, -1)
def deserialize(self, data):
return pickle.loads(data)
@MultiSerializer.register(
lambda x: isinstance(x, (str, unicode, bytes, int, float)))
class BasicSerializer(BaseSerializer):
"""
Basic serialization of simple python types.
"""
signature = '_b'
def serialize(self, data):
return data
def deserialize(self, data):
return data
class Encoder(object):
"""
Handles how args and kwargs are encoded over zmq ports.
By default zerorpc does not support passing kwargs to remote methods.
    This class is used to fix that so args and kwargs are combined into a
single args payload that is then deconstructed on the remote side.
"""
_default_serializer = PickleSerializer
def __init__(self, serializer=None):
if serializer is None:
serializer = self._default_serializer()
self.serializer = serializer
def encode(self, *args, **kwargs):
"""
Encode args and kwargs as a single serialized payload.
Parameters
----------
args : *Any
kwargs : **Any
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
return self.serializer.serialize(args), \
self.serializer.serialize(kwargs)
def decode(self, *payload):
"""
Decode encoded args and kwargs.
Parameters
----------
payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]
Returns
-------
Tuple[Tuple[Any, ...], Dict[Any, Any]]
"""
if not payload:
return (), {}
args, kwargs = payload
return self.serializer.deserialize(args), \
self.serializer.deserialize(kwargs)
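# --- Hedged usage sketch (not part of the original module) ---
# Round-trips args and kwargs through the Encoder/MultiSerializer pair defined
# above; names are taken from the code, and the module's Python 2 assumptions
# (cPickle, `unicode`) are kept.
encoder = Encoder(serializer=MultiSerializer())
payload = encoder.encode(1, 'two', items=[3.0, 'four'], error=ValueError('boom'))
args, kwargs = encoder.decode(*payload)
assert args == (1, 'two')
assert kwargs['items'] == [3.0, 'four']
assert isinstance(kwargs['error'], ValueError)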
 | [18, 26, 32, 33, 37] |
1,525 | 1a05817c4c16f2d9234e504b0c98f9c9ae2dc3f7 | <mask token>
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected a django.models.Model instance, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
<mask token>
| <mask token>
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected a django.models.Model instance, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
<mask token>
register.tag(RatingBlock)
| <mask token>
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected a django.models.Model instance, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
register = template.Library()
register.tag(RatingBlock)
| from django import template
from classytags.helpers import InclusionTag
from classytags.core import Tag, Options
from classytags.arguments import Argument
from ratings.models import RatedItem
from blogs.permissions import Permissions
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected a django.models.Model instance, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
register = template.Library()
register.tag(RatingBlock)
| #coding=utf-8
from django import template
from classytags.helpers import InclusionTag
from classytags.core import Tag, Options
from classytags.arguments import Argument
from ratings.models import RatedItem
from blogs.permissions import Permissions
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(
Argument('obj', required=True),
)
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError("Ожидался экземпляр django.models.Model, а получили %s." % type(obj))
can_vote = True
if 'user' in context and\
getattr(obj, 'permissions', None) and\
isinstance(obj.permissions, Permissions) and\
hasattr(obj.permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {
'content_type': str(obj._meta),
'obj_pk': obj.pk,
'can_vote': can_vote,
'score': RatedItem.objects.score_for_obj(obj),
}
register = template.Library()
register.tag(RatingBlock) | [
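# --- Hedged usage sketch (not part of the original module) ---
# Renders the tag from Python; "rating_tags" is a hypothetical templatetags
# module name for this file, and the article/user arguments stand in for the
# real objects a caller would supply.
from django.template import Context, Template

def render_rating(article, user):
    t = Template('{% load rating_tags %}{% rating article %}')
    return t.render(Context({'article': article, 'user': user}))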
3,
4,
5,
6,
7
] |
1,526 | 9206e4c4eff8ca64266ce53705e88069912b80d8 | <mask token>
| <mask token>
parser.add_argument('-f', '-forward', required=True, help=
'forward sequencing files', nargs='+', action='store', dest='forward_files'
)
parser.add_argument('-r', '-reverse', required=True, help=
'reverse sequencing files', nargs='+', action='store', dest='reverse_files'
)
parser.add_argument('-s', '-segments', required=True, help=
'number of segments to split job into', action='store', dest=
'total_segments')
parser.add_argument('-o', '-out', required=True, help=
'keyword for saving output files', action='store', dest='out')
parser.add_argument('-c', '-cutoff', required=False, default=0, help=
'read count cutoff for barcodes to keep (default=0)', action='store',
dest='cutoff')
parser.add_argument('-b', '-barcode', required=False, default=31, help=
'length of barcode (default=31)', action='store', dest='barcode_length')
parser.add_argument('-bq', '-bquality', required=False, default=53, help=
'ascii quality score cutoff for barcode (default=53)', action='store',
dest='barcode_quality')
parser.add_argument('-gdq', '-gdquality', required=False, default=55, help=
'ascii quality score cutoff for guide-donor (default=55)', action=
'store', dest='guide_donor_quality')
<mask token>
for file in args.forward_files:
forward_lines.extend(gzip.open(file).readlines())
<mask token>
for line in forward_quality:
scores = [ord(i) for i in line[:BARCODE_LENGTH]]
barcode_quality_scores.append(np.mean(scores))
<mask token>
for line in forward_quality:
scores = [ord(i) for i in line[BARCODE_LENGTH:]]
forward_guide_donor_quality_scores.append(np.mean(scores))
<mask token>
for file in args.reverse_files:
reverse_lines.extend(gzip.open(file).readlines())
<mask token>
for line in reverse_quality:
scores = [ord(i) for i in line]
reverse_guide_donor_quality_scores.append(np.mean(scores))
<mask token>
if READ_COUNT_CUTOFF != 0:
barcodes_to_keep = [key for key, count in Counter(barcodes).items() if
count >= READ_COUNT_CUTOFF]
keep_dict = {g: (True) for g in barcodes_to_keep}
forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r,
b in zip(forward_sequence, reverse_sequence, barcodes) if b in
keep_dict])
<mask token>
pickle.dump(count_dict, pickle_out, protocol=2)
pickle_out.close()
<mask token>
for segment in range(0, total_segments):
start = int(LENGTH / total_segments * segment)
if segment + 1 == total_segments:
sub_barcodes_set = barcode_list[start:]
else:
stop = int(LENGTH / total_segments * (segment + 1))
sub_barcodes_set = barcode_list[start:stop]
sub_barcodes_dict = {b: (True) for b in sub_barcodes_set}
sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b in
zip(forward_sequence, reverse_sequence, barcodes) if b in
sub_barcodes_dict])
R1_dict, R2_dict = {}, {}
for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):
if b not in R1_dict and b not in R2_dict:
R1_dict[b] = [f]
R2_dict[b] = [r]
else:
R1_dict[b].append(f)
R2_dict[b].append(r)
pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(
total_segments) + '.R1_dict', 'wb')
pickle.dump(R1_dict, pickle_out, protocol=2)
pickle_out.close()
pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(
total_segments) + '.R2_dict', 'wb')
pickle.dump(R2_dict, pickle_out, protocol=2)
pickle_out.close()
| <mask token>
parser = argparse.ArgumentParser()
parser.add_argument('-f', '-forward', required=True, help=
'forward sequencing files', nargs='+', action='store', dest='forward_files'
)
parser.add_argument('-r', '-reverse', required=True, help=
'reverse sequencing files', nargs='+', action='store', dest='reverse_files'
)
parser.add_argument('-s', '-segments', required=True, help=
'number of segments to split job into', action='store', dest=
'total_segments')
parser.add_argument('-o', '-out', required=True, help=
'keyword for saving output files', action='store', dest='out')
parser.add_argument('-c', '-cutoff', required=False, default=0, help=
'read count cutoff for barcodes to keep (default=0)', action='store',
dest='cutoff')
parser.add_argument('-b', '-barcode', required=False, default=31, help=
'length of barcode (default=31)', action='store', dest='barcode_length')
parser.add_argument('-bq', '-bquality', required=False, default=53, help=
'ascii quality score cutoff for barcode (default=53)', action='store',
dest='barcode_quality')
parser.add_argument('-gdq', '-gdquality', required=False, default=55, help=
'ascii quality score cutoff for guide-donor (default=55)', action=
'store', dest='guide_donor_quality')
args = parser.parse_args()
OUTPUT_HEADER = args.out
READ_COUNT_CUTOFF = int(args.cutoff)
BARCODE_LENGTH = int(args.barcode_length)
BARCODE_QUALITY_CUTOFF = int(args.barcode_quality)
GUIDE_DONOR_QUALITY_CUTOFF = int(args.guide_donor_quality)
forward_lines = []
for file in args.forward_files:
forward_lines.extend(gzip.open(file).readlines())
forward_sequence = [forward_lines[r] for r in range(1, len(forward_lines), 4)]
forward_sequence = [l.decode('utf-8').replace('\n', '') for l in
forward_sequence]
forward_quality = [forward_lines[r] for r in range(3, len(forward_lines), 4)]
forward_quality = [l.decode('utf-8').replace('\n', '') for l in forward_quality
]
barcode_quality_scores = []
for line in forward_quality:
scores = [ord(i) for i in line[:BARCODE_LENGTH]]
barcode_quality_scores.append(np.mean(scores))
forward_guide_donor_quality_scores = []
for line in forward_quality:
scores = [ord(i) for i in line[BARCODE_LENGTH:]]
forward_guide_donor_quality_scores.append(np.mean(scores))
reverse_lines = []
for file in args.reverse_files:
reverse_lines.extend(gzip.open(file).readlines())
reverse_sequence = [reverse_lines[r] for r in range(1, len(reverse_lines), 4)]
reverse_sequence = [l.decode('utf-8').replace('\n', '') for l in
reverse_sequence]
reverse_quality = [reverse_lines[r] for r in range(3, len(reverse_lines), 4)]
reverse_quality = [l.decode('utf-8').replace('\n', '') for l in reverse_quality
]
reverse_guide_donor_quality_scores = []
for line in reverse_quality:
scores = [ord(i) for i in line]
reverse_guide_donor_quality_scores.append(np.mean(scores))
forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, f[:
BARCODE_LENGTH]) for f, r, fscore, fscore2, rscore in zip(
forward_sequence, reverse_sequence, barcode_quality_scores,
forward_guide_donor_quality_scores, reverse_guide_donor_quality_scores) if
fscore >= BARCODE_QUALITY_CUTOFF and fscore2 >=
GUIDE_DONOR_QUALITY_CUTOFF and rscore >= GUIDE_DONOR_QUALITY_CUTOFF])
if READ_COUNT_CUTOFF != 0:
barcodes_to_keep = [key for key, count in Counter(barcodes).items() if
count >= READ_COUNT_CUTOFF]
keep_dict = {g: (True) for g in barcodes_to_keep}
forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r,
b in zip(forward_sequence, reverse_sequence, barcodes) if b in
keep_dict])
count_dict = dict(Counter(barcodes))
pickle_out = open(OUTPUT_HEADER + '.read_count_dict', 'wb')
pickle.dump(count_dict, pickle_out, protocol=2)
pickle_out.close()
LENGTH = len(set(barcodes))
total_segments = int(args.total_segments)
barcode_list = list(set(barcodes))
for segment in range(0, total_segments):
start = int(LENGTH / total_segments * segment)
if segment + 1 == total_segments:
sub_barcodes_set = barcode_list[start:]
else:
stop = int(LENGTH / total_segments * (segment + 1))
sub_barcodes_set = barcode_list[start:stop]
sub_barcodes_dict = {b: (True) for b in sub_barcodes_set}
sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b in
zip(forward_sequence, reverse_sequence, barcodes) if b in
sub_barcodes_dict])
R1_dict, R2_dict = {}, {}
for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):
if b not in R1_dict and b not in R2_dict:
R1_dict[b] = [f]
R2_dict[b] = [r]
else:
R1_dict[b].append(f)
R2_dict[b].append(r)
pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(
total_segments) + '.R1_dict', 'wb')
pickle.dump(R1_dict, pickle_out, protocol=2)
pickle_out.close()
pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(
total_segments) + '.R2_dict', 'wb')
pickle.dump(R2_dict, pickle_out, protocol=2)
pickle_out.close()
| <mask token>
from collections import Counter
import argparse
import gzip
import numpy as np
import pickle
parser = argparse.ArgumentParser()
parser.add_argument('-f', '-forward', required=True, help=
'forward sequencing files', nargs='+', action='store', dest='forward_files'
)
parser.add_argument('-r', '-reverse', required=True, help=
'reverse sequencing files', nargs='+', action='store', dest='reverse_files'
)
parser.add_argument('-s', '-segments', required=True, help=
'number of segments to split job into', action='store', dest=
'total_segments')
parser.add_argument('-o', '-out', required=True, help=
'keyword for saving output files', action='store', dest='out')
parser.add_argument('-c', '-cutoff', required=False, default=0, help=
'read count cutoff for barcodes to keep (default=0)', action='store',
dest='cutoff')
parser.add_argument('-b', '-barcode', required=False, default=31, help=
'length of barcode (default=31)', action='store', dest='barcode_length')
parser.add_argument('-bq', '-bquality', required=False, default=53, help=
'ascii quality score cutoff for barcode (default=53)', action='store',
dest='barcode_quality')
parser.add_argument('-gdq', '-gdquality', required=False, default=55, help=
'ascii quality score cutoff for guide-donor (default=55)', action=
'store', dest='guide_donor_quality')
args = parser.parse_args()
OUTPUT_HEADER = args.out
READ_COUNT_CUTOFF = int(args.cutoff)
BARCODE_LENGTH = int(args.barcode_length)
BARCODE_QUALITY_CUTOFF = int(args.barcode_quality)
GUIDE_DONOR_QUALITY_CUTOFF = int(args.guide_donor_quality)
forward_lines = []
for file in args.forward_files:
forward_lines.extend(gzip.open(file).readlines())
forward_sequence = [forward_lines[r] for r in range(1, len(forward_lines), 4)]
forward_sequence = [l.decode('utf-8').replace('\n', '') for l in
forward_sequence]
forward_quality = [forward_lines[r] for r in range(3, len(forward_lines), 4)]
forward_quality = [l.decode('utf-8').replace('\n', '') for l in forward_quality
]
barcode_quality_scores = []
for line in forward_quality:
scores = [ord(i) for i in line[:BARCODE_LENGTH]]
barcode_quality_scores.append(np.mean(scores))
forward_guide_donor_quality_scores = []
for line in forward_quality:
scores = [ord(i) for i in line[BARCODE_LENGTH:]]
forward_guide_donor_quality_scores.append(np.mean(scores))
reverse_lines = []
for file in args.reverse_files:
reverse_lines.extend(gzip.open(file).readlines())
reverse_sequence = [reverse_lines[r] for r in range(1, len(reverse_lines), 4)]
reverse_sequence = [l.decode('utf-8').replace('\n', '') for l in
reverse_sequence]
reverse_quality = [reverse_lines[r] for r in range(3, len(reverse_lines), 4)]
reverse_quality = [l.decode('utf-8').replace('\n', '') for l in reverse_quality
]
reverse_guide_donor_quality_scores = []
for line in reverse_quality:
scores = [ord(i) for i in line]
reverse_guide_donor_quality_scores.append(np.mean(scores))
forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, f[:
BARCODE_LENGTH]) for f, r, fscore, fscore2, rscore in zip(
forward_sequence, reverse_sequence, barcode_quality_scores,
forward_guide_donor_quality_scores, reverse_guide_donor_quality_scores) if
fscore >= BARCODE_QUALITY_CUTOFF and fscore2 >=
GUIDE_DONOR_QUALITY_CUTOFF and rscore >= GUIDE_DONOR_QUALITY_CUTOFF])
if READ_COUNT_CUTOFF != 0:
barcodes_to_keep = [key for key, count in Counter(barcodes).items() if
count >= READ_COUNT_CUTOFF]
keep_dict = {g: (True) for g in barcodes_to_keep}
forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r,
b in zip(forward_sequence, reverse_sequence, barcodes) if b in
keep_dict])
count_dict = dict(Counter(barcodes))
pickle_out = open(OUTPUT_HEADER + '.read_count_dict', 'wb')
pickle.dump(count_dict, pickle_out, protocol=2)
pickle_out.close()
LENGTH = len(set(barcodes))
total_segments = int(args.total_segments)
barcode_list = list(set(barcodes))
for segment in range(0, total_segments):
start = int(LENGTH / total_segments * segment)
if segment + 1 == total_segments:
sub_barcodes_set = barcode_list[start:]
else:
stop = int(LENGTH / total_segments * (segment + 1))
sub_barcodes_set = barcode_list[start:stop]
sub_barcodes_dict = {b: (True) for b in sub_barcodes_set}
sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b in
zip(forward_sequence, reverse_sequence, barcodes) if b in
sub_barcodes_dict])
R1_dict, R2_dict = {}, {}
for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):
if b not in R1_dict and b not in R2_dict:
R1_dict[b] = [f]
R2_dict[b] = [r]
else:
R1_dict[b].append(f)
R2_dict[b].append(r)
pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(
total_segments) + '.R1_dict', 'wb')
pickle.dump(R1_dict, pickle_out, protocol=2)
pickle_out.close()
pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(
total_segments) + '.R2_dict', 'wb')
pickle.dump(R2_dict, pickle_out, protocol=2)
pickle_out.close()
| """
Process paired-end reads of barcode-guide-donor Step 1 cassette to generate a library reference table mapping barcodes to features.
Create dictionaries mapping barcodes to forward and reverse reads, split into sub-segments.
R1_dict: map barcodes to corresponding R1 sequences.
R2_dict: map barcodes to corresponding R2 sequences.
read_count_dict: map each barcode to corresponding total number of reads.
"""
from collections import Counter
import argparse
import gzip
import numpy as np
import pickle
parser = argparse.ArgumentParser()
parser.add_argument('-f', '-forward', required=True, help="forward sequencing files", nargs='+', action='store', dest='forward_files')
parser.add_argument('-r', '-reverse', required=True, help="reverse sequencing files", nargs='+', action='store', dest='reverse_files')
parser.add_argument('-s', '-segments', required=True, help="number of segments to split job into", action='store', dest='total_segments')
parser.add_argument('-o', '-out', required=True, help="keyword for saving output files", action='store', dest='out')
parser.add_argument('-c', '-cutoff', required=False, default=0, help="read count cutoff for barcodes to keep (default=0)", action='store', dest='cutoff')
parser.add_argument('-b', '-barcode', required=False, default=31, help="length of barcode (default=31)", action='store', dest='barcode_length')
parser.add_argument('-bq', '-bquality', required=False, default=53, help="ascii quality score cutoff for barcode (default=53)", action='store', dest='barcode_quality')
parser.add_argument('-gdq', '-gdquality', required=False, default=55, help="ascii quality score cutoff for guide-donor (default=55)", action='store', dest='guide_donor_quality')
args = parser.parse_args()
OUTPUT_HEADER = args.out
READ_COUNT_CUTOFF = int(args.cutoff)
BARCODE_LENGTH = int(args.barcode_length)
BARCODE_QUALITY_CUTOFF = int(args.barcode_quality)
GUIDE_DONOR_QUALITY_CUTOFF = int(args.guide_donor_quality)
# Collect all sequencing reads from forward files.
forward_lines = []
for file in args.forward_files:
forward_lines.extend(gzip.open(file).readlines())
# Forward sequence.
forward_sequence = [forward_lines[r] for r in range(1, len(forward_lines), 4)]
forward_sequence = [l.decode('utf-8').replace("\n","") for l in forward_sequence]
# Forward sequence quality scores.
forward_quality = [forward_lines[r] for r in range(3, len(forward_lines), 4)]
forward_quality = [l.decode('utf-8').replace("\n","") for l in forward_quality]
barcode_quality_scores = [] # Barcode quality.
for line in forward_quality:
scores = [ord(i) for i in line[:BARCODE_LENGTH]]
barcode_quality_scores.append(np.mean(scores))
forward_guide_donor_quality_scores = [] # Guide-donor quality.
for line in forward_quality:
scores = [ord(i) for i in line[BARCODE_LENGTH:]]
forward_guide_donor_quality_scores.append(np.mean(scores))
# Collect all sequencing reads from reverse files.
reverse_lines = []
for file in args.reverse_files:
reverse_lines.extend(gzip.open(file).readlines())
# Reverse sequence.
reverse_sequence = [reverse_lines[r] for r in range(1, len(reverse_lines), 4)]
reverse_sequence = [l.decode('utf-8').replace("\n","") for l in reverse_sequence]
# Reverse sequence base quality scores.
reverse_quality = [reverse_lines[r] for r in range(3, len(reverse_lines), 4)]
reverse_quality = [l.decode('utf-8').replace("\n","") for l in reverse_quality]
reverse_guide_donor_quality_scores = []
for line in reverse_quality:
scores = [ord(i) for i in line]
reverse_guide_donor_quality_scores.append(np.mean(scores))
# Filter out low quality barcodes and low quality guide-donor sequences.
forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, f[:BARCODE_LENGTH]) for f, r, fscore, fscore2, rscore
in zip(forward_sequence, reverse_sequence, barcode_quality_scores, forward_guide_donor_quality_scores, reverse_guide_donor_quality_scores)
if (fscore >= BARCODE_QUALITY_CUTOFF) and (fscore2 >= GUIDE_DONOR_QUALITY_CUTOFF) and (rscore >= GUIDE_DONOR_QUALITY_CUTOFF)])
if (READ_COUNT_CUTOFF != 0): # optional choice to remove low read barcodes from annotations.
barcodes_to_keep = [key for key, count in Counter(barcodes).items() if count >= READ_COUNT_CUTOFF]
keep_dict = {g: True for g in barcodes_to_keep}
forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r, b
in zip(forward_sequence, reverse_sequence, barcodes) if b in keep_dict])
# Store barcode read count dictionary for later use.
count_dict = dict(Counter(barcodes))
pickle_out = open(OUTPUT_HEADER + ".read_count_dict", "wb")
pickle.dump(count_dict, pickle_out, protocol=2)
pickle_out.close()
# Divide up barcodes into specified number of segments for parallel analysis.
LENGTH = len(set(barcodes))
total_segments = int(args.total_segments)
barcode_list = list(set(barcodes))
for segment in range(0, total_segments):
start = int((LENGTH/total_segments)*segment) # determine start and end position of segment.
if (segment+1 == total_segments):
sub_barcodes_set = barcode_list[start:]
else:
stop = int((LENGTH/total_segments)*(segment+1))
sub_barcodes_set = barcode_list[start:stop]
sub_barcodes_dict = {b: True for b in sub_barcodes_set}
sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b
in zip(forward_sequence, reverse_sequence, barcodes) if b in sub_barcodes_dict])
R1_dict, R2_dict = {}, {} # store reads by barcode into R1 and R2 dictionaries.
for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):
if (b not in R1_dict) and (b not in R2_dict):
R1_dict[b] = [f]
R2_dict[b] = [r]
else:
R1_dict[b].append(f)
R2_dict[b].append(r)
pickle_out = open(OUTPUT_HEADER + "_" + str(segment) + "-" + str(total_segments) + ".R1_dict", "wb")
pickle.dump(R1_dict, pickle_out, protocol=2)
pickle_out.close()
pickle_out = open(OUTPUT_HEADER + "_" + str(segment) + "-" + str(total_segments) + ".R2_dict", "wb")
pickle.dump(R2_dict, pickle_out, protocol=2)
pickle_out.close() | [
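# --- Hedged downstream sketch (not part of the original script) ---
# Reloads one segment's dictionaries written above; the file naming mirrors
# the open() calls in the writer, and the helper name is an assumption.
import pickle

def load_segment(output_header, segment, total_segments):
    base = output_header + "_" + str(segment) + "-" + str(total_segments)
    with open(base + ".R1_dict", "rb") as f1, open(base + ".R2_dict", "rb") as f2:
        return pickle.load(f1), pickle.load(f2)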
0,
1,
2,
3,
4
] |
1,527 | 957db647500433fd73723fdeb3933037ba0641b1 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='DiscounInfo', fields=[('id',
models.BigIntegerField(primary_key=True, serialize=False)), (
'title', models.CharField(max_length=500)), ('lkurl', models.
CharField(max_length=500)), ('imgurl', models.CharField(max_length=
500))])]
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='DiscounInfo', fields=[('id',
models.BigIntegerField(primary_key=True, serialize=False)), (
'title', models.CharField(max_length=500)), ('lkurl', models.
CharField(max_length=500)), ('imgurl', models.CharField(max_length=
500))])]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-26 05:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DiscounInfo',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=500)),
('lkurl', models.CharField(max_length=500)),
('imgurl', models.CharField(max_length=500)),
],
),
]
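# --- Hedged sketch (not part of the migration) ---
# The model this initial migration implies; the real models.py may differ
# (serialize=False is migration-internal state with no model counterpart).
from django.db import models

class DiscounInfo(models.Model):
    id = models.BigIntegerField(primary_key=True)
    title = models.CharField(max_length=500)
    lkurl = models.CharField(max_length=500)
    imgurl = models.CharField(max_length=500)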
 | [0, 1, 2, 3, 4] |
1,528 | 2d36ae916ad257615016ed6c0bc67e506ee313c9 | <mask token>
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
<mask token>
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
| <mask token>
check_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],
bool], _check_password_hash)
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) ->Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
| <mask token>
bp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')
_CHECK_HASH_ANYWAY = (
'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'
)
check_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],
bool], _check_password_hash)
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) ->Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
| <mask token>
import functools
from typing import Any, Callable, cast, Dict
from flask import Blueprint, make_response, request, session
from werkzeug.security import check_password_hash as _check_password_hash
from .accesscontrol import PERMISSIONS
from .api import APIError, UserSchema
from .db.models import User
bp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')
_CHECK_HASH_ANYWAY = (
'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'
)
check_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],
bool], _check_password_hash)
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) ->Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
| """
Authentication views.
login()
Flask view to log a user in.
"""
import functools
from typing import Any, Callable, cast, Dict
from flask import Blueprint, make_response, request, session
from werkzeug.security import check_password_hash as _check_password_hash
from .accesscontrol import PERMISSIONS
from .api import APIError, UserSchema
from .db.models import User
bp = Blueprint("auth", __name__, url_prefix="/api/v1/auth")
_CHECK_HASH_ANYWAY = "pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050" # pylint: disable=line-too-long
check_password_hash: Callable[[str, str], bool] = cast(
Callable[[str, str], bool], _check_password_hash
)
@bp.route("/login", methods=("POST",))
def login() -> Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(
request.json, partial=("id", "qualifications") + PERMISSIONS
)
username = user_dict["username"]
password = user_dict["password"]
if is_password_correct(username, password):
user = fetch_user(username)
session["user_id"] = user["id"]
response = make_response(user)
response.set_cookie("is_authenticated", "1")
return response
raise APIError(reason="invalid_user_or_password", status_code=403)
@bp.route("/logout", methods=("POST",))
def logout() -> Any:
"""Flask view to log a user out."""
if "user_id" in session:
del session["user_id"]
response = make_response({"success": True})
response.set_cookie("is_authenticated", max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) -> bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
# We need to prevent timing-based side-channel attacks
# that could be exploited for user enumeration
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) -> Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) -> Callable[..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) -> Any:
user_id = session.get("user_id")
if user_id is None or User.query.get(user_id) is None:
if "user_id" in session:
del session["user_id"]
response = make_response({"reason": "authentication_required"}, 403)
response.set_cookie("is_authenticated", max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
| [
4,
6,
7,
8,
9
] |
1,529 | 42be9077ec51a9be1d4923011a38cd64d829f876 | <mask token>
| <mask token>
with webdriver.Chrome() as browser:
browser.get('http://suninjuly.github.io/selects1.html')
time.sleep(1)
x = int(browser.find_element_by_id('num1').text)
y = int(browser.find_element_by_id('num2').text)
sum_xy = str(int(x) + int(y))
browser.find_element_by_tag_name('select').click()
sum_opt = browser.find_element_by_css_selector("[value='{}']".format(
sum_xy))
sum_opt.click()
browser.find_element_by_tag_name('button').click()
time.sleep(5)
| from selenium import webdriver
import time
with webdriver.Chrome() as browser:
browser.get('http://suninjuly.github.io/selects1.html')
time.sleep(1)
x = int(browser.find_element_by_id('num1').text)
y = int(browser.find_element_by_id('num2').text)
sum_xy = str(int(x) + int(y))
browser.find_element_by_tag_name('select').click()
sum_opt = browser.find_element_by_css_selector("[value='{}']".format(
sum_xy))
sum_opt.click()
browser.find_element_by_tag_name('button').click()
time.sleep(5)
| from selenium import webdriver
import time
with webdriver.Chrome() as browser:
browser.get("http://suninjuly.github.io/selects1.html")
time.sleep(1)
x = int(browser.find_element_by_id("num1").text)
y = int(browser.find_element_by_id("num2").text)
sum_xy = str(int(x)+int(y))
browser.find_element_by_tag_name("select").click()
sum_opt = browser.find_element_by_css_selector("[value='{}']".format(sum_xy))
sum_opt.click()
browser.find_element_by_tag_name("button").click()
time.sleep(5)
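# --- Hedged alternative sketch (not part of the original script) ---
# The same option choice via Selenium's Select helper, which avoids clicking
# the <select> element manually; assumes the same page and variables as above.
from selenium.webdriver.support.ui import Select
select = Select(browser.find_element_by_tag_name("select"))
select.select_by_value(sum_xy)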
 | null | [0, 1, 2, 3] |
1,530 | 4d35bb83378805daf4392a1752386ab1403404e0 | <mask token>
| print("""
1. Lists of Numbers""")
print('\t' + str([1, 2, 3]))
print("""
2. Lists of Strings""")
print('\t' + str(['Lemon', 'Mango', 'Papaya']))
<mask token>
print('\tMy favorite fruit is ' + list_fruits[1])
print("""
3. List operations""")
<mask token>
print('\tNew List: ' + str(list_fruits))
<mask token>
print("""
5. Create empty list""")
print('\tList of Organizations: ' + str(list_Organizations))
print("""
5. Add values to list""")
list_Organizations.append('Microsoft')
list_Organizations.append('Amazon')
list_Organizations.append('Google')
print('\tAppend List of Organizations: ' + str(list_Organizations))
print('\tList of characters in string:' + str(list('Sandeep Dhamale')))
print("""
6. Retrieve List using for loop""")
for organization in list_Organizations:
print('\t' + organization)
print("""
7. Get specific elements within list: Slicing""")
<mask token>
print('\tSub list: ' + str(sub_list_numbers))
print('\tLast element in list: ' + str(list_numbers[-1]))
print('\tGet all elements in list except first and lasr: ' + str(
list_numbers[1:-1]))
print('\tElements from index 2 in list: ' + str(list_numbers[2:]))
print('\tElements till index 4 in list: ' + str(list_numbers[:4]))
print("""
8. Copying Lists to other list""")
<mask token>
print('\tUsing assignment. Is list_numbers_direct is list_numbers ' + str(
list_numbers_direct is list_numbers))
<mask token>
print('\tUsing assignment. Is list_numbers_list_values is list_numbers ' +
str(list_numbers_list_values is list_numbers))
<mask token>
print('\tUsing assignment. Is list_numbers_copy is list_numbers ' + str(
list_numbers_copy is list_numbers))
<mask token>
print('\tUsing assignment. Is list_numbers_list is list_numbers ' + str(
list_numbers_list is list_numbers))
print(
"""
9. Note: Although the copies are not equal the objects inside the lists are equal"""
)
<mask token>
print('\tcopy_list_of_list is list_of_list: ' + str(copy_list_of_list is
list_of_list))
print('\tcopy_list_of_list[element] is list_of_list[element]: ' + str(
copy_list_of_list[0] is list_of_list[0]))
print('\tEven if the values are modified e.g. append the list will be same')
list_of_list[0].append('a')
print('\tlist_of_list: ' + str(list_of_list))
print('\tcopy_list_of_list: ' + str(copy_list_of_list))
print('\tcopy_list_of_list[element] is list_of_list[element]: ' + str(
copy_list_of_list[0] is list_of_list[0]))
print(
"""
10.Search in a list: list.index() - Returns the first matched element"""
)
<mask token>
print('\tString: ' + temp_string)
print('\tString list: ' + str(temp_string_list))
print('\tSearch a sub string in string list using list.index(): ' + str(
temp_string_list.index('scripting')))
print("""
11.Count occurrence of substring in list""")
print('\tCount occurrence of substring Python: ' + str(temp_string_list.
count('easy')))
print("""
12.Remove substring from string list""")
del temp_string_list[3]
print('\tA. Remove substring from list using del (by index): ' + str(
temp_string_list))
print('\tOriginal string is unaffected: ' + str(temp_string))
temp_string_list.remove('learn')
print('\tB. Remove substring from list using remove (by value): ' + str(
temp_string_list))
print('\tOriginal string is unaffected: ' + str(temp_string))
print("""
12.Insert a substring in string. list.insert()""")
temp_string_list.insert(3, 'scripting')
print('\tA. Insert substring to list (at index): ' + str(temp_string_list))
print('\tOriginal string is unaffected: ' + str(temp_string))
print("""
13.Concatenating lists.""")
<mask token>
print('\ta. temp_list = temp_list_1 + temp_list_2 = ' + str(temp_list))
temp_list += temp_list
print('\tb. temp_list += temp_list ' + str(temp_list))
temp_list.extend([7, 8, 9])
print('\tc. temp_list.extend() ' + str(temp_list))
print("""
14. Reversing lists.""")
temp_list.reverse()
print('Reverse temp list: ' + str(temp_list))
print("""
15. Sorting lists.""")
<mask token>
temp_list.sort()
print('\tSorted list: ' + str(temp_list))
temp_list.sort(reverse=True)
print('\tSorted list: ' + str(temp_list))
print("\tSorting lists by callable functions (inbuilt) e.g. len using 'key")
<mask token>
print('\tString list: ' + str(temp_string_list))
temp_string_list.sort(key=len)
print('\tSort by length of each word: ' + str(temp_string_list))
<mask token>
print(
"""
16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list."""
)
<mask token>
y.sort()
print('\t y= ' + str(y))
print('\t x= ' + str(x))
<mask token>
print('\t y= ' + str(sorted(x)))
print('\t x= ' + str(x))
print('\t z= ' + str(list(reversed(x))))
print('\t x= ' + str(x))
| print("""
1. Lists of Numbers""")
print('\t' + str([1, 2, 3]))
print("""
2. Lists of Strings""")
print('\t' + str(['Lemon', 'Mango', 'Papaya']))
list_fruits = ['Lemon', 'Mango', 'Papaya']
print('\tMy favorite fruit is ' + list_fruits[1])
print("""
3. List operations""")
list_fruits[2] = 'Water Melons'
print('\tNew List: ' + str(list_fruits))
list_Organizations = []
print("""
4. Create empty list""")
print('\tList of Organizations: ' + str(list_Organizations))
print("""
5. Add values to list""")
list_Organizations.append('Microsoft')
list_Organizations.append('Amazon')
list_Organizations.append('Google')
print('\tAppend List of Organizations: ' + str(list_Organizations))
print('\tList of characters in string:' + str(list('Sandeep Dhamale')))
print("""
6. Retrieve List using for loop""")
for organization in list_Organizations:
print('\t' + organization)
print("""
7. Get specific elements within list: Slicing""")
list_numbers = [1, 2, 3, 4, 5]
sub_list_numbers = list_numbers[1:3]
print('\tSub list: ' + str(sub_list_numbers))
print('\tLast element in list: ' + str(list_numbers[-1]))
print('\tGet all elements in list except first and last: ' + str(
list_numbers[1:-1]))
print('\tElements from index 2 in list: ' + str(list_numbers[2:]))
print('\tElements till index 4 in list: ' + str(list_numbers[:4]))
print("""
8. Copying Lists to other list""")
list_numbers_direct = list_numbers
print('\tUsing assignment. Is list_numbers_direct is list_numbers ' + str(
list_numbers_direct is list_numbers))
list_numbers_list_values = list_numbers[:]
print('\tUsing assignment. Is list_numbers_list_values is list_numbers ' +
str(list_numbers_list_values is list_numbers))
list_numbers_copy = list_numbers.copy()
print('\tUsing assignment. Is list_numbers_copy is list_numbers ' + str(
list_numbers_copy is list_numbers))
list_numbers_list = list(list_numbers)
print('\tUsing assignment. Is list_numbers_list is list_numbers ' + str(
list_numbers_list is list_numbers))
print(
"""
9. Note: Although the copies are not equal the objects inside the lists are equal"""
)
list_of_list = [[1, 2], [3, 4]]
copy_list_of_list = list_of_list[:]
print('\tcopy_list_of_list is list_of_list: ' + str(copy_list_of_list is
list_of_list))
print('\tcopy_list_of_list[element] is list_of_list[element]: ' + str(
copy_list_of_list[0] is list_of_list[0]))
print('\tEven if the values are modified e.g. append the list will be same')
list_of_list[0].append('a')
print('\tlist_of_list: ' + str(list_of_list))
print('\tcopy_list_of_list: ' + str(copy_list_of_list))
print('\tcopy_list_of_list[element] is list_of_list[element]: ' + str(
copy_list_of_list[0] is list_of_list[0]))
print(
"""
10.Search in a list: list.index() - Returns the first matched element"""
)
temp_string = (
'Python is easy scripting language. It is easy to learn and build apps using Python.'
)
temp_string_list = temp_string.split(' ')
print('\tString: ' + temp_string)
print('\tString list: ' + str(temp_string_list))
print('\tSearch a sub string in string list using list.index(): ' + str(
temp_string_list.index('scripting')))
print("""
11.Count occurrence of substring in list""")
print('\tCount occurrence of substring Python: ' + str(temp_string_list.
count('easy')))
print("""
12.Remove substring from string list""")
del temp_string_list[3]
print('\tA. Remove substring from list using del (by index): ' + str(
temp_string_list))
print('\tOriginal string is unaffected: ' + str(temp_string))
temp_string_list.remove('learn')
print('\tB. Remove substring from list using remove (by value): ' + str(
temp_string_list))
print('\tOriginal string is unaffected: ' + str(temp_string))
print("""
12.Insert a substring in string. list.insert()""")
temp_string_list.insert(3, 'scripting')
print('\tA. Insert substring to list (at index): ' + str(temp_string_list))
print('\tOriginal string is unaffected: ' + str(temp_string))
print("""
13.Concatenating lists.""")
temp_list_1 = [1, 2, 3]
temp_list_2 = [4, 5, 6]
temp_list = temp_list_1 + temp_list_2
print('\ta. temp_list = temp_list_1 + temp_list_2 = ' + str(temp_list))
temp_list += temp_list
print('\tb. temp_list += temp_list ' + str(temp_list))
temp_list.extend([7, 8, 9])
print('\tc. temp_list.extend() ' + str(temp_list))
print("""
14. Reversing lists.""")
temp_list.reverse()
print('Reverse temp list: ' + str(temp_list))
print("""
15. Sorting lists.""")
temp_list = [5, 55, 555]
temp_list.sort()
print('\tSorted list: ' + str(temp_list))
temp_list.sort(reverse=True)
print('\tSorted list: ' + str(temp_list))
print("\tSorting lists by callable functions (inbuilt) e.g. len using 'key")
temp_string = 'I am a software tester.'
temp_string_list = temp_string.split()
print('\tString list: ' + str(temp_string_list))
temp_string_list.sort(key=len)
print('\tSort by length of each word: ' + str(temp_string_list))
temp_number_list = [3, 45, 12, 1, 99, 44]
print(
"""
16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list."""
)
x = [4, 9, 2, 1]
y = x
y.sort()
print('\t y= ' + str(y))
print('\t x= ' + str(x))
x = [4, 9, 2, 1]
print('\t y= ' + str(sorted(x)))
print('\t x= ' + str(x))
print('\t z= ' + str(list(reversed(x))))
print('\t x= ' + str(x))
| # Lists are sequence of objects
# Mutable
# Lists are represented within square brackets and items are seperated by commas
#-----------------------------------Lists-----------------------------------#
# Lists of Numbers
print("\n1. Lists of Numbers")
print("\t" + str([1,2,3]))
# Lists of Strings
print("\n2. Lists of Strings")
print("\t" + str(["Lemon","Mango","Papaya"]))
list_fruits =["Lemon","Mango","Papaya"]
print("\tMy favorite fruit is " + list_fruits[1])
print("\n3. List operations")
#Replace items within list
list_fruits[2]="Water Melons"
print("\tNew List: " + str(list_fruits))
#Create Empty List
list_Organizations = []
print("\n5. Create empty list")
print("\tList of Organizations: " + str(list_Organizations))
#Add values to list
print("\n5. Add values to list")
list_Organizations.append("Microsoft")
list_Organizations.append("Amazon")
list_Organizations.append("Google")
print("\tAppend List of Organizations: " + str(list_Organizations))
#List of characters within string
print("\tList of characters in string:" + str(list("Sandeep Dhamale")))
# Retrieve List using for loop
print("\n6. Retrieve List using for loop")
for organization in list_Organizations:
print("\t" + organization)
# Get specific elements within list: Slicing
print("\n7. Get specific elements within list: Slicing")
list_numbers = [1,2,3,4,5]
sub_list_numbers = list_numbers[1:3]
print("\tSub list: " + str(sub_list_numbers))
print("\tLast element in list: " + str(list_numbers[-1]))
print("\tGet all elements in list except first and lasr: " + str(list_numbers[1:-1]))
print("\tElements from index 2 in list: " + str(list_numbers[2:]))
print("\tElements till index 4 in list: " + str(list_numbers[:4]))
#Copying Lists to other list - Shallow copy
print("\n8. Copying Lists to other list")
list_numbers_direct = list_numbers
print("\tUsing assignment. Is list_numbers_direct is list_numbers " + str(list_numbers_direct is list_numbers))
list_numbers_list_values = list_numbers[:]
print("\tUsing assignment. Is list_numbers_list_values is list_numbers " + str(list_numbers_list_values is list_numbers))
list_numbers_copy = list_numbers.copy()
print("\tUsing assignment. Is list_numbers_copy is list_numbers " + str(list_numbers_copy is list_numbers))
list_numbers_list = list(list_numbers)
print("\tUsing assignment. Is list_numbers_list is list_numbers " + str(list_numbers_list is list_numbers))
print("\n9. Note: Although the copies are not equal the objects inside the lists are equal")
list_of_list = [[1,2],[3,4]]
copy_list_of_list = list_of_list[:]
print("\tcopy_list_of_list is list_of_list: " + str(copy_list_of_list is list_of_list))
print("\tcopy_list_of_list[element] is list_of_list[element]: " + str(copy_list_of_list[0] is list_of_list[0]))
print("\tEven if the values are modified e.g. append the list will be same")
list_of_list[0].append('a')
print("\tlist_of_list: " + str(list_of_list))
print("\tcopy_list_of_list: " + str(copy_list_of_list))
print("\tcopy_list_of_list[element] is list_of_list[element]: " + str(copy_list_of_list[0] is list_of_list[0]))
print("\n10.Search in a list: list.index() - Returns the first matched element")
temp_string = "Python is easy scripting language. It is easy to learn and build apps using Python."
temp_string_list = temp_string.split(" ")
print("\tString: " + temp_string)
print("\tString list: " + str(temp_string_list))
print("\tSearch a sub string in string list using list.index(): " + str(temp_string_list.index("scripting")))
print("\n11.Count occurrence of substring in list")
print("\tCount occurrence of substring Python: " + str(temp_string_list.count("easy")))
print("\n12.Remove substring from string list")
del temp_string_list[3]
print("\tA. Remove substring from list using del (by index): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))
temp_string_list.remove("learn")
print("\tB. Remove substring from list using remove (by value): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))
print("\n12.Insert a substring in string. list.insert()")
temp_string_list.insert(3, "scripting")
print("\tA. Insert substring to list (at index): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))
print("\n13.Concatenating lists.")
temp_list_1=[1,2,3]
temp_list_2 = [4,5,6]
temp_list = temp_list_1 + temp_list_2
print("\ta. temp_list = temp_list_1 + temp_list_2 = " + str(temp_list))
temp_list+=temp_list
print("\tb. temp_list += temp_list " + str(temp_list))
temp_list.extend([7,8,9])
print("\tc. temp_list.extend() " + str(temp_list))
print("\n14. Reversing lists.")
temp_list.reverse()
print("Reverse temp list: "+ str(temp_list))
print("\n15. Sorting lists.")
temp_list = [5,55,555]
temp_list.sort()
print("\tSorted list: " + str(temp_list))
temp_list.sort(reverse=True)
print("\tSorted list: " + str(temp_list))
print("\tSorting lists by callable functions (inbuilt) e.g. len using 'key")
temp_string = "I am a software tester."
temp_string_list = temp_string.split()
print("\tString list: " + str(temp_string_list))
temp_string_list.sort(key=len)
print("\tSort by length of each word: " + str(temp_string_list))
temp_number_list=[3,45,12,1,99,44]
print("\n16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list.")
x=[4, 9, 2, 1]
y = x
y.sort()
print("\t y= " + str(y))
print("\t x= " + str(x))
x=[4, 9, 2, 1]
print("\t y= " + str(sorted(x)))
print("\t x= " + str(x))
print("\t z= " + str(list(reversed(x))))
print("\t x= " + str(x))
| null | [
0,
1,
2,
3
] |
1,531 | bbd50c40bc0897fe7a93f277bcfdcba3ba6d6f2a | <mask token>
| <mask token>
def add_sub_path(yaml_path):
file = open(yaml_path, 'r', encoding='utf-8')
file_data = file.read()
file.close()
data = yaml.safe_load(file_data)
for p, p_info in data.get('paths', {}).items():
for method, m_info in p_info.items():
url_path = m_info['x-bk-apigateway-resource']['backend']['path']
m_info['x-bk-apigateway-resource']['backend']['path'
] = '{}{}'.format('/{env.api_sub_path}', url_path[0:])
file = open(yaml_path, 'w')
yaml.dump(data, file)
file.close()
<mask token>
| <mask token>
def add_sub_path(yaml_path):
file = open(yaml_path, 'r', encoding='utf-8')
file_data = file.read()
file.close()
data = yaml.safe_load(file_data)
for p, p_info in data.get('paths', {}).items():
for method, m_info in p_info.items():
url_path = m_info['x-bk-apigateway-resource']['backend']['path']
m_info['x-bk-apigateway-resource']['backend']['path'
] = '{}{}'.format('/{env.api_sub_path}', url_path[0:])
file = open(yaml_path, 'w')
yaml.dump(data, file)
file.close()
if __name__ == '__main__':
path = sys.argv[1]
add_sub_path(path)
| import sys
import yaml
def add_sub_path(yaml_path):
file = open(yaml_path, 'r', encoding='utf-8')
file_data = file.read()
file.close()
data = yaml.safe_load(file_data)
for p, p_info in data.get('paths', {}).items():
for method, m_info in p_info.items():
url_path = m_info['x-bk-apigateway-resource']['backend']['path']
m_info['x-bk-apigateway-resource']['backend']['path'
] = '{}{}'.format('/{env.api_sub_path}', url_path[0:])
file = open(yaml_path, 'w')
yaml.dump(data, file)
file.close()
if __name__ == '__main__':
path = sys.argv[1]
add_sub_path(path)
| # -*- coding: utf-8 -*-
import sys
import yaml
def add_sub_path(yaml_path):
file = open(yaml_path, "r", encoding="utf-8")
file_data = file.read()
file.close()
data = yaml.safe_load(file_data)
for p, p_info in data.get("paths", {}).items():
for method, m_info in p_info.items():
url_path = m_info["x-bk-apigateway-resource"]["backend"]["path"]
m_info["x-bk-apigateway-resource"]["backend"]["path"] = "{}{}".format(
"/{env.api_sub_path}", url_path[0:]
)
file = open(yaml_path, "w")
yaml.dump(data, file)
file.close()
if __name__ == "__main__":
# 为所有path添加env.api_sub_path前缀
path = sys.argv[1]
add_sub_path(path)
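# Usage sketch for the script above (file name hypothetical):
#   python patch_paths.py resources.yaml
# Every backend path such as /api/v1/ping becomes
# /{env.api_sub_path}/api/v1/ping, rewritten in place. Note that
# yaml.dump rewrites the whole file, so comments and key order
# from the original YAML are not preserved.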
| [
0,
1,
2,
3,
4
] |
1,532 | af40239551709eff02b8a1f034583ab80845d1d7 | <mask token>
| <mask token>
for i in range(N):
x[i], y[i], z[i] = (int(x) for x in input().split())
<mask token>
for sx in (-1, 1):
for sy in (-1, 1):
for sz in (-1, 1):
_x, _y, _z = sx * x, sy * y, sz * z
T = np.sort(_x + _y + _z)[::-1][:M].sum()
temp.append(T)
print(max(temp))
| <mask token>
N, M = (int(x) for x in input().split())
x, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int
)
for i in range(N):
x[i], y[i], z[i] = (int(x) for x in input().split())
temp = []
for sx in (-1, 1):
for sy in (-1, 1):
for sz in (-1, 1):
_x, _y, _z = sx * x, sy * y, sz * z
T = np.sort(_x + _y + _z)[::-1][:M].sum()
temp.append(T)
print(max(temp))
| import numpy as np
N, M = (int(x) for x in input().split())
x, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int
)
for i in range(N):
x[i], y[i], z[i] = (int(x) for x in input().split())
temp = []
for sx in (-1, 1):
for sy in (-1, 1):
for sz in (-1, 1):
_x, _y, _z = sx * x, sy * y, sz * z
T = np.sort(_x + _y + _z)[::-1][:M].sum()
temp.append(T)
print(max(temp))
| null | [
0,
1,
2,
3
] |
1,533 | 0dad1937df39c012f7991c3897f27964bed1d5a0 | <mask token>
class CrossValidate(object):
<mask token>
<mask token>
| <mask token>
class CrossValidate(object):
def __init__(self, df, target_cols, problem_type, num_folds=3, shuffle=
False, random_state=0):
"""
df - pandas dataframe
target_cols - list of targets
problem_type - ["binary", "multiclass", holdout_n, multilabel]
"""
self.dataframe = df
self.target_cols = target_cols
self.num_targets = len(target_cols)
self.problem_type = problem_type
self.shuffle = shuffle
self.num_folds = num_folds
self.random_state = random_state
if self.shuffle:
self.dataframe = self.dataframe.sample(frac=1, random_state=
self.random_state).reset_index(drop=True)
self.dataframe['kfold'] = -1
<mask token>
| <mask token>
class CrossValidate(object):
def __init__(self, df, target_cols, problem_type, num_folds=3, shuffle=
False, random_state=0):
"""
df - pandas dataframe
target_cols - list of targets
problem_type - ["binary", "multiclass", holdout_n, multilabel]
"""
self.dataframe = df
self.target_cols = target_cols
self.num_targets = len(target_cols)
self.problem_type = problem_type
self.shuffle = shuffle
self.num_folds = num_folds
self.random_state = random_state
if self.shuffle:
self.dataframe = self.dataframe.sample(frac=1, random_state=
self.random_state).reset_index(drop=True)
self.dataframe['kfold'] = -1
def split(self):
if self.problem_type in ('binary', 'multiclass'):
"""
target_cols - ['target_1']
unique_values - eg, [0, 1] for binary, [0, 1, 2,...] for multiclass
"""
if self.num_targets != 1:
raise Exception(
'Invalid number of targets for this problem type. Needed number of targets = 1'
)
target = self.target_cols[0]
unique_values = self.dataframe[target].nunique()
if unique_values == 1:
raise Exception(
'Only one unique value found! Must be two for Binary and Multiclass cross validation'
)
elif unique_values > 1:
kf = model_selection.StratifiedKFold(n_splits=self.
num_folds, shuffle=False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self
.dataframe, y=self.dataframe[target].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type == 'multilabel':
"""
target_cols - ['target_1', 'target_2', 'target_3',....]
"""
if self.num_targets < 1:
raise Exception(
'Invalid number of targets for this problem type. Must be greater than 1.'
)
kf = MultilabelStratifiedKFold(n_splits=self.num_folds, shuffle
=False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.
dataframe, y=self.dataframe[self.target_cols].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
        elif self.problem_type == 'regression':
kf = model_selection.KFold(n_splits=self.num_folds)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.
dataframe)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type.startswith('holdout_'):
"""
1 : Training Set
0 : Validation Set
holdout_n : n% to holdout
"""
holdout_percentage = int(self.problem_type.split('_')[1])
num_holdout_samples = int(len(self.dataframe) *
holdout_percentage / 100)
self.dataframe.loc[:len(self.dataframe) - num_holdout_samples,
'kfold'] = 0
self.dataframe.loc[len(self.dataframe) - num_holdout_samples:,
'kfold'] = 1
else:
raise Exception('Problem type not understood!')
return self.dataframe
| import numpy as np
from sklearn import model_selection
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
<mask token>
class CrossValidate(object):
def __init__(self, df, target_cols, problem_type, num_folds=3, shuffle=
False, random_state=0):
"""
df - pandas dataframe
target_cols - list of targets
problem_type - ["binary", "multiclass", holdout_n, multilabel]
"""
self.dataframe = df
self.target_cols = target_cols
self.num_targets = len(target_cols)
self.problem_type = problem_type
self.shuffle = shuffle
self.num_folds = num_folds
self.random_state = random_state
if self.shuffle:
self.dataframe = self.dataframe.sample(frac=1, random_state=
self.random_state).reset_index(drop=True)
self.dataframe['kfold'] = -1
def split(self):
if self.problem_type in ('binary', 'multiclass'):
"""
target_cols - ['target_1']
unique_values - eg, [0, 1] for binary, [0, 1, 2,...] for multiclass
"""
if self.num_targets != 1:
raise Exception(
'Invalid number of targets for this problem type. Needed number of targets = 1'
)
target = self.target_cols[0]
unique_values = self.dataframe[target].nunique()
if unique_values == 1:
raise Exception(
'Only one unique value found! Must be two for Binary and Multiclass cross validation'
)
elif unique_values > 1:
kf = model_selection.StratifiedKFold(n_splits=self.
num_folds, shuffle=False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self
.dataframe, y=self.dataframe[target].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type == 'multilabel':
"""
target_cols - ['target_1', 'target_2', 'target_3',....]
"""
if self.num_targets < 1:
raise Exception(
'Invalid number of targets for this problem type. Must be greater than 1.'
)
kf = MultilabelStratifiedKFold(n_splits=self.num_folds, shuffle
=False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.
dataframe, y=self.dataframe[self.target_cols].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
        elif self.problem_type == 'regression':
kf = model_selection.KFold(n_splits=self.num_folds)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.
dataframe)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type.startswith('holdout_'):
"""
1 : Training Set
0 : Validation Set
holdout_n : n% to holdout
"""
holdout_percentage = int(self.problem_type.split('_')[1])
num_holdout_samples = int(len(self.dataframe) *
holdout_percentage / 100)
self.dataframe.loc[:len(self.dataframe) - num_holdout_samples,
'kfold'] = 0
self.dataframe.loc[len(self.dataframe) - num_holdout_samples:,
'kfold'] = 1
else:
raise Exception('Problem type not understood!')
return self.dataframe
| import numpy as np
from sklearn import model_selection
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
"""
- binary cross-validate
- multi-class cross-validate
- multi-label cross-validate
- holdout
- regression
"""
class CrossValidate(object):
def __init__(self, df,
target_cols,
problem_type,
num_folds = 3,
shuffle = False,
random_state = 0):
"""
df - pandas dataframe
target_cols - list of targets
problem_type - ["binary", "multiclass", holdout_n, multilabel]
"""
self.dataframe = df
self.target_cols = target_cols
self.num_targets = len(target_cols)
self.problem_type = problem_type
self.shuffle = shuffle
self.num_folds = num_folds
self.random_state = random_state
if self.shuffle:
self.dataframe = self.dataframe.sample(frac = 1,
random_state = self.random_state).reset_index(drop = True)
self.dataframe["kfold"] = -1
def split(self):
if self.problem_type in ("binary", "multiclass"):
"""
target_cols - ['target_1']
unique_values - eg, [0, 1] for binary, [0, 1, 2,...] for multiclass
"""
if self.num_targets != 1:
raise Exception("Invalid number of targets for this problem type. \
Needed number of targets = 1")
target = self.target_cols[0]
unique_values = self.dataframe[target].nunique()
if unique_values == 1:
raise Exception("Only one unique value found! \
Must be two for Binary and Multiclass cross validation")
elif unique_values > 1:
kf = model_selection.StratifiedKFold(n_splits=self.num_folds,
shuffle = False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe,
y=self.dataframe[target].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type == "multilabel":
"""
target_cols - ['target_1', 'target_2', 'target_3',....]
"""
if self.num_targets < 1:
raise Exception("Invalid number of targets for this problem type. \
Must be greater than 1.")
kf = MultilabelStratifiedKFold(n_splits=self.num_folds,
shuffle = False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe,
y=self.dataframe[self.target_cols].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type in ("regression"):
kf = model_selection.KFold(n_splits=self.num_folds)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type.startswith("holdout_"):
"""
1 : Training Set
0 : Validation Set
holdout_n : n% to holdout
"""
holdout_percentage = int(self.problem_type.split("_")[1])
num_holdout_samples = int(len(self.dataframe) * holdout_percentage / 100)
self.dataframe.loc[:len(self.dataframe) - num_holdout_samples, "kfold"] = 0
self.dataframe.loc[len(self.dataframe) - num_holdout_samples:, "kfold"] = 1
else:
raise Exception("Problem type not understood!")
return self.dataframe
| [
1,
2,
3,
4,
5
] |
1,534 | 9c2cc5b993f020b8a1c96ea4cd5c2fb2da44a251 | <mask token>
class RefTrackCollectionRegistry(object):
<mask token>
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn
).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[
trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection)
)
return collStrList
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and (trackFile == self.
PREBUILT or trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
<mask token>
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
<mask token>
| <mask token>
class RefTrackCollectionRegistry(object):
<mask token>
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn
).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[
trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection)
)
return collStrList
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and (trackFile == self.
PREBUILT or trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
def isTrackCollSpec(self, trackFiles):
if not all(isinstance(trackFile, basestring) for trackFile in
trackFiles):
return False
return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(
trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[
1] in self._trackIndex2CollectionReg and trackFiles[2
] in self._allCollections
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
<mask token>
| <mask token>
class RefTrackCollectionRegistry(object):
PREBUILT = '__prebuilt__'
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn
).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[
trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection)
)
return collStrList
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and (trackFile == self.
PREBUILT or trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
def isTrackCollSpec(self, trackFiles):
if not all(isinstance(trackFile, basestring) for trackFile in
trackFiles):
return False
return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(
trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[
1] in self._trackIndex2CollectionReg and trackFiles[2
] in self._allCollections
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
<mask token>
| from __future__ import absolute_import, division, print_function, unicode_literals
import os
from collections import defaultdict
from past.builtins import basestring
from pycolocstats.core.config import REF_COLL_GSUITES_PATH
__metaclass__ = type
class RefTrackCollectionRegistry(object):
PREBUILT = '__prebuilt__'
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn
).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[
trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection)
)
return collStrList
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and (trackFile == self.
PREBUILT or trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
def isTrackCollSpec(self, trackFiles):
if not all(isinstance(trackFile, basestring) for trackFile in
trackFiles):
return False
return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(
trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[
1] in self._trackIndex2CollectionReg and trackFiles[2
] in self._allCollections
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
refTrackCollRegistry = RefTrackCollectionRegistry()
| from __future__ import absolute_import, division, print_function, unicode_literals
import os
from collections import defaultdict
from past.builtins import basestring
from pycolocstats.core.config import REF_COLL_GSUITES_PATH
__metaclass__ = type
class RefTrackCollectionRegistry(object):
PREBUILT = '__prebuilt__'
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection))
return collStrList
# Temporary solution. Should be refactored to not make use of setReferenceTrackFileNames()
# in Method classes.
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and \
(trackFile == self.PREBUILT or
trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
def isTrackCollSpec(self, trackFiles):
if not all(isinstance(trackFile, basestring) for trackFile in trackFiles):
return False
return (len(trackFiles) == 1 and
trackFiles[0] == self.PREBUILT) or \
(len(trackFiles) == 3 and
trackFiles[0] == self.PREBUILT and
trackFiles[1] in self._trackIndex2CollectionReg and
trackFiles[2] in self._allCollections)
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
refTrackCollRegistry = RefTrackCollectionRegistry()
| [
6,
7,
8,
10,
11
] |
1,535 | 951fafe9f1b9a3273f30d101831d1e59e26fe85d | <mask token>
class ZakerNewsTab(models.Model):
code = models.IntegerField(blank=True, null=True)
tabName = models.CharField(db_column='tabName', max_length=20, blank=
True, null=True)
class Meta:
managed = False
db_table = 'zaker_news_tab'
class BxtZbgg(models.Model):
area = models.CharField(max_length=20, blank=True, null=True)
city = models.CharField(max_length=25, blank=True, null=True)
ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,
null=True)
xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,
null=True)
type = models.CharField(max_length=40, blank=True, null=True)
ly = models.CharField(max_length=50, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, null=True)
pubdata = models.CharField(db_column='pubData', max_length=30, blank=
True, null=True)
deaddata = models.CharField(db_column='deadData', max_length=30, blank=
True, null=True)
status = models.CharField(max_length=20, blank=True, null=True)
itemnum = models.CharField(db_column='itemNum', max_length=100, blank=
True, null=True)
detailurl = models.CharField(db_column='detailUrl', unique=True,
max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bxt_zbgg'
| <mask token>
class ScggjyList(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
class Meta:
managed = False
db_table = 'scggjy_list'
class ZakerNews(models.Model):
zTitle = models.CharField(db_column='zTitle', unique=True, max_length=
255, blank=True, null=True)
zSubtitle = models.CharField(db_column='zSubtitle', max_length=255,
blank=True, null=True)
sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=
255, blank=True, null=True)
zDetailLink = models.CharField(db_column='zDetailLink', max_length=255,
blank=True, null=True)
zType = models.CharField(db_column='zType', max_length=20, blank=True,
null=True)
class Meta:
managed = False
db_table = 'zaker_news'
class ZakerNewsTab(models.Model):
code = models.IntegerField(blank=True, null=True)
tabName = models.CharField(db_column='tabName', max_length=20, blank=
True, null=True)
class Meta:
managed = False
db_table = 'zaker_news_tab'
class BxtZbgg(models.Model):
area = models.CharField(max_length=20, blank=True, null=True)
city = models.CharField(max_length=25, blank=True, null=True)
ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,
null=True)
xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,
null=True)
type = models.CharField(max_length=40, blank=True, null=True)
ly = models.CharField(max_length=50, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, null=True)
pubdata = models.CharField(db_column='pubData', max_length=30, blank=
True, null=True)
deaddata = models.CharField(db_column='deadData', max_length=30, blank=
True, null=True)
status = models.CharField(max_length=20, blank=True, null=True)
itemnum = models.CharField(db_column='itemNum', max_length=100, blank=
True, null=True)
detailurl = models.CharField(db_column='detailUrl', unique=True,
max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bxt_zbgg'
| <mask token>
class ScggjyList(models.Model):
title = models.CharField(max_length=255)
pubData = models.CharField(db_column='pubData', max_length=255)
detailLink = models.CharField(db_column='detailLink', max_length=255)
detailTitle = models.CharField(db_column='detailTitle', max_length=255)
class Meta:
managed = False
db_table = 'scggjy_list'
class ZakerNews(models.Model):
zTitle = models.CharField(db_column='zTitle', unique=True, max_length=
255, blank=True, null=True)
zSubtitle = models.CharField(db_column='zSubtitle', max_length=255,
blank=True, null=True)
sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=
255, blank=True, null=True)
zDetailLink = models.CharField(db_column='zDetailLink', max_length=255,
blank=True, null=True)
zType = models.CharField(db_column='zType', max_length=20, blank=True,
null=True)
class Meta:
managed = False
db_table = 'zaker_news'
class ZakerNewsTab(models.Model):
code = models.IntegerField(blank=True, null=True)
tabName = models.CharField(db_column='tabName', max_length=20, blank=
True, null=True)
class Meta:
managed = False
db_table = 'zaker_news_tab'
class BxtZbgg(models.Model):
area = models.CharField(max_length=20, blank=True, null=True)
city = models.CharField(max_length=25, blank=True, null=True)
ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,
null=True)
xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,
null=True)
type = models.CharField(max_length=40, blank=True, null=True)
ly = models.CharField(max_length=50, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, null=True)
pubdata = models.CharField(db_column='pubData', max_length=30, blank=
True, null=True)
deaddata = models.CharField(db_column='deadData', max_length=30, blank=
True, null=True)
status = models.CharField(max_length=20, blank=True, null=True)
itemnum = models.CharField(db_column='itemNum', max_length=100, blank=
True, null=True)
detailurl = models.CharField(db_column='detailUrl', unique=True,
max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bxt_zbgg'
| from django.db import models
class ScggjyList(models.Model):
title = models.CharField(max_length=255)
pubData = models.CharField(db_column='pubData', max_length=255)
detailLink = models.CharField(db_column='detailLink', max_length=255)
detailTitle = models.CharField(db_column='detailTitle', max_length=255)
class Meta:
managed = False
db_table = 'scggjy_list'
class ZakerNews(models.Model):
zTitle = models.CharField(db_column='zTitle', unique=True, max_length=
255, blank=True, null=True)
zSubtitle = models.CharField(db_column='zSubtitle', max_length=255,
blank=True, null=True)
sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=
255, blank=True, null=True)
zDetailLink = models.CharField(db_column='zDetailLink', max_length=255,
blank=True, null=True)
zType = models.CharField(db_column='zType', max_length=20, blank=True,
null=True)
class Meta:
managed = False
db_table = 'zaker_news'
class ZakerNewsTab(models.Model):
code = models.IntegerField(blank=True, null=True)
tabName = models.CharField(db_column='tabName', max_length=20, blank=
True, null=True)
class Meta:
managed = False
db_table = 'zaker_news_tab'
class BxtZbgg(models.Model):
area = models.CharField(max_length=20, blank=True, null=True)
city = models.CharField(max_length=25, blank=True, null=True)
ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,
null=True)
xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,
null=True)
type = models.CharField(max_length=40, blank=True, null=True)
ly = models.CharField(max_length=50, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, null=True)
pubdata = models.CharField(db_column='pubData', max_length=30, blank=
True, null=True)
deaddata = models.CharField(db_column='deadData', max_length=30, blank=
True, null=True)
status = models.CharField(max_length=20, blank=True, null=True)
itemnum = models.CharField(db_column='itemNum', max_length=100, blank=
True, null=True)
detailurl = models.CharField(db_column='detailUrl', unique=True,
max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'bxt_zbgg'
| from django.db import models
class ScggjyList(models.Model):
title = models.CharField(max_length=255)
pubData = models.CharField(db_column='pubData', max_length=255)
detailLink = models.CharField(db_column='detailLink', max_length=255)
detailTitle = models.CharField(db_column='detailTitle', max_length=255)
class Meta:
managed = False
db_table = 'scggjy_list'
class ZakerNews(models.Model):
zTitle = models.CharField(db_column='zTitle', unique=True, max_length=255, blank=True, null=True)
zSubtitle = models.CharField(db_column='zSubtitle', max_length=255, blank=True, null=True)
sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=255, blank=True, null=True)
zDetailLink = models.CharField(db_column='zDetailLink', max_length=255, blank=True, null=True)
zType = models.CharField(db_column='zType', max_length=20, blank=True, null=True)
class Meta:
managed = False
db_table = 'zaker_news'
class ZakerNewsTab(models.Model):
code = models.IntegerField(blank=True, null=True)
tabName = models.CharField(db_column='tabName', max_length=20, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'zaker_news_tab'
class BxtZbgg(models.Model):
area = models.CharField(max_length=20, blank=True, null=True)
city = models.CharField(max_length=25, blank=True, null=True)
ywtype = models.CharField(db_column='ywType', max_length=32, blank=True, null=True) # Field name made lowercase.
xxtype = models.CharField(db_column='xxType', max_length=40, blank=True, null=True) # Field name made lowercase.
type = models.CharField(max_length=40, blank=True, null=True)
ly = models.CharField(max_length=50, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, null=True)
pubdata = models.CharField(db_column='pubData', max_length=30, blank=True, null=True) # Field name made lowercase.
deaddata = models.CharField(db_column='deadData', max_length=30, blank=True, null=True) # Field name made lowercase.
status = models.CharField(max_length=20, blank=True, null=True)
itemnum = models.CharField(db_column='itemNum', max_length=100, blank=True, null=True) # Field name made lowercase.
detailurl = models.CharField(db_column='detailUrl', unique=True, max_length=255, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'bxt_zbgg'
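Illustrative queries against these unmanaged models (managed = False means Django never creates or migrates the tables, so the legacy tables are assumed to exist; the filter values below are made up):

# Headlines scraped for one Zaker tab:
tech_news = ZakerNews.objects.filter(zType='tech').values('zTitle', 'zDetailLink')

# Tender announcements for one city; pubdata is a CharField,
# so this ordering is lexical, not chronological.
bids = BxtZbgg.objects.filter(city='成都').order_by('-pubdata')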
| [
4,
7,
8,
9,
10
] |
1,536 | 81f0119f6f348f6d33e8d22f588fc8c2e0593d3c | <mask token>
class SponsorType(models.Model):
<mask token>
<mask token>
class Sponsor(models.Model):
type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
image = models.ImageField(upload_to='images', default=
'default-image.png', blank=True, null=True)
slug = models.SlugField(max_length=200, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
class Meta:
verbose_name_plural = 'sponsors'
| <mask token>
class SponsorType(models.Model):
<mask token>
def __str__(self):
return self.name
class Sponsor(models.Model):
type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
image = models.ImageField(upload_to='images', default=
'default-image.png', blank=True, null=True)
slug = models.SlugField(max_length=200, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
class Meta:
verbose_name_plural = 'sponsors'
| <mask token>
class SponsorType(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Sponsor(models.Model):
type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
image = models.ImageField(upload_to='images', default=
'default-image.png', blank=True, null=True)
slug = models.SlugField(max_length=200, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
class Meta:
verbose_name_plural = 'sponsors'
| from django.db import models
from django.utils.text import slugify
class SponsorType(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Sponsor(models.Model):
type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
image = models.ImageField(upload_to='images', default=
'default-image.png', blank=True, null=True)
slug = models.SlugField(max_length=200, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
class Meta:
verbose_name_plural = 'sponsors'
| from django.db import models
from django.utils.text import slugify
# Create your models here.
class SponsorType(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Sponsor(models.Model):
type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
image = models.ImageField(upload_to="images",default="default-image.png",blank=True,null=True)
slug = models.SlugField(max_length=200, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args,**kwargs)
class Meta:
verbose_name_plural = 'sponsors'
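A small sketch of the slug behaviour (assumes a configured Django project with migrations applied; values are illustrative):

gold = SponsorType.objects.create(name='Gold')
s = Sponsor(type=gold, name='Acme Rockets')
s.save()                          # save() fills the slug from the name
assert s.slug == 'acme-rockets'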
| [
5,
6,
7,
8,
9
] |
1,537 | f702cdef3782ddc96244f3cf8e2026581d60baa9 | <mask token>
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
| <mask token>
class Address(DocumentTemplate):
<mask token>
city: 'City'
coordinates: List['Coordinates']
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: 'Address'
name: str
phone: str
type_of: 'Brewery_Type'
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: 'State'
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
| <mask token>
class Address(DocumentTemplate):
_subdocument = []
city: 'City'
coordinates: List['Coordinates']
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: 'Address'
name: str
phone: str
type_of: 'Brewery_Type'
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: 'State'
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
| from typing import List
from terminusdb_client.woqlschema import DocumentTemplate, EnumTemplate, RandomKey, ValueHashKey
class Address(DocumentTemplate):
_subdocument = []
city: 'City'
coordinates: List['Coordinates']
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: 'Address'
name: str
phone: str
type_of: 'Brewery_Type'
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: 'State'
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: 'Country'
name: str
| ####
# This is the script for storing the schema of your TerminusDB
# database for your project.
# Use 'terminusdb commit' to commit changes to the database and
# use 'terminusdb sync' to change this file according to
# the exsisting database schema
####
from typing import List
from terminusdb_client.woqlschema import (
DocumentTemplate,
EnumTemplate,
RandomKey,
ValueHashKey,
)
class Address(DocumentTemplate):
_subdocument = []
city: "City"
coordinates: List["Coordinates"]
postal_code: str
street: str
class Brewery(DocumentTemplate):
_key = RandomKey()
address_of: "Address"
name: str
phone: str
type_of: "Brewery_Type"
website_url: str
class Brewery_Type(EnumTemplate):
micro = ()
nano = ()
regional = ()
brewpub = ()
large = ()
planning = ()
bar = ()
contract = ()
proprietor = ()
closed = ()
taproom = ()
class City(DocumentTemplate):
_key = ValueHashKey()
name: str
state: "State"
class Coordinates(DocumentTemplate):
_key = RandomKey()
latitude: float
longitude: float
class Country(DocumentTemplate):
_key = ValueHashKey()
name: str
class State(DocumentTemplate):
_key = ValueHashKey()
country: "Country"
name: str
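A rough construction sketch for this schema. Hedged: attribute-style assignment is assumed here and may differ between terminusdb_client versions; consult the client docs before relying on it.

us = Country()
us.name = "United States"
ca = State()
ca.country = us
ca.name = "California"
sd = City()
sd.state = ca
sd.name = "San Diego"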
| [
2,
13,
14,
15,
16
] |
1,538 | 5c3bf49f88dec429ec85cceb8130cccf2691363b | <mask token>
| if __name__ == '__main__':
print('--------------------------------------')
query = 'user=pilgrim&database=master&password=PapayaWhip'
a_list = query.split('&')
print(a_list)
print('--------------------------------------')
a_list_of_lists = [v.split('=', 1) for v in a_list if '=' in v]
print(a_list_of_lists)
a_dict = dict(a_list_of_lists)
print(a_dict)
print('--------------------------------------')
a_string = 'My alphabet starts where your alphabet ends.'
print(a_string[3:11])
print(a_string[3:-3])
print(a_string[0:2])
print(a_string[:18])
print(a_string[18:])
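For comparison, the standard library already covers the query-string half of this demo; the manual split above is shown for teaching purposes:

from urllib.parse import parse_qs
parse_qs('user=pilgrim&database=master&password=PapayaWhip')
# {'user': ['pilgrim'], 'database': ['master'], 'password': ['PapayaWhip']}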
| null | null | null | [
0,
1
] |
1,539 | a0ffb793650b0e911dd9bcbec0b7ba76f7829c12 | <mask token>
| def minvalue(weight, Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum = 0
day = 1
for t in range(0, len(weight)):
if weight[t] + sum <= Capitivity:
sum += weight[t]
else:
sum = weight[t]
day += 1
if day <= Day:
return Capitivity
else:
Capitivity += 1
<mask token>
| def minvalue(weight, Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum = 0
day = 1
for t in range(0, len(weight)):
if weight[t] + sum <= Capitivity:
sum += weight[t]
else:
sum = weight[t]
day += 1
if day <= Day:
return Capitivity
else:
Capitivity += 1
<mask token>
store.append(list(map(int, a.split(','))))
<mask token>
print(minvalue(weight, Day))
| def minvalue(weight, Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum = 0
day = 1
for t in range(0, len(weight)):
if weight[t] + sum <= Capitivity:
sum += weight[t]
else:
sum = weight[t]
day += 1
if day <= Day:
return Capitivity
else:
Capitivity += 1
a = input()
a = a[1:len(a) - 1]
store = []
store.append(list(map(int, a.split(','))))
weight = store[0]
Day = int(input())
print(minvalue(weight, Day))
| def minvalue(weight,Day):
maximum = 0
res = 0
for x in range(0, len(weight)):
if weight[x] > maximum:
maximum = weight[x]
res += weight[x]
Capitivity = max(res // Day, maximum)
while True:
sum=0
day=1
for t in range(0, len(weight)):
if weight[t]+sum<=Capitivity:
sum+=weight[t]
else:
sum=weight[t]
day+=1
if day<=Day:
return Capitivity
else:
Capitivity+=1
a=input()
a=a[1:len(a)-1]
store=[]
store.append(list(map(int, a.split(","))))
weight=store[0]
Day=int(input())
print(minvalue(weight,Day)) | [
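The linear Capitivity += 1 scan above is correct but can be slow when the gap between the lower bound and the answer is large; the usual refinement is a binary search over the same feasibility test. A sketch under the same input format (not from the source):

def min_capacity(weight, days):
    def days_needed(cap):
        # days required if the ship can carry at most cap per day
        total, d = 0, 1
        for w in weight:
            if total + w > cap:
                total, d = w, d + 1
            else:
                total += w
        return d
    lo, hi = max(weight), sum(weight)
    while lo < hi:
        mid = (lo + hi) // 2
        if days_needed(mid) <= days:
            hi = mid
        else:
            lo = mid + 1
    return lo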
0,
1,
2,
3,
4
] |
1,540 | b46b9b086fc089e24cb39a0c2c4ac252591b2190 | import MySQLdb
import settings
import redis
import socket
import fcntl
import struct
import datetime
db = MySQLdb.connect(settings.host, settings.user, settings.pwd, settings.db)
cursor = db.cursor()
def connect_mysql():
    # without the global declaration, db.ping() raises UnboundLocalError
    # and the fresh connection below would be discarded on return
    global db
    try:
        db.ping()
    except:
        db = MySQLdb.connect(settings.host, settings.user, settings.pwd, settings.db)
def init_database(table, sql):
cursor.execute("DROP TABLE IF EXISTS %s" % table)
cursor.execute(sql)
print "init %s successful" % table
def insert_data(sql):
connect_mysql()
try:
cursor = db.cursor()
cursor.execute(sql)
db.commit()
except:
print "execute %s error" % sql
db.rollback()
def set_tags_from_result():
sql = "select WIDTH,DROP_RATE,MEMORY,CPU,SERVICE,THREAD_NUM,FRECURENT,R100 from result"
devide = [125.0, 1, 100.0, 100.0, 1000.0, 20.0, 1, 1]
result = [1,2,3,4,5,6,7,8]
try:
cursor.execute(sql)
results = cursor.fetchall()
for element in results:
for i in range(len(element)):
result[i] = element[i]/devide[i]
sql = "insert into tags (WIDTH,DROP_RATE,MEMORY,CPU,SERVICE,THREAD_NUM,FRECURENT,R100) values('%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f')" % (result[0], result[1], result[2],result[3], result[4], result[5], result[6], result[7])
insert_data(sql)
except Exception as msg:
print "select from result error"
print msg
print str(msg)
db.close()
if __name__ == '__main__':
table = 'tags'
sql = """CREATE TABLE %s (
WIDTH FLOAT(3,2),
DROP_RATE FLOAT,
MEMORY FLOAT(3,2),
CPU FLOAT(3,2),
SERVICE FLOAT(3,2),
THREAD_NUM FLOAT,
FRECURENT FLOAT,
R100 FLOAT(2, 1))""" % table
init_database(table, sql)
set_tags_from_result()
| null | null | null | null | [
0
] |
1,541 | adae4f9ebcbbb775fc40278ceec9a0cc30c0a503 | <mask token>
class TestXLUtility:
<mask token>
def getRowCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.max_row
<mask token>
<mask token>
<mask token>
| <mask token>
class TestXLUtility:
<mask token>
def getRowCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.max_row
def getColumnCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.max_column
<mask token>
def writeData(file, sheetname, rownum, columno, data):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
sheet.cell(row=rownum, column=columno).value = data
workbook.save(file)
| <mask token>
class TestXLUtility:
def __init__(self, driver):
self.driver = driver
def getRowCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.max_row
def getColumnCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.max_column
def readData(file, sheetname, rownum, columno):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.cell(row=rownum, column=columno).value
def writeData(file, sheetname, rownum, columno, data):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
sheet.cell(row=rownum, column=columno).value = data
workbook.save(file)
| import openpyxl
class TestXLUtility:
def __init__(self, driver):
self.driver = driver
def getRowCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.max_row
def getColumnCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.max_column
def readData(file, sheetname, rownum, columno):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
return sheet.cell(row=rownum, column=columno).value
def writeData(file, sheetname, rownum, columno, data):
workbook = openpyxl.load_workbook(file)
sheet = workbook[sheetname]
sheet.cell(row=rownum, column=columno).value = data
workbook.save(file)
| import openpyxl
class TestXLUtility:
def __init__(self, driver):
self.driver = driver
def getRowCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
#sheet = workbook.get_sheet_by_name(sheetname)
sheet = workbook[sheetname]
return(sheet.max_row)
def getColumnCount(file, sheetname):
workbook = openpyxl.load_workbook(file)
#sheet = workbook.get_sheet_by_name(sheetname)
sheet = workbook[sheetname]
return (sheet.max_column)
def readData(file,sheetname,rownum,columno):
workbook = openpyxl.load_workbook(file)
#sheet = workbook.get_sheet_by_name(sheetname)
sheet = workbook[sheetname]
return(sheet.cell(row=rownum, column=columno).value)
def writeData(file,sheetname,rownum,columno,data):
workbook = openpyxl.load_workbook(file)
#sheet = workbook.get_sheet_by_name(sheetname)
sheet = workbook[sheetname]
sheet.cell(row=rownum, column=columno).value = data
workbook.save(file)
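Usage sketch (file name illustrative). Note the methods after __init__ omit self, so they act as implicit static helpers and must be called on the class, not on an instance:

rows = TestXLUtility.getRowCount('testdata.xlsx', 'Sheet1')
for r in range(2, rows + 1):   # max_row counts the header, so start at 2
    value = TestXLUtility.readData('testdata.xlsx', 'Sheet1', r, 1)
    TestXLUtility.writeData('testdata.xlsx', 'Sheet1', r, 2, 'processed')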
| [
2,
4,
6,
7,
8
] |
1,542 | c24bf42cfeaa1fb8ac188b9e08146762e0e86fed | <mask token>
| <mask token>
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(name='array-sqrt-openmp', description=
'Illustration of Python extensions using OpenMP', author=
'Mihai Duta', author_email='[email protected]', ext_modules=[
c_array_sqrt, f_array_sqrt])
| <mask token>
c_array_sqrt = Extension(name='c_array_sqrt_omp', sources=[
'./src/c_array_sqrt_omp.c'], extra_compile_args=[
'-O2 -ffast-math -std=c99 -fopenmp'], extra_link_args=['-lgomp'])
f_array_sqrt = Extension(name='f_array_sqrt_omp', sources=[
'./src/f_array_sqrt_omp.f90'], extra_compile_args=[
'-O2 -ffast-math -fopenmp'], extra_link_args=['-lgomp'])
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(name='array-sqrt-openmp', description=
'Illustration of Python extensions using OpenMP', author=
'Mihai Duta', author_email='[email protected]', ext_modules=[
c_array_sqrt, f_array_sqrt])
| from numpy.distutils.core import Extension
c_array_sqrt = Extension(name='c_array_sqrt_omp', sources=[
'./src/c_array_sqrt_omp.c'], extra_compile_args=[
'-O2 -ffast-math -std=c99 -fopenmp'], extra_link_args=['-lgomp'])
f_array_sqrt = Extension(name='f_array_sqrt_omp', sources=[
'./src/f_array_sqrt_omp.f90'], extra_compile_args=[
'-O2 -ffast-math -fopenmp'], extra_link_args=['-lgomp'])
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(name='array-sqrt-openmp', description=
'Illustration of Python extensions using OpenMP', author=
'Mihai Duta', author_email='[email protected]', ext_modules=[
c_array_sqrt, f_array_sqrt])
| #
# purpose: setup file to install the compiled-language python libraries
# usage: python setup.py config_fc --f90flags="-O2 -fopenmp" install --prefix=$PWD
#
from numpy.distutils.core import Extension
c_array_sqrt = Extension (name = "c_array_sqrt_omp",
sources = ["./src/c_array_sqrt_omp.c"],
extra_compile_args = ["-O2 -ffast-math -std=c99 -fopenmp"],
extra_link_args = ["-lgomp"])
f_array_sqrt = Extension (name = "f_array_sqrt_omp",
sources = ["./src/f_array_sqrt_omp.f90"],
extra_compile_args = ["-O2 -ffast-math -fopenmp"],
extra_link_args = ["-lgomp"])
if __name__ == "__main__":
from numpy.distutils.core import setup
setup ( name = "array-sqrt-openmp",
description = "Illustration of Python extensions using OpenMP",
author = "Mihai Duta",
author_email = "[email protected]",
ext_modules = [c_array_sqrt, f_array_sqrt]
)
# end
| [
0,
1,
2,
3,
4
] |
1,543 | d20b336c6588c3cfc4393256b660d6e4ff56b84e | <mask token>
| <mask token>
def lcs2(a, b):
    n, m = len(a), len(b)
    dp_result = [[0 for j in range(m + 1)] for i in range(n + 1)]
    for x in range(1, n + 1):
        for y in range(1, m + 1):
            if a[x - 1] == b[y - 1]:
                dp_result[x][y] = dp_result[x - 1][y - 1] + 1
            else:
                dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y - 1])
    return dp_result[n][m]
<mask token>
| <mask token>
def lcs2(a, b):
    n, m = len(a), len(b)
    dp_result = [[0 for j in range(m + 1)] for i in range(n + 1)]
    for x in range(1, n + 1):
        for y in range(1, m + 1):
            if a[x - 1] == b[y - 1]:
                dp_result[x][y] = dp_result[x - 1][y - 1] + 1
            else:
                dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y - 1])
    return dp_result[n][m]
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
data = data[1:]
a = data[:n]
data = data[n:]
m = data[0]
data = data[1:]
b = data[:m]
print(lcs2(a, b))
| import sys
def lcs2(a, b):
    n, m = len(a), len(b)
    dp_result = [[0 for j in range(m + 1)] for i in range(n + 1)]
    for x in range(1, n + 1):
        for y in range(1, m + 1):
            if a[x - 1] == b[y - 1]:
                dp_result[x][y] = dp_result[x - 1][y - 1] + 1
            else:
                dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y - 1])
    return dp_result[n][m]
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
data = data[1:]
a = data[:n]
data = data[n:]
m = data[0]
data = data[1:]
b = data[:m]
print(lcs2(a, b))
| #Uses python3
import sys
def lcs2(a, b):
    # Size the table by the sequence lengths: the original used range(a + 1)
    # on the lists themselves and compared against an undefined c[z - 1]
    # (left over from a three-sequence variant), both of which fail at runtime.
    dp_result = [[0 for j in range(len(b) + 1)] for i in range(len(a) + 1)]
    for x in range(1, len(a) + 1):
        for y in range(1, len(b) + 1):
            if a[x - 1] == b[y - 1]:
                dp_result[x][y] = dp_result[x - 1][y - 1] + 1
            else:
                dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y - 1])
    return dp_result[len(a)][len(b)]
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
data = data[1:]
a = data[:n]
data = data[n:]
m = data[0]
data = data[1:]
b = data[:m]
print(lcs2(a, b))
| [
0,
1,
2,
3,
4
] |
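A quick sanity check for the corrected lcs2 above, usable without piping data through stdin:

assert lcs2([2, 7, 5], [2, 5]) == 2     # common subsequence [2, 5]
assert lcs2([7], [1, 2, 3, 4]) == 0     # no common element
assert lcs2([1, 2, 3], [1, 2, 3]) == 3  # identical sequences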
1,544 | 137e80b3bfdc0dba33a3108b37d21d298a8f251d | <mask token>
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
| <mask token>
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
| <mask token>
kubectl = local['kubectl']
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
| from plumbum import local, FG, ProcessExecutionError
import logging
import os.path
from task import app
kubectl = local['kubectl']
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
| from plumbum import local, FG, ProcessExecutionError
import logging
import os.path
from task import app
kubectl = local["kubectl"]
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl["create", "-f", "-", "--logtostderr"] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info((kubectl["delete", name])())
return True
except ProcessExecutionError:
return False
| [
1,
2,
3,
4,
5
] |
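A hedged dispatch sketch for the two tasks above. It assumes the Celery app imported from task.py is configured with a reachable broker, that this module is importable as kube_tasks, and that the template file uses %(...)s placeholders matching the args dict; all of these names are illustrative.

from kube_tasks import create_kube_from_template, delete_kube_by_name

# The template must contain %(name)s-style placeholders for the args to land.
create_kube_from_template.delay('pod-template.yaml', {'name': 'demo-pod'})
delete_kube_by_name.delay('pod/demo-pod')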
1,545 | b20a8160ba455a39e990b8b37c5017645530ced3 | <mask token>
class VideoClassSerializer(serializers.ModelSerializer):
<mask token>
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
| <mask token>
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
| <mask token>
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ['videoURL', 'subTitle', 'numOfLike', 'numOfPlay']
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
| from .models import Video, VideoClass
from rest_framework import serializers
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ['videoURL', 'subTitle', 'numOfLike', 'numOfPlay']
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = 'title', 'video_set'
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
| from .models import Video, VideoClass
from rest_framework import serializers
# Video 정보
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ['videoURL','subTitle', 'numOfLike', 'numOfPlay']
# Video 분류
class VideoClassSerializer(serializers.ModelSerializer):
video_set = serializers.SerializerMethodField()
class Meta:
model = VideoClass
fields = ('title', 'video_set')
def get_video_set(self, instance):
videos = instance.video_set.all()
return VideoSerializer(videos, many=True).data
| [
2,
3,
4,
5,
6
] |
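A usage sketch for the nested serializer above, e.g. from a Django shell or view; it assumes some VideoClass rows with related Video objects already exist:

classes = VideoClass.objects.all()
payload = VideoClassSerializer(classes, many=True).data
# payload -> [{'title': ..., 'video_set': [{'videoURL': ..., 'subTitle': ...,
#              'numOfLike': ..., 'numOfPlay': ...}, ...]}, ...]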
1,546 | 63ee99012089dcb0e5b41860c95e13fff52c6731 | <mask token>
class Task:
<mask token>
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = 'pending'
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {'created': self.created, 'finished': self.finished,
'status': self.status, 'uploaded': {'pending': self.pending,
'complete': self.complete, 'failed': self.failed}}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
<mask token>
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
Adding elements to queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return 'Queue Empty!'
<mask token>
def upload_image(self, path=None, url=None, title=None, description=
None, album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError('Either path or url must be given.')
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
'description': description}
token = ast.literal_eval(str(self.credentials))['access_token']
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
authentication, verify=verify)
if 'error' in json.loads(resp.content)['data']:
return False, json.loads(resp.content)['data']['error']
else:
return True, json.loads(resp.content)['data']['link']
| <mask token>
class Task:
<mask token>
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = 'pending'
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {'created': self.created, 'finished': self.finished,
'status': self.status, 'uploaded': {'pending': self.pending,
'complete': self.complete, 'failed': self.failed}}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
def executeOne(self, val):
"""
Upload a unique image
:rtype: object
"""
v, url = self.upload_image(path=None, url=val, title=None,
description=None, album=None)
if v:
self.url_map.update({val: url})
return True
else:
self.url_map.update({val: url})
return False
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
Adding elements to queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return 'Queue Empty!'
<mask token>
def upload_image(self, path=None, url=None, title=None, description=
None, album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError('Either path or url must be given.')
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
'description': description}
token = ast.literal_eval(str(self.credentials))['access_token']
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
authentication, verify=verify)
if 'error' in json.loads(resp.content)['data']:
return False, json.loads(resp.content)['data']['error']
else:
return True, json.loads(resp.content)['data']['link']
| <mask token>
IMGUR_BASE = 'https://api.imgur.com'
class Task:
"""
A class used to represent a job
...
Attributes
----------
queue : list
the list of all urls
pending : list
the name of all pending urls
complete : list
the name of all completed urls
failed : list
the name of all failed urls
url_map : dict
a dictionary that maps provided urls with imgur urls
created:
date created
finished:
date finished
status:
the job status
credentials:
the access token and other useful objects
"""
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = 'pending'
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {'created': self.created, 'finished': self.finished,
'status': self.status, 'uploaded': {'pending': self.pending,
'complete': self.complete, 'failed': self.failed}}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
def executeOne(self, val):
"""
Upload a unique image
:rtype: object
"""
v, url = self.upload_image(path=None, url=val, title=None,
description=None, album=None)
if v:
self.url_map.update({val: url})
return True
else:
self.url_map.update({val: url})
return False
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
Adding elements to queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return 'Queue Empty!'
def size(self):
"""
Getting the size of the queue
:rtype: object
"""
return len(self.queue)
def upload_image(self, path=None, url=None, title=None, description=
None, album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError('Either path or url must be given.')
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
'description': description}
token = ast.literal_eval(str(self.credentials))['access_token']
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
authentication, verify=verify)
if 'error' in json.loads(resp.content)['data']:
return False, json.loads(resp.content)['data']['error']
else:
return True, json.loads(resp.content)['data']['link']
| import ast
import datetime
import json
from base64 import b64encode
import requests
IMGUR_BASE = 'https://api.imgur.com'
class Task:
"""
A class used to represent a job
...
Attributes
----------
queue : list
the list of all urls
pending : list
the name of all pending urls
complete : list
the name of all completed urls
failed : list
the name of all failed urls
url_map : dict
a dictionary that maps provided urls with imgur urls
created:
date created
finished:
date finished
status:
the job status
credentials:
the access token and other useful objects
"""
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = 'pending'
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {'created': self.created, 'finished': self.finished,
'status': self.status, 'uploaded': {'pending': self.pending,
'complete': self.complete, 'failed': self.failed}}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
def executeOne(self, val):
"""
Upload a unique image
:rtype: object
"""
v, url = self.upload_image(path=None, url=val, title=None,
description=None, album=None)
if v:
self.url_map.update({val: url})
return True
else:
self.url_map.update({val: url})
return False
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
Adding elements to queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return 'Queue Empty!'
def size(self):
"""
Getting the size of the queue
:rtype: object
"""
return len(self.queue)
def upload_image(self, path=None, url=None, title=None, description=
None, album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError('Either path or url must be given.')
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
'description': description}
token = ast.literal_eval(str(self.credentials))['access_token']
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
authentication, verify=verify)
if 'error' in json.loads(resp.content)['data']:
return False, json.loads(resp.content)['data']['error']
else:
return True, json.loads(resp.content)['data']['link']
| import ast
import datetime
import json
from base64 import b64encode
import requests
IMGUR_BASE = "https://api.imgur.com"
class Task:
"""
A class used to represent a job
...
Attributes
----------
queue : list
the list of all urls
pending : list
the name of all pending urls
complete : list
the name of all completed urls
failed : list
the name of all failed urls
url_map : dict
a dictionary that maps provided urls with imgur urls
created:
date created
finished:
date finished
status:
the job status
credentials:
the access token and other useful objects
"""
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = "pending"
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b\"', '').replace('\"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {
"created": self.created,
"finished": self.finished,
"status": self.status,
"uploaded": {
"pending": self.pending,
"complete": self.complete,
"failed": self.failed
}
}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
def executeOne(self, val):
"""
Upload a unique image
:rtype: object
"""
v,url = self.upload_image(path=None, url=val, title=None, description=None, album=None)
if v:
self.url_map.update({val: url})
return True
else:
self.url_map.update({val: url})
return False
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
# Checking to avoid duplicate entry (not mandatory)
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
Adding elements to queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return ("Queue Empty!")
def size(self):
"""
Getting the size of the queue
:rtype: object
"""
return len(self.queue)
def upload_image(self, path=None, url=None, title=None, description=None,
album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError("Either path or url must be given.")
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': "58tq5Nw", 'image': image,
'title': title, 'description': description}
token = ast.literal_eval(str(self.credentials))["access_token"]
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + "/3/image", payload, headers=authentication, verify=verify)
if 'error' in json.loads(resp.content)["data"]:
return False, json.loads(resp.content)["data"]["error"]
else:
return True, json.loads(resp.content)["data"]["link"]
| [
8,
9,
12,
13,
14
] |
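A usage sketch for the Task class above. The image URL and access token are placeholders, and the progress callback is a trivial stand-in for whatever hook the real job runner passes in:

job = Task()
job.initialize(['https://example.com/cat.png'],       # placeholder URL
               {'access_token': 'YOUR_IMGUR_TOKEN'})  # placeholder token
job.executeAll(lambda t: print(t.export()['status']))
print(job.export()['uploaded'])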
1,547 | e7a283e0e0e16e9adb415b26d724b2ee84c4f4f8 | <mask token>
| <mask token>
class NoticiaForm(ModelForm):
class Meta:
model = Noticia
fields = ['idNoticia', 'resumen', 'titulo', 'categoria']
| from django import forms
from django.forms import ModelForm
from .models import Noticia
class NoticiaForm(ModelForm):
class Meta:
model = Noticia
fields = ['idNoticia', 'resumen', 'titulo', 'categoria']
| null | null | [
0,
1,
2
] |
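A view sketch using the form above. This is the standard ModelForm pattern; the view and template names are illustrative rather than taken from the project:

from django.shortcuts import render

def crear_noticia(request):
    form = NoticiaForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
    return render(request, 'noticia_form.html', {'form': form})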
1,548 | 7d10fb58aa5213516c656c05966fcaad6868ae81 | <mask token>
| <mask token>
print(x)
| <mask token>
client = pymongo.MongoClient('mongodb://localhost:27017/')
db = client['Test']
col = db['C100']
x = col.find_one()
print(x)
| import pymongo
client = pymongo.MongoClient('mongodb://localhost:27017/')
db = client['Test']
col = db['C100']
x = col.find_one()
print(x)
| import pymongo
client = pymongo.MongoClient("mongodb://localhost:27017/")
# Database Name
db = client["Test"]
# Collection Name
col = db["C100"]
x = col.find_one()
print(x) | [
0,
1,
2,
3,
4
] |
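A short follow-on sketch for the connection above, inserting a document and querying it back; the field names are illustrative:

col.insert_one({'name': 'alice', 'score': 91})
for doc in col.find({'score': {'$gt': 90}}):
    print(doc)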
1,549 | b27913d2cd29f174d79652af6da2846e397373fc | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('lists', '0004_auto_20180608_1835')]
operations = [migrations.AlterModelOptions(name='todo', options={
'ordering': ('-created_at',)}), migrations.AddField(model_name=
'todo', name='content', field=models.TextField(default='',
max_length=500))]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('lists', '0004_auto_20180608_1835')]
operations = [migrations.AlterModelOptions(name='todo', options={
'ordering': ('-created_at',)}), migrations.AddField(model_name=
'todo', name='content', field=models.TextField(default='',
max_length=500))]
| # Generated by Django 2.0.4 on 2018-06-09 05:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0004_auto_20180608_1835'),
]
operations = [
migrations.AlterModelOptions(
name='todo',
options={'ordering': ('-created_at',)},
),
migrations.AddField(
model_name='todo',
name='content',
field=models.TextField(default='', max_length=500),
),
]
| [
0,
1,
2,
3,
4
] |
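For reference, a reconstruction of the Todo model state this migration produces. Only content is introduced by the migration itself; created_at is assumed from the earlier migrations (it is referenced by the new ordering) and any other fields are omitted:

from django.db import models

class Todo(models.Model):
    created_at = models.DateTimeField(auto_now_add=True)  # assumed pre-existing field
    content = models.TextField(default='', max_length=500)

    class Meta:
        ordering = ('-created_at',)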
1,550 | 0ec5d6ce11851a577046cf73cf98c91b6dfb9f67 | <mask token>
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
<mask token>
| <mask token>
sys.path.append(
'/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
<mask token>
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
q.join()
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
| <mask token>
sys.path.append(
'/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
<mask token>
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = 10, 350
fontScale = 1
fontColor = 255, 255, 255
lineType = 2
subtitles = Queue()
q = Queue()
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
q.join()
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
| from concurrent import futures
import time
import math
import logging
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import sys
sys.path.append(
'/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
import cv2
from PRNet.utils.cv_plot import plot_kpt, plot_vertices
import pymesh
import threading
from Queue import Queue
from tensorflow.python.framework import tensor_util
import numpy as np
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = 10, 350
fontScale = 1
fontColor = 255, 255, 255
lineType = 2
subtitles = Queue()
q = Queue()
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
q.join()
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
| from concurrent import futures
import time
import math
import logging
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import sys
sys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
import cv2
from PRNet.utils.cv_plot import plot_kpt, plot_vertices
import pymesh
import threading
from Queue import Queue
from tensorflow.python.framework import tensor_util
import numpy as np
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=22500,
output=True)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (10,350)
fontScale = 1
fontColor = (255,255,255)
lineType = 2
subtitles = Queue()
q = Queue()
def worker():
display_subtitle = ""
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
# Display the resulting frame
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img,display_subtitle,
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
cv2.imshow('frame',show_img)
# Press Q on keyboard to stop recording
if cv2.waitKey(1) & 0xFF == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if "vertices" in request.inputs:
print("vertices")
vertices = tensor_util.MakeNdarray(request.inputs["vertices"])
q.put(vertices)
elif "audio" in request.inputs:
print('audio')
# audio = tensor_util.MakeNdarray(request.inputs['audio'])
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
# print(request.inputs['audio'])
stream.write(audio)
elif "subtitle" in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs["message"].CopyFrom(tf.make_tensor_proto("OK"))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
# server.wait_for_termination()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
q.join() # block until all tasks are donet
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
| [
3,
5,
6,
7,
8
] |
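A client sketch for the fake server above, pushing one subtitle frame. The host and port match the server's add_insecure_port, and the request shape mirrors what Predict reads:

import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.inputs['subtitle'].CopyFrom(tf.make_tensor_proto('hello world'))
print(stub.Predict(request, 10.0))  # 10-second deadline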
1,551 | b4b4dad5cf630dc1a627e323ea63577583d1e1c3 | <mask token>
class YahooHelper:
<mask token>
def __init__(self):
"""
Default constructor which initiates object
"""
pass
<mask token>
    def get_stock_data(self, symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
| <mask token>
class YahooHelper:
<mask token>
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
self.data = self.get_stock_data(symbol)
    def get_stock_data(self, symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
| <mask token>
class YahooHelper:
"""
Class to fetch Yahoo data
"""
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
self.data = self.get_stock_data(symbol)
    def get_stock_data(self, symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
| from pandas_datareader import data as pdr
from datetime import date
class YahooHelper:
"""
Class to fetch Yahoo data
"""
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
self.data = self.get_stock_data(symbol)
    def get_stock_data(self, symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
start = date(date.today().year, 1, 1)
end = date.today()
data = pdr.get_data_yahoo(symbol, start=start, end=end)
data.columns = ['Highest price (USD)', 'Lowest price (USD)',
'Opening price (USD)', 'Closing price (USD)', 'Volume',
'Adjusted closing price (USD)']
return data
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding=
'utf-8')
template = ('# TSLA Stocks over time \n' +
"""# ---------------------------------------------------------------------
"""
+
'# Export of stock data of "Tesla Inc." for current year. The dataset\n'
+
"""# consists of selected key stock exchange figures on a daily basis.
"""
+
'# The data can be recreated at any time with the "load_data.py"-script.\n'
+
"""# The data record contains one record sorted per trading day.
"""
+ '#\n' +
'# The data is restricted to the NASDAQ symbol "TSLA" which represents \n'
+
"""# the company Tesla Inc. The stock information was limited to the period
"""
+ '# from 1st January to the current day of the year. \n' +
'#\n' +
"""# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/
"""
+ '# December, 26, 2018, Marco Romanutti \n' + '#\n' + '#\n' +
'{}')
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding=
'utf-8')))
| from pandas_datareader import data as pdr
from datetime import date
class YahooHelper:
"""
Class to fetch Yahoo data
"""
def __init__(self):
"""
Default constructor which initiates object
"""
pass
def get_data(self, symbol):
"""
Function to collect Twitter data.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
"""
# Collect stock market data
self.data = self.get_stock_data(symbol)
# Symbol lookup:
    def get_stock_data(self, symbol):
"""
Function to get stock data for current year by ticker symbol.
:param symbol: The Symbol used to identify
an NASDAQ-100 stock.
:return: Stock data for current year
"""
# Set current dates
start = date(date.today().year, 1, 1) # first of current year
end = date.today() # today
# Get yahoo Yahoo data
data = pdr.get_data_yahoo(symbol, start=start, end=end)
# Rename columns
data.columns = ["Highest price (USD)",
"Lowest price (USD)",
"Opening price (USD)",
"Closing price (USD)",
"Volume",
"Adjusted closing price (USD)"]
return data
# Export data to csv
def export_data(self):
"""
Function to extract stock data to csv.
"""
with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:
self.data.to_csv('../data/yahoodata.csv', sep='\t', encoding='utf-8')
# Header information
template = "# TSLA Stocks over time \n" + \
"# --------------------------------------------------------------------- \n" + \
"# Export of stock data of \"Tesla Inc.\" for current year. The dataset\n" + \
"# consists of selected key stock exchange figures on a daily basis. \n" + \
"# The data can be recreated at any time with the \"load_data.py\"-script.\n" + \
"# The data record contains one record sorted per trading day. \n" + \
"#\n" + \
"# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \n" + \
"# the company Tesla Inc. The stock information was limited to the period \n" + \
"# from 1st January to the current day of the year. \n" + \
"#\n" + \
"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n" + \
"# December, 26, 2018, Marco Romanutti \n" + \
"#\n" + \
"#\n" + \
"{}"""
with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
fp.write(template.format(self.data.to_csv(index=True, encoding='utf-8')))
| [
4,
5,
6,
7,
8
] |
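A usage sketch for YahooHelper (note the get_stock_data signature fix above, since it is called as a bound method); it needs network access to Yahoo Finance and an existing ../data directory for the export:

helper = YahooHelper()
helper.get_data('TSLA')   # any NASDAQ-100 symbol
print(helper.data.head())
helper.export_data()      # writes ../data/yahoodata.csv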
1,552 | b6dbed95b321ac93c712c4735d601a00650b8dc4 | <mask token>
class PlSqlLexer(Lexer):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class PlSqlLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
T__0 = 1
A_LETTER = 2
ADD = 3
AFTER = 4
AGENT = 5
AGGREGATE = 6
ALL = 7
ALTER = 8
ANALYZE = 9
AND = 10
ANY = 11
ARRAY = 12
AS = 13
ASSUME = 14
ASSERT = 15
ASC = 16
ASSOCIATE = 17
AT = 18
ATTRIBUTE = 19
AUDIT = 20
AUTHID = 21
AUTO = 22
AUTOMATIC = 23
AUTONOMOUS_TRANSACTION = 24
BATCH = 25
BEFORE = 26
BEGIN = 27
BETWEEN = 28
BFILE = 29
BINARY_DOUBLE = 30
BINARY_FLOAT = 31
BINARY_INTEGER = 32
BLOB = 33
BLOCK = 34
BODY = 35
BOOLEAN = 36
BOTH = 37
BREADTH = 38
BULK = 39
BY = 40
BYTE = 41
C_LETTER = 42
CACHE = 43
CALL = 44
CANONICAL = 45
CASCADE = 46
CASE = 47
CAST = 48
CHAR = 49
CHAR_CS = 50
CHARACTER = 51
CHECK = 52
CHR = 53
CLOB = 54
CLOSE = 55
CLUSTER = 56
COLLECT = 57
COLUMNS = 58
COMMENT = 59
COMMIT = 60
COMMITTED = 61
COMPATIBILITY = 62
COMPILE = 63
COMPOUND = 64
CONNECT = 65
CONNECT_BY_ROOT = 66
CONSTANT = 67
CONSTRAINT = 68
CONSTRAINTS = 69
CONSTRUCTOR = 70
CONTENT = 71
CONTEXT = 72
CONTINUE = 73
CONVERT = 74
CORRUPT_XID = 75
CORRUPT_XID_ALL = 76
COST = 77
COUNT = 78
CREATE = 79
CROSS = 80
CUBE = 81
CURRENT = 82
CURRENT_USER = 83
CURSOR = 84
CUSTOMDATUM = 85
CYCLE = 86
DATA = 87
DATABASE = 88
DATE = 89
DAY = 90
DB_ROLE_CHANGE = 91
DBTIMEZONE = 92
DDL = 93
DEBUG = 94
DEC = 95
DECIMAL = 96
DECLARE = 97
DECOMPOSE = 98
DECREMENT = 99
DEFAULT = 100
DEFAULTS = 101
DEFERRED = 102
DEFINER = 103
DELETE = 104
DEPTH = 105
DESC = 106
DETERMINISTIC = 107
DIMENSION = 108
DISABLE = 109
DISASSOCIATE = 110
DISTINCT = 111
DOCUMENT = 112
DOUBLE = 113
DROP = 114
DSINTERVAL_UNCONSTRAINED = 115
EACH = 116
ELEMENT = 117
ELSE = 118
ELSIF = 119
EMPTY = 120
ENABLE = 121
ENCODING = 122
END = 123
ENTITYESCAPING = 124
ERR = 125
ERRORS = 126
ESCAPE = 127
EVALNAME = 128
EXCEPT = 129
EXCEPTION = 130
EXCEPTION_INIT = 131
EXCEPTIONS = 132
EXCLUDE = 133
EXCLUSIVE = 134
EXECUTE = 135
EXISTS = 136
EXIT = 137
EXPLAIN = 138
EXTERNAL = 139
EXTRACT = 140
FAILURE = 141
FALSE = 142
FETCH = 143
FINAL = 144
FIRST = 145
FIRST_VALUE = 146
FLOAT = 147
FOLLOWING = 148
FOLLOWS = 149
FOR = 150
FORALL = 151
FORCE = 152
FROM = 153
FULL = 154
FUNCTION = 155
GOTO = 156
GRANT = 157
GROUP = 158
GROUPING = 159
HASH = 160
HAVING = 161
HIDE = 162
HOUR = 163
IF = 164
IGNORE = 165
IMMEDIATE = 166
IN = 167
INCLUDE = 168
INCLUDING = 169
INCREMENT = 170
INDENT = 171
INDEX = 172
INDEXED = 173
INDICATOR = 174
INDICES = 175
INFINITE = 176
INLINE = 177
INNER = 178
INOUT = 179
INSERT = 180
INSTANTIABLE = 181
INSTEAD = 182
INT = 183
INTEGER = 184
INTERSECT = 185
INTERVAL = 186
INTO = 187
INVALIDATE = 188
IS = 189
ISOLATION = 190
ITERATE = 191
JAVA = 192
JOIN = 193
KEEP = 194
LANGUAGE = 195
LAST = 196
LAST_VALUE = 197
LEADING = 198
LEFT = 199
LEVEL = 200
LIBRARY = 201
LIKE = 202
LIKE2 = 203
LIKE4 = 204
LIKEC = 205
LIMIT = 206
LOCAL = 207
LOCK = 208
LOCKED = 209
LOG = 210
LOGOFF = 211
LOGON = 212
LONG = 213
LOOP = 214
MAIN = 215
MAP = 216
MATCHED = 217
MAXVALUE = 218
MEASURES = 219
MEMBER = 220
MERGE = 221
MINUS = 222
MINUTE = 223
MINVALUE = 224
MLSLABEL = 225
MODE = 226
MODEL = 227
MODIFY = 228
MONTH = 229
MULTISET = 230
NAME = 231
NAN = 232
NATURAL = 233
NATURALN = 234
NAV = 235
NCHAR = 236
NCHAR_CS = 237
NCLOB = 238
NESTED = 239
NEW = 240
NO = 241
NOAUDIT = 242
NOCACHE = 243
NOCOPY = 244
NOCYCLE = 245
NOENTITYESCAPING = 246
NOMAXVALUE = 247
NOMINVALUE = 248
NONE = 249
NOORDER = 250
NOSCHEMACHECK = 251
NOT = 252
NOWAIT = 253
NULL = 254
NULLS = 255
NUMBER = 256
NUMERIC = 257
NVARCHAR2 = 258
OBJECT = 259
OF = 260
OFF = 261
OID = 262
OLD = 263
ON = 264
ONLY = 265
OPEN = 266
OPTION = 267
OR = 268
ORADATA = 269
ORDER = 270
ORDINALITY = 271
OSERROR = 272
OUT = 273
OUTER = 274
OVER = 275
OVERRIDING = 276
PACKAGE = 277
PARALLEL_ENABLE = 278
PARAMETERS = 279
PARENT = 280
PARTITION = 281
PASSING = 282
PATH = 283
PERCENT_ROWTYPE = 284
PERCENT_TYPE = 285
PIPELINED = 286
PIVOT = 287
PLAN = 288
PLS_INTEGER = 289
POSITIVE = 290
POSITIVEN = 291
PRAGMA = 292
PRECEDING = 293
PRECISION = 294
PRESENT = 295
PRIOR = 296
PROCEDURE = 297
RAISE = 298
RANGE = 299
RAW = 300
READ = 301
REAL = 302
RECORD = 303
REF = 304
REFERENCE = 305
REFERENCING = 306
REJECT = 307
RELIES_ON = 308
RENAME = 309
REPLACE = 310
RESPECT = 311
RESTRICT_REFERENCES = 312
RESULT = 313
RESULT_CACHE = 314
RETURN = 315
RETURNING = 316
REUSE = 317
REVERSE = 318
REVOKE = 319
RIGHT = 320
ROLLBACK = 321
ROLLUP = 322
ROW = 323
ROWID = 324
ROWS = 325
RULES = 326
SAMPLE = 327
SAVE = 328
SAVEPOINT = 329
SCHEMA = 330
SCHEMACHECK = 331
SCN = 332
SEARCH = 333
SECOND = 334
SEED = 335
SEGMENT = 336
SELECT = 337
SELF = 338
SEQUENCE = 339
SEQUENTIAL = 340
SERIALIZABLE = 341
SERIALLY_REUSABLE = 342
SERVERERROR = 343
SESSIONTIMEZONE = 344
SET = 345
SETS = 346
SETTINGS = 347
SHARE = 348
SHOW = 349
SHUTDOWN = 350
SIBLINGS = 351
SIGNTYPE = 352
SIMPLE_INTEGER = 353
SINGLE = 354
SIZE = 355
SKIP_ = 356
SMALLINT = 357
SNAPSHOT = 358
SOME = 359
SPECIFICATION = 360
SQLDATA = 361
SQLERROR = 362
STANDALONE = 363
START = 364
STARTUP = 365
STATEMENT = 366
STATEMENT_ID = 367
STATIC = 368
STATISTICS = 369
STRING = 370
SUBMULTISET = 371
SUBPARTITION = 372
SUBSTITUTABLE = 373
SUBTYPE = 374
SUCCESS = 375
SUSPEND = 376
TABLE = 377
THE = 378
THEN = 379
TIME = 380
TIMESTAMP = 381
TIMESTAMP_LTZ_UNCONSTRAINED = 382
TIMESTAMP_TZ_UNCONSTRAINED = 383
TIMESTAMP_UNCONSTRAINED = 384
TIMEZONE_ABBR = 385
TIMEZONE_HOUR = 386
TIMEZONE_MINUTE = 387
TIMEZONE_REGION = 388
TO = 389
TRAILING = 390
TRANSACTION = 391
TRANSLATE = 392
TREAT = 393
TRIGGER = 394
TRIM = 395
TRUE = 396
TRUNCATE = 397
TYPE = 398
UNBOUNDED = 399
UNDER = 400
UNION = 401
UNIQUE = 402
UNLIMITED = 403
UNPIVOT = 404
UNTIL = 405
UPDATE = 406
UPDATED = 407
UPSERT = 408
UROWID = 409
USE = 410
USING = 411
VALIDATE = 412
VALUE = 413
VALUES = 414
VARCHAR = 415
VARCHAR2 = 416
VARIABLE = 417
VARRAY = 418
VARYING = 419
VERSION = 420
VERSIONS = 421
WAIT = 422
WARNING = 423
WELLFORMED = 424
WHEN = 425
WHENEVER = 426
WHERE = 427
WHILE = 428
WITH = 429
WITHIN = 430
WORK = 431
WRITE = 432
XML = 433
XMLAGG = 434
XMLATTRIBUTES = 435
XMLCAST = 436
XMLCOLATTVAL = 437
XMLELEMENT = 438
XMLEXISTS = 439
XMLFOREST = 440
XMLNAMESPACES = 441
XMLPARSE = 442
XMLPI = 443
XMLQUERY = 444
XMLROOT = 445
XMLSERIALIZE = 446
XMLTABLE = 447
YEAR = 448
YES = 449
YMINTERVAL_UNCONSTRAINED = 450
ZONE = 451
PREDICTION = 452
PREDICTION_BOUNDS = 453
PREDICTION_COST = 454
PREDICTION_DETAILS = 455
PREDICTION_PROBABILITY = 456
PREDICTION_SET = 457
CUME_DIST = 458
DENSE_RANK = 459
LISTAGG = 460
PERCENT_RANK = 461
PERCENTILE_CONT = 462
PERCENTILE_DISC = 463
RANK = 464
AVG = 465
CORR = 466
LAG = 467
LEAD = 468
MAX = 469
MEDIAN = 470
MIN = 471
NTILE = 472
RATIO_TO_REPORT = 473
ROW_NUMBER = 474
SUM = 475
VARIANCE = 476
REGR_ = 477
STDDEV = 478
VAR_ = 479
COVAR_ = 480
NATIONAL_CHAR_STRING_LIT = 481
BIT_STRING_LIT = 482
HEX_STRING_LIT = 483
DOUBLE_PERIOD = 484
PERIOD = 485
UNSIGNED_INTEGER = 486
APPROXIMATE_NUM_LIT = 487
CHAR_STRING = 488
DELIMITED_ID = 489
PERCENT = 490
AMPERSAND = 491
LEFT_PAREN = 492
RIGHT_PAREN = 493
DOUBLE_ASTERISK = 494
ASTERISK = 495
PLUS_SIGN = 496
MINUS_SIGN = 497
COMMA = 498
SOLIDUS = 499
AT_SIGN = 500
ASSIGN_OP = 501
BINDVAR = 502
COLON = 503
SEMICOLON = 504
LESS_THAN_OR_EQUALS_OP = 505
LESS_THAN_OP = 506
GREATER_THAN_OR_EQUALS_OP = 507
NOT_EQUAL_OP = 508
CARRET_OPERATOR_PART = 509
TILDE_OPERATOR_PART = 510
EXCLAMATION_OPERATOR_PART = 511
GREATER_THAN_OP = 512
CONCATENATION_OP = 513
VERTICAL_BAR = 514
EQUALS_OP = 515
LEFT_BRACKET = 516
RIGHT_BRACKET = 517
INTRODUCER = 518
SPACES = 519
SINGLE_LINE_COMMENT = 520
MULTI_LINE_COMMENT = 521
PROMPT = 522
REGULAR_ID = 523
ZV = 524
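# Token type constants generated by ANTLR for the PlSql grammar: every lexer
# rule is assigned a sequential integer, and these values index into the
# literalNames/symbolicNames tables defined below.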
channelNames = [u'DEFAULT_TOKEN_CHANNEL', u'HIDDEN']
modeNames = ['DEFAULT_MODE']
literalNames = ['<INVALID>', "'..'", "'.'", "'%'", "'&'", "'('", "')'",
"'**'", "'*'", "'+'", "'-'", "','", "'/'", "'@'", "':='", "':'",
"';'", "'<='", "'<'", "'>='", "'^'", "'~'", "'!'", "'>'", "'||'",
"'|'", "'='", "'['", "']'", "'_'", "'@!'"]
symbolicNames = ['<INVALID>', 'A_LETTER', 'ADD', 'AFTER', 'AGENT',
'AGGREGATE', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS',
'ASSUME', 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT',
'AUTHID', 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH',
'BEFORE', 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE',
'BINARY_FLOAT', 'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY',
'BOOLEAN', 'BOTH', 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER',
'CACHE', 'CALL', 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR',
'CHAR_CS', 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER',
'COLLECT', 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED',
'COMPATIBILITY', 'COMPILE', 'COMPOUND', 'CONNECT',
'CONNECT_BY_ROOT', 'CONSTANT', 'CONSTRAINT', 'CONSTRAINTS',
'CONSTRUCTOR', 'CONTENT', 'CONTEXT', 'CONTINUE', 'CONVERT',
'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST', 'COUNT', 'CREATE',
'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER', 'CURSOR', 'CUSTOMDATUM',
'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY', 'DB_ROLE_CHANGE',
'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL', 'DECLARE',
'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS', 'DEFERRED',
'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC', 'DIMENSION',
'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT', 'DOUBLE', 'DROP',
'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT', 'ELSE', 'ELSIF',
'EMPTY', 'ENABLE', 'ENCODING', 'END', 'ENTITYESCAPING', 'ERR',
'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT', 'EXCEPTION',
'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE', 'EXECUTE',
'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FAILURE',
'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE', 'FLOAT',
'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM', 'FULL',
'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH', 'HAVING',
'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INCLUDE',
'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED', 'INDICATOR',
'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT', 'INSERT',
'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',
'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',
'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',
'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',
'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',
'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',
'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',
'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',
'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',
'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',
'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',
'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',
'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',
'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',
'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',
'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',
'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',
'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',
'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',
'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',
'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',
'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',
'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',
'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',
'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',
'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',
'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',
'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',
'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',
'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',
'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',
'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',
'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',
'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',
'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',
'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',
'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',
'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',
'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',
'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',
'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',
'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',
'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',
'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',
'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',
'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',
'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',
'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',
'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',
'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',
'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',
'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',
'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',
'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',
'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',
'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',
'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'DELIMITED_ID', 'PERCENT',
'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN', 'DOUBLE_ASTERISK',
'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA', 'SOLIDUS',
'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',
'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',
'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',
'GREATER_THAN_OP', 'CONCATENATION_OP', 'VERTICAL_BAR', 'EQUALS_OP',
'LEFT_BRACKET', 'RIGHT_BRACKET', 'INTRODUCER', 'SPACES',
'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'REGULAR_ID',
'ZV']
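# literalNames records the fixed spellings of operator/punctuation tokens,
# while symbolicNames maps each token type back to its rule name (for
# example, symbolicNames[SELECT] == 'SELECT'). Index 0 holds the
# '<INVALID>' placeholder because ANTLR token types start at 1.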
ruleNames = ['T__0', 'A_LETTER', 'ADD', 'AFTER', 'AGENT', 'AGGREGATE',
'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASSUME',
'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT', 'AUTHID',
'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH', 'BEFORE',
'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE', 'BINARY_FLOAT',
'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY', 'BOOLEAN', 'BOTH',
'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER', 'CACHE', 'CALL',
'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR', 'CHAR_CS',
'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER', 'COLLECT',
'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPATIBILITY',
'COMPILE', 'COMPOUND', 'CONNECT', 'CONNECT_BY_ROOT', 'CONSTANT',
'CONSTRAINT', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTENT', 'CONTEXT',
'CONTINUE', 'CONVERT', 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST',
'COUNT', 'CREATE', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER',
'CURSOR', 'CUSTOMDATUM', 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY',
'DB_ROLE_CHANGE', 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL',
'DECLARE', 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS',
'DEFERRED', 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC',
'DIMENSION', 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT',
'DOUBLE', 'DROP', 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT',
'ELSE', 'ELSIF', 'EMPTY', 'ENABLE', 'ENCODING', 'END',
'ENTITYESCAPING', 'ERR', 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT',
'EXCEPTION', 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE',
'EXECUTE', 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT',
'FAILURE', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE',
'FLOAT', 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM',
'FULL', 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH',
'HAVING', 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN',
'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED',
'INDICATOR', 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT',
'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',
'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',
'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',
'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',
'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',
'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',
'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',
'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',
'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',
'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',
'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',
'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',
'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',
'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',
'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',
'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',
'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',
'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',
'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',
'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',
'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',
'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',
'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',
'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',
'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',
'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',
'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',
'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',
'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',
'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',
'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',
'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',
'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',
'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',
'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',
'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',
'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',
'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',
'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',
'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',
'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',
'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',
'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',
'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',
'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',
'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',
'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',
'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',
'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',
'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',
'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',
'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',
'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',
'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',
'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',
'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',
'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'CHAR_STRING_PERL', 'QUOTE',
'QS_ANGLE', 'QS_BRACE', 'QS_BRACK', 'QS_PAREN', 'QS_OTHER_CH',
'DELIMITED_ID', 'PERCENT', 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN',
'DOUBLE_ASTERISK', 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA',
'SOLIDUS', 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',
'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',
'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',
'GREATER_THAN_OP', 'QUESTION_MARK', 'CONCATENATION_OP',
'VERTICAL_BAR', 'EQUALS_OP', 'LEFT_BRACKET', 'RIGHT_BRACKET',
'INTRODUCER', 'SPACES', 'SIMPLE_LETTER',
'UNSIGNED_INTEGER_FRAGMENT', 'FLOAT_FRAGMENT',
'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'NEWLINE',
'SPACE', 'REGULAR_ID', 'ZV', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z']
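# ruleNames is longer than the token tables above because it also includes
# fragment rules (CHAR_STRING_PERL, QUOTE, QS_ANGLE, SIMPLE_LETTER, the A-Z
# letter fragments, etc.) that take part in lexing but never surface as
# token types of their own.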
grammarFileName = 'PlSql.g4'
def __init__(self, input=None, output: TextIO=sys.stdout):
super().__init__(input, output)
self.checkVersion('4.7.2')
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA,
    PredictionContextCache())
self._actions = None
self._predicates = None
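# A minimal usage sketch. This is hedged: it assumes the generated class is
# named PlSqlLexer (the name ANTLR would derive from PlSql.g4; the class
# header is not shown here) and that the antlr4 Python runtime matching
# version 4.7.2 is installed.
#
#   from antlr4 import InputStream, CommonTokenStream
#
#   lexer = PlSqlLexer(InputStream("SELECT dummy FROM dual;"))
#   stream = CommonTokenStream(lexer)
#   stream.fill()
#   for tok in stream.tokens[:-1]:  # drop the trailing EOF token
#       print(tok.type, PlSqlLexer.symbolicNames[tok.type], repr(tok.text))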
| <mask token>
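# serializedATN() rebuilds the string-serialized form of the lexer's
# augmented transition network (ATN). The buf.write(...) payload below is
# opaque, machine-generated data emitted by the ANTLR tool; it encodes the
# lexer's state machine and should never be edited by hand.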
def serializedATN():
with StringIO() as buf:
buf.write('\x03悋Ꜫ脳맭䅼㯧瞆奤\x02Ȏ')
buf.write(
'ᓗ\x08\x01\x04\x02\t\x02\x04\x03\t\x03\x04\x04\t\x04\x04\x05\t\x05\x04\x06\t\x06\x04\x07'
)
buf.write(
'\t\x07\x04\x08\t\x08\x04\t\t\t\x04\n\t\n\x04\x0b\t\x0b\x04\x0c\t\x0c\x04\r\t\r'
)
buf.write(
'\x04\x0e\t\x0e\x04\x0f\t\x0f\x04\x10\t\x10\x04\x11\t\x11\x04\x12\t\x12\x04\x13'
)
buf.write(
'\t\x13\x04\x14\t\x14\x04\x15\t\x15\x04\x16\t\x16\x04\x17\t\x17\x04\x18\t\x18'
)
buf.write(
'\x04\x19\t\x19\x04\x1a\t\x1a\x04\x1b\t\x1b\x04\x1c\t\x1c\x04\x1d\t\x1d\x04\x1e'
)
buf.write(
'\t\x1e\x04\x1f\t\x1f\x04 \t \x04!\t!\x04"\t"\x04#\t#\x04$\t$\x04%\t%'
)
buf.write(
"\x04&\t&\x04'\t'\x04(\t(\x04)\t)\x04*\t*\x04+\t+\x04,\t,\x04-\t-\x04."
)
buf.write('\t.\x04/\t/\x040\t0\x041\t1\x042\t2\x043\t3\x044')
buf.write('\t4\x045\t5\x046\t6\x047\t7\x048\t8\x049\t9\x04:\t:')
buf.write(
'\x04;\t;\x04<\t<\x04=\t=\x04>\t>\x04?\t?\x04@\t@\x04A\tA\x04B\tB\x04C\t'
)
buf.write(
'C\x04D\tD\x04E\tE\x04F\tF\x04G\tG\x04H\tH\x04I\tI\x04J\tJ\x04K\tK\x04L\t'
)
buf.write(
'L\x04M\tM\x04N\tN\x04O\tO\x04P\tP\x04Q\tQ\x04R\tR\x04S\tS\x04T\tT\x04U\t'
)
buf.write(
'U\x04V\tV\x04W\tW\x04X\tX\x04Y\tY\x04Z\tZ\x04[\t[\x04\\\t\\\x04]\t]\x04'
)
buf.write(
'^\t^\x04_\t_\x04`\t`\x04a\ta\x04b\tb\x04c\tc\x04d\td\x04e\te\x04f\tf\x04'
)
buf.write(
'g\tg\x04h\th\x04i\ti\x04j\tj\x04k\tk\x04l\tl\x04m\tm\x04n\tn\x04o\to\x04'
)
buf.write(
'p\tp\x04q\tq\x04r\tr\x04s\ts\x04t\tt\x04u\tu\x04v\tv\x04w\tw\x04x\tx\x04'
)
buf.write(
'y\ty\x04z\tz\x04{\t{\x04|\t|\x04}\t}\x04~\t~\x04\x7f\t\x7f\x04\x80'
)
buf.write('\t\x80\x04\x81\t\x81\x04\x82\t\x82\x04\x83\t\x83')
buf.write('\x04\x84\t\x84\x04\x85\t\x85\x04\x86\t\x86\x04\x87')
buf.write('\t\x87\x04\x88\t\x88\x04\x89\t\x89\x04\x8a\t\x8a')
buf.write('\x04\x8b\t\x8b\x04\x8c\t\x8c\x04\x8d\t\x8d\x04\x8e')
buf.write('\t\x8e\x04\x8f\t\x8f\x04\x90\t\x90\x04\x91\t\x91')
buf.write('\x04\x92\t\x92\x04\x93\t\x93\x04\x94\t\x94\x04\x95')
buf.write('\t\x95\x04\x96\t\x96\x04\x97\t\x97\x04\x98\t\x98')
buf.write('\x04\x99\t\x99\x04\x9a\t\x9a\x04\x9b\t\x9b\x04\x9c')
buf.write('\t\x9c\x04\x9d\t\x9d\x04\x9e\t\x9e\x04\x9f\t\x9f')
buf.write('\x04\xa0\t\xa0\x04¡\t¡\x04¢\t¢\x04£')
buf.write('\t£\x04¤\t¤\x04¥\t¥\x04¦\t¦')
buf.write('\x04§\t§\x04¨\t¨\x04©\t©\x04ª')
buf.write('\tª\x04«\t«\x04¬\t¬\x04\xad\t\xad')
buf.write('\x04®\t®\x04¯\t¯\x04°\t°\x04±')
buf.write('\t±\x04²\t²\x04³\t³\x04´\t´')
buf.write('\x04µ\tµ\x04¶\t¶\x04·\t·\x04¸')
buf.write('\t¸\x04¹\t¹\x04º\tº\x04»\t»')
buf.write('\x04¼\t¼\x04½\t½\x04¾\t¾\x04¿')
buf.write('\t¿\x04À\tÀ\x04Á\tÁ\x04Â\tÂ')
buf.write('\x04Ã\tÃ\x04Ä\tÄ\x04Å\tÅ\x04Æ')
buf.write('\tÆ\x04Ç\tÇ\x04È\tÈ\x04É\tÉ')
buf.write('\x04Ê\tÊ\x04Ë\tË\x04Ì\tÌ\x04Í')
buf.write('\tÍ\x04Î\tÎ\x04Ï\tÏ\x04Ð\tÐ')
buf.write('\x04Ñ\tÑ\x04Ò\tÒ\x04Ó\tÓ\x04Ô')
buf.write('\tÔ\x04Õ\tÕ\x04Ö\tÖ\x04×\t×')
buf.write('\x04Ø\tØ\x04Ù\tÙ\x04Ú\tÚ\x04Û')
buf.write('\tÛ\x04Ü\tÜ\x04Ý\tÝ\x04Þ\tÞ')
buf.write('\x04ß\tß\x04à\tà\x04á\tá\x04â')
buf.write('\tâ\x04ã\tã\x04ä\tä\x04å\tå')
buf.write('\x04æ\tæ\x04ç\tç\x04è\tè\x04é')
buf.write('\té\x04ê\tê\x04ë\të\x04ì\tì')
buf.write('\x04í\tí\x04î\tî\x04ï\tï\x04ð')
buf.write('\tð\x04ñ\tñ\x04ò\tò\x04ó\tó')
buf.write('\x04ô\tô\x04õ\tõ\x04ö\tö\x04÷')
buf.write('\t÷\x04ø\tø\x04ù\tù\x04ú\tú')
buf.write('\x04û\tû\x04ü\tü\x04ý\tý\x04þ')
buf.write('\tþ\x04ÿ\tÿ\x04Ā\tĀ\x04ā\tā')
buf.write('\x04Ă\tĂ\x04ă\tă\x04Ą\tĄ\x04ą')
buf.write('\tą\x04Ć\tĆ\x04ć\tć\x04Ĉ\tĈ')
buf.write('\x04ĉ\tĉ\x04Ċ\tĊ\x04ċ\tċ\x04Č')
buf.write('\tČ\x04č\tč\x04Ď\tĎ\x04ď\tď')
buf.write('\x04Đ\tĐ\x04đ\tđ\x04Ē\tĒ\x04ē')
buf.write('\tē\x04Ĕ\tĔ\x04ĕ\tĕ\x04Ė\tĖ')
buf.write('\x04ė\tė\x04Ę\tĘ\x04ę\tę\x04Ě')
buf.write('\tĚ\x04ě\tě\x04Ĝ\tĜ\x04ĝ\tĝ')
buf.write('\x04Ğ\tĞ\x04ğ\tğ\x04Ġ\tĠ\x04ġ')
buf.write('\tġ\x04Ģ\tĢ\x04ģ\tģ\x04Ĥ\tĤ')
buf.write('\x04ĥ\tĥ\x04Ħ\tĦ\x04ħ\tħ\x04Ĩ')
buf.write('\tĨ\x04ĩ\tĩ\x04Ī\tĪ\x04ī\tī')
buf.write('\x04Ĭ\tĬ\x04ĭ\tĭ\x04Į\tĮ\x04į')
buf.write('\tį\x04İ\tİ\x04ı\tı\x04IJ\tIJ')
buf.write('\x04ij\tij\x04Ĵ\tĴ\x04ĵ\tĵ\x04Ķ')
buf.write('\tĶ\x04ķ\tķ\x04ĸ\tĸ\x04Ĺ\tĹ')
buf.write('\x04ĺ\tĺ\x04Ļ\tĻ\x04ļ\tļ\x04Ľ')
buf.write('\tĽ\x04ľ\tľ\x04Ŀ\tĿ\x04ŀ\tŀ')
buf.write('\x04Ł\tŁ\x04ł\tł\x04Ń\tŃ\x04ń')
buf.write('\tń\x04Ņ\tŅ\x04ņ\tņ\x04Ň\tŇ')
buf.write('\x04ň\tň\x04ʼn\tʼn\x04Ŋ\tŊ\x04ŋ')
buf.write('\tŋ\x04Ō\tŌ\x04ō\tō\x04Ŏ\tŎ')
buf.write('\x04ŏ\tŏ\x04Ő\tŐ\x04ő\tő\x04Œ')
buf.write('\tŒ\x04œ\tœ\x04Ŕ\tŔ\x04ŕ\tŕ')
buf.write('\x04Ŗ\tŖ\x04ŗ\tŗ\x04Ř\tŘ\x04ř')
buf.write('\tř\x04Ś\tŚ\x04ś\tś\x04Ŝ\tŜ')
buf.write('\x04ŝ\tŝ\x04Ş\tŞ\x04ş\tş\x04Š')
buf.write('\tŠ\x04š\tš\x04Ţ\tŢ\x04ţ\tţ')
buf.write('\x04Ť\tŤ\x04ť\tť\x04Ŧ\tŦ\x04ŧ')
buf.write('\tŧ\x04Ũ\tŨ\x04ũ\tũ\x04Ū\tŪ')
buf.write('\x04ū\tū\x04Ŭ\tŬ\x04ŭ\tŭ\x04Ů')
buf.write('\tŮ\x04ů\tů\x04Ű\tŰ\x04ű\tű')
buf.write('\x04Ų\tŲ\x04ų\tų\x04Ŵ\tŴ\x04ŵ')
buf.write('\tŵ\x04Ŷ\tŶ\x04ŷ\tŷ\x04Ÿ\tŸ')
buf.write('\x04Ź\tŹ\x04ź\tź\x04Ż\tŻ\x04ż')
buf.write('\tż\x04Ž\tŽ\x04ž\tž\x04ſ\tſ')
buf.write('\x04ƀ\tƀ\x04Ɓ\tƁ\x04Ƃ\tƂ\x04ƃ')
buf.write('\tƃ\x04Ƅ\tƄ\x04ƅ\tƅ\x04Ɔ\tƆ')
buf.write('\x04Ƈ\tƇ\x04ƈ\tƈ\x04Ɖ\tƉ\x04Ɗ')
buf.write('\tƊ\x04Ƌ\tƋ\x04ƌ\tƌ\x04ƍ\tƍ')
buf.write('\x04Ǝ\tƎ\x04Ə\tƏ\x04Ɛ\tƐ\x04Ƒ')
buf.write('\tƑ\x04ƒ\tƒ\x04Ɠ\tƓ\x04Ɣ\tƔ')
buf.write('\x04ƕ\tƕ\x04Ɩ\tƖ\x04Ɨ\tƗ\x04Ƙ')
buf.write('\tƘ\x04ƙ\tƙ\x04ƚ\tƚ\x04ƛ\tƛ')
buf.write('\x04Ɯ\tƜ\x04Ɲ\tƝ\x04ƞ\tƞ\x04Ɵ')
buf.write('\tƟ\x04Ơ\tƠ\x04ơ\tơ\x04Ƣ\tƢ')
buf.write('\x04ƣ\tƣ\x04Ƥ\tƤ\x04ƥ\tƥ\x04Ʀ')
buf.write('\tƦ\x04Ƨ\tƧ\x04ƨ\tƨ\x04Ʃ\tƩ')
buf.write('\x04ƪ\tƪ\x04ƫ\tƫ\x04Ƭ\tƬ\x04ƭ')
buf.write('\tƭ\x04Ʈ\tƮ\x04Ư\tƯ\x04ư\tư')
buf.write('\x04Ʊ\tƱ\x04Ʋ\tƲ\x04Ƴ\tƳ\x04ƴ')
buf.write('\tƴ\x04Ƶ\tƵ\x04ƶ\tƶ\x04Ʒ\tƷ')
buf.write('\x04Ƹ\tƸ\x04ƹ\tƹ\x04ƺ\tƺ\x04ƻ')
buf.write('\tƻ\x04Ƽ\tƼ\x04ƽ\tƽ\x04ƾ\tƾ')
buf.write('\x04ƿ\tƿ\x04ǀ\tǀ\x04ǁ\tǁ\x04ǂ')
buf.write('\tǂ\x04ǃ\tǃ\x04DŽ\tDŽ\x04Dž\tDž')
buf.write('\x04dž\tdž\x04LJ\tLJ\x04Lj\tLj\x04lj')
buf.write('\tlj\x04NJ\tNJ\x04Nj\tNj\x04nj\tnj')
buf.write('\x04Ǎ\tǍ\x04ǎ\tǎ\x04Ǐ\tǏ\x04ǐ')
buf.write('\tǐ\x04Ǒ\tǑ\x04ǒ\tǒ\x04Ǔ\tǓ')
buf.write('\x04ǔ\tǔ\x04Ǖ\tǕ\x04ǖ\tǖ\x04Ǘ')
buf.write('\tǗ\x04ǘ\tǘ\x04Ǚ\tǙ\x04ǚ\tǚ')
buf.write('\x04Ǜ\tǛ\x04ǜ\tǜ\x04ǝ\tǝ\x04Ǟ')
buf.write('\tǞ\x04ǟ\tǟ\x04Ǡ\tǠ\x04ǡ\tǡ')
buf.write('\x04Ǣ\tǢ\x04ǣ\tǣ\x04Ǥ\tǤ\x04ǥ')
buf.write('\tǥ\x04Ǧ\tǦ\x04ǧ\tǧ\x04Ǩ\tǨ')
buf.write('\x04ǩ\tǩ\x04Ǫ\tǪ\x04ǫ\tǫ\x04Ǭ')
buf.write('\tǬ\x04ǭ\tǭ\x04Ǯ\tǮ\x04ǯ\tǯ')
buf.write('\x04ǰ\tǰ\x04DZ\tDZ\x04Dz\tDz\x04dz')
buf.write('\tdz\x04Ǵ\tǴ\x04ǵ\tǵ\x04Ƕ\tǶ')
buf.write('\x04Ƿ\tǷ\x04Ǹ\tǸ\x04ǹ\tǹ\x04Ǻ')
buf.write('\tǺ\x04ǻ\tǻ\x04Ǽ\tǼ\x04ǽ\tǽ')
buf.write('\x04Ǿ\tǾ\x04ǿ\tǿ\x04Ȁ\tȀ\x04ȁ')
buf.write('\tȁ\x04Ȃ\tȂ\x04ȃ\tȃ\x04Ȅ\tȄ')
buf.write('\x04ȅ\tȅ\x04Ȇ\tȆ\x04ȇ\tȇ\x04Ȉ')
buf.write('\tȈ\x04ȉ\tȉ\x04Ȋ\tȊ\x04ȋ\tȋ')
buf.write('\x04Ȍ\tȌ\x04ȍ\tȍ\x04Ȏ\tȎ\x04ȏ')
buf.write('\tȏ\x04Ȑ\tȐ\x04ȑ\tȑ\x04Ȓ\tȒ')
buf.write('\x04ȓ\tȓ\x04Ȕ\tȔ\x04ȕ\tȕ\x04Ȗ')
buf.write('\tȖ\x04ȗ\tȗ\x04Ș\tȘ\x04ș\tș')
buf.write('\x04Ț\tȚ\x04ț\tț\x04Ȝ\tȜ\x04ȝ')
buf.write('\tȝ\x04Ȟ\tȞ\x04ȟ\tȟ\x04Ƞ\tȠ')
buf.write('\x04ȡ\tȡ\x04Ȣ\tȢ\x04ȣ\tȣ\x04Ȥ')
buf.write('\tȤ\x04ȥ\tȥ\x04Ȧ\tȦ\x04ȧ\tȧ')
buf.write('\x04Ȩ\tȨ\x04ȩ\tȩ\x04Ȫ\tȪ\x04ȫ')
buf.write('\tȫ\x04Ȭ\tȬ\x04ȭ\tȭ\x04Ȯ\tȮ')
buf.write('\x04ȯ\tȯ\x04Ȱ\tȰ\x04ȱ\tȱ\x04Ȳ')
buf.write('\tȲ\x04ȳ\tȳ\x04ȴ\tȴ\x03\x02\x03\x02\x03\x02\x03')
buf.write(
'\x03\x03\x03\x03\x04\x03\x04\x03\x04\x03\x04\x03\x05\x03\x05\x03\x05\x03\x05\x03\x05\x03\x05\x03\x06\x03\x06'
)
buf.write(
'\x03\x06\x03\x06\x03\x06\x03\x06\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03'
)
buf.write("""
""")
buf.write("""
""")
buf.write(
'\x0c\x03\r\x03\r\x03\r\x03\r\x03\r\x03\r\x03\x0e\x03\x0e\x03\x0e\x03\x0f\x03\x0f\x03'
)
buf.write(
'\x0f\x03\x0f\x03\x0f\x03\x0f\x03\x0f\x03\x10\x03\x10\x03\x10\x03\x10\x03\x10\x03\x10'
)
buf.write(
'\x03\x10\x03\x11\x03\x11\x03\x11\x03\x11\x03\x12\x03\x12\x03\x12\x03\x12\x03\x12\x03\x12'
)
buf.write(
'\x03\x12\x03\x12\x03\x12\x03\x12\x03\x13\x03\x13\x03\x13\x03\x14\x03\x14\x03\x14\x03\x14'
)
buf.write(
'\x03\x14\x03\x14\x03\x14\x03\x14\x03\x14\x03\x14\x03\x15\x03\x15\x03\x15\x03\x15\x03\x15'
)
buf.write(
'\x03\x15\x03\x16\x03\x16\x03\x16\x03\x16\x03\x16\x03\x16\x03\x16\x03\x17\x03\x17\x03\x17'
)
buf.write(
'\x03\x17\x03\x17\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18'
)
buf.write(
'\x03\x18\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19'
)
buf.write(
'\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19'
)
buf.write(
'\x03\x19\x03\x19\x03\x1a\x03\x1a\x03\x1a\x03\x1a\x03\x1a\x03\x1a\x03\x1b\x03\x1b\x03\x1b'
)
buf.write(
'\x03\x1b\x03\x1b\x03\x1b\x03\x1b\x03\x1c\x03\x1c\x03\x1c\x03\x1c\x03\x1c\x03\x1c\x03\x1d'
)
buf.write(
'\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1e\x03\x1e\x03\x1e\x03\x1e'
)
buf.write(
'\x03\x1e\x03\x1e\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f'
)
buf.write(
'\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03 \x03 \x03 \x03 \x03 \x03 \x03 \x03 \x03 \x03'
)
buf.write(
' \x03 \x03 \x03 \x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03'
)
buf.write(
'!\x03"\x03"\x03"\x03"\x03"\x03#\x03#\x03#\x03#\x03#\x03#\x03$\x03$\x03$\x03$\x03'
)
buf.write(
"$\x03%\x03%\x03%\x03%\x03%\x03%\x03%\x03%\x03&\x03&\x03&\x03&\x03&\x03'\x03'\x03'\x03"
)
buf.write(
"'\x03'\x03'\x03'\x03'\x03(\x03(\x03(\x03(\x03(\x03)\x03)\x03)\x03*\x03*\x03*\x03"
)
buf.write(
'*\x03*\x03+\x03+\x03,\x03,\x03,\x03,\x03,\x03,\x03-\x03-\x03-\x03-\x03-\x03.\x03.\x03.\x03'
)
buf.write(
'.\x03.\x03.\x03.\x03.\x03.\x03.\x03/\x03/\x03/\x03/\x03/\x03/\x03/\x03/\x030\x030'
)
buf.write('\x030\x030\x030\x031\x031\x031\x031\x031\x032\x032\x032')
buf.write('\x032\x032\x033\x033\x033\x033\x033\x033\x033\x033\x034')
buf.write('\x034\x034\x034\x034\x034\x034\x034\x034\x034\x035\x035')
buf.write('\x035\x035\x035\x035\x036\x036\x036\x036\x037\x037\x037')
buf.write(
'\x037\x037\x038\x038\x038\x038\x038\x038\x039\x039\x039\x039\x039\x039\x039\x039\x03'
)
buf.write(
':\x03:\x03:\x03:\x03:\x03:\x03:\x03:\x03;\x03;\x03;\x03;\x03;\x03;\x03;\x03;\x03<\x03<\x03'
)
buf.write(
'<\x03<\x03<\x03<\x03<\x03<\x03=\x03=\x03=\x03=\x03=\x03=\x03=\x03>\x03>\x03>\x03>\x03>\x03'
)
buf.write(
'>\x03>\x03>\x03>\x03>\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03'
)
buf.write(
'?\x03@\x03@\x03@\x03@\x03@\x03@\x03@\x03@\x03A\x03A\x03A\x03A\x03A\x03A\x03A\x03A\x03A\x03'
)
buf.write(
'B\x03B\x03B\x03B\x03B\x03B\x03B\x03B\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03'
)
buf.write(
'C\x03C\x03C\x03C\x03C\x03C\x03D\x03D\x03D\x03D\x03D\x03D\x03D\x03D\x03D\x03E\x03E\x03E\x03'
)
buf.write(
'E\x03E\x03E\x03E\x03E\x03E\x03E\x03E\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03'
)
buf.write(
'F\x03F\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03H\x03H\x03H\x03H\x03'
)
buf.write(
'H\x03H\x03H\x03H\x03I\x03I\x03I\x03I\x03I\x03I\x03I\x03I\x03J\x03J\x03J\x03J\x03J\x03J\x03'
)
buf.write(
'J\x03J\x03J\x03K\x03K\x03K\x03K\x03K\x03K\x03K\x03K\x03L\x03L\x03L\x03L\x03L\x03L\x03L\x03'
)
buf.write(
'L\x03L\x03L\x03L\x03L\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03'
)
buf.write(
'M\x03M\x03M\x03N\x03N\x03N\x03N\x03N\x03O\x03O\x03O\x03O\x03O\x03O\x03P\x03P\x03P\x03P\x03'
)
buf.write(
'P\x03P\x03P\x03Q\x03Q\x03Q\x03Q\x03Q\x03Q\x03R\x03R\x03R\x03R\x03R\x03S\x03S\x03S\x03S\x03'
)
buf.write(
'S\x03S\x03S\x03S\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03U\x03'
)
buf.write(
'U\x03U\x03U\x03U\x03U\x03U\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03'
)
buf.write(
'W\x03W\x03W\x03W\x03W\x03W\x03X\x03X\x03X\x03X\x03X\x03Y\x03Y\x03Y\x03Y\x03Y\x03Y\x03Y\x03'
)
buf.write(
'Y\x03Y\x03Z\x03Z\x03Z\x03Z\x03Z\x03[\x03[\x03[\x03[\x03\\\x03\\\x03\\\x03\\\x03\\\x03'
)
buf.write(
'\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03]\x03]\x03]\x03]\x03]'
)
buf.write(
'\x03]\x03]\x03]\x03]\x03]\x03]\x03^\x03^\x03^\x03^\x03_\x03_\x03_\x03_\x03_\x03_\x03`\x03'
)
buf.write(
'`\x03`\x03`\x03a\x03a\x03a\x03a\x03a\x03a\x03a\x03a\x03b\x03b\x03b\x03b\x03b\x03b\x03b\x03'
)
buf.write(
'b\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03d\x03d\x03d\x03d\x03d\x03d\x03d\x03'
)
buf.write(
'd\x03d\x03d\x03e\x03e\x03e\x03e\x03e\x03e\x03e\x03e\x03f\x03f\x03f\x03f\x03f\x03f\x03f\x03'
)
buf.write(
'f\x03f\x03g\x03g\x03g\x03g\x03g\x03g\x03g\x03g\x03g\x03h\x03h\x03h\x03h\x03h\x03h\x03h\x03'
)
buf.write(
'h\x03i\x03i\x03i\x03i\x03i\x03i\x03i\x03j\x03j\x03j\x03j\x03j\x03j\x03k\x03k\x03k\x03k\x03'
)
buf.write(
'k\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03m\x03m\x03m\x03'
)
buf.write(
'm\x03m\x03m\x03m\x03m\x03m\x03m\x03n\x03n\x03n\x03n\x03n\x03n\x03n\x03n\x03o\x03o\x03o\x03'
)
buf.write(
'o\x03o\x03o\x03o\x03o\x03o\x03o\x03o\x03o\x03o\x03p\x03p\x03p\x03p\x03p\x03p\x03p\x03p\x03'
)
buf.write(
'p\x03q\x03q\x03q\x03q\x03q\x03q\x03q\x03q\x03q\x03r\x03r\x03r\x03r\x03r\x03r\x03r\x03s\x03'
)
buf.write(
's\x03s\x03s\x03s\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03'
)
buf.write(
't\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03u\x03u\x03u\x03u\x03u\x03v\x03v\x03'
)
buf.write(
'v\x03v\x03v\x03v\x03v\x03v\x03w\x03w\x03w\x03w\x03w\x03x\x03x\x03x\x03x\x03x\x03x\x03y\x03'
)
buf.write(
'y\x03y\x03y\x03y\x03y\x03z\x03z\x03z\x03z\x03z\x03z\x03z\x03{\x03{\x03{\x03{\x03{\x03{\x03'
)
buf.write(
'{\x03{\x03{\x03|\x03|\x03|\x03|\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03'
)
buf.write(
'}\x03}\x03}\x03}\x03~\x03~\x03~\x03~\x03\x7f\x03\x7f\x03\x7f\x03\x7f\x03\x7f\x03'
)
buf.write(
'\x7f\x03\x7f\x03\x80\x03\x80\x03\x80\x03\x80\x03\x80\x03\x80')
buf.write('\x03\x80\x03\x81\x03\x81\x03\x81\x03\x81\x03\x81\x03\x81')
buf.write('\x03\x81\x03\x81\x03\x81\x03\x82\x03\x82\x03\x82\x03\x82')
buf.write('\x03\x82\x03\x82\x03\x82\x03\x83\x03\x83\x03\x83\x03\x83')
buf.write('\x03\x83\x03\x83\x03\x83\x03\x83\x03\x83\x03\x83\x03\x84')
buf.write('\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84')
buf.write('\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84')
buf.write('\x03\x85\x03\x85\x03\x85\x03\x85\x03\x85\x03\x85\x03\x85')
buf.write('\x03\x85\x03\x85\x03\x85\x03\x85\x03\x86\x03\x86\x03\x86')
buf.write('\x03\x86\x03\x86\x03\x86\x03\x86\x03\x86\x03\x87\x03\x87')
buf.write('\x03\x87\x03\x87\x03\x87\x03\x87\x03\x87\x03\x87\x03\x87')
buf.write('\x03\x87\x03\x88\x03\x88\x03\x88\x03\x88\x03\x88\x03\x88')
buf.write('\x03\x88\x03\x88\x03\x89\x03\x89\x03\x89\x03\x89\x03\x89')
buf.write('\x03\x89\x03\x89\x03\x8a\x03\x8a\x03\x8a\x03\x8a\x03\x8a')
buf.write('\x03\x8b\x03\x8b\x03\x8b\x03\x8b\x03\x8b\x03\x8b\x03\x8b')
buf.write('\x03\x8b\x03\x8c\x03\x8c\x03\x8c\x03\x8c\x03\x8c\x03\x8c')
buf.write('\x03\x8c\x03\x8c\x03\x8c\x03\x8d\x03\x8d\x03\x8d\x03\x8d')
buf.write('\x03\x8d\x03\x8d\x03\x8d\x03\x8d\x03\x8e\x03\x8e\x03\x8e')
buf.write('\x03\x8e\x03\x8e\x03\x8e\x03\x8e\x03\x8e\x03\x8f\x03\x8f')
buf.write('\x03\x8f\x03\x8f\x03\x8f\x03\x8f\x03\x90\x03\x90\x03\x90')
buf.write('\x03\x90\x03\x90\x03\x90\x03\x91\x03\x91\x03\x91\x03\x91')
buf.write('\x03\x91\x03\x91\x03\x92\x03\x92\x03\x92\x03\x92\x03\x92')
buf.write('\x03\x92\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93')
buf.write('\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93\x03\x94')
buf.write('\x03\x94\x03\x94\x03\x94\x03\x94\x03\x94\x03\x95\x03\x95')
buf.write('\x03\x95\x03\x95\x03\x95\x03\x95\x03\x95\x03\x95\x03\x95')
buf.write('\x03\x95\x03\x96\x03\x96\x03\x96\x03\x96\x03\x96\x03\x96')
buf.write('\x03\x96\x03\x96\x03\x97\x03\x97\x03\x97\x03\x97\x03\x98')
buf.write('\x03\x98\x03\x98\x03\x98\x03\x98\x03\x98\x03\x98\x03\x99')
buf.write('\x03\x99\x03\x99\x03\x99\x03\x99\x03\x99\x03\x9a\x03\x9a')
buf.write('\x03\x9a\x03\x9a\x03\x9a\x03\x9b\x03\x9b\x03\x9b\x03\x9b')
buf.write('\x03\x9b\x03\x9c\x03\x9c\x03\x9c\x03\x9c\x03\x9c\x03\x9c')
buf.write('\x03\x9c\x03\x9c\x03\x9c\x03\x9d\x03\x9d\x03\x9d\x03\x9d')
buf.write('\x03\x9d\x03\x9e\x03\x9e\x03\x9e\x03\x9e\x03\x9e\x03\x9e')
buf.write('\x03\x9f\x03\x9f\x03\x9f\x03\x9f\x03\x9f\x03\x9f\x03\xa0')
buf.write('\x03\xa0\x03\xa0\x03\xa0\x03\xa0\x03\xa0\x03\xa0\x03\xa0')
buf.write('\x03\xa0\x03¡\x03¡\x03¡\x03¡\x03¡\x03¢')
buf.write('\x03¢\x03¢\x03¢\x03¢\x03¢\x03¢\x03£')
buf.write('\x03£\x03£\x03£\x03£\x03¤\x03¤\x03¤')
buf.write('\x03¤\x03¤\x03¥\x03¥\x03¥\x03¦\x03¦')
buf.write('\x03¦\x03¦\x03¦\x03¦\x03¦\x03§\x03§')
buf.write('\x03§\x03§\x03§\x03§\x03§\x03§\x03§')
buf.write('\x03§\x03¨\x03¨\x03¨\x03©\x03©\x03©')
buf.write('\x03©\x03©\x03©\x03©\x03©\x03ª\x03ª')
buf.write('\x03ª\x03ª\x03ª\x03ª\x03ª\x03ª\x03ª')
buf.write('\x03ª\x03«\x03«\x03«\x03«\x03«\x03«')
buf.write('\x03«\x03«\x03«\x03«\x03¬\x03¬\x03¬')
buf.write('\x03¬\x03¬\x03¬\x03¬\x03\xad\x03\xad\x03\xad')
buf.write('\x03\xad\x03\xad\x03\xad\x03®\x03®\x03®\x03®')
buf.write('\x03®\x03®\x03®\x03®\x03¯\x03¯\x03¯')
buf.write('\x03¯\x03¯\x03¯\x03¯\x03¯\x03¯\x03¯')
buf.write('\x03°\x03°\x03°\x03°\x03°\x03°\x03°')
buf.write('\x03°\x03±\x03±\x03±\x03±\x03±\x03±')
buf.write('\x03±\x03±\x03±\x03²\x03²\x03²\x03²')
buf.write('\x03²\x03²\x03²\x03³\x03³\x03³\x03³')
buf.write('\x03³\x03³\x03´\x03´\x03´\x03´\x03´')
buf.write('\x03´\x03µ\x03µ\x03µ\x03µ\x03µ\x03µ')
buf.write('\x03µ\x03¶\x03¶\x03¶\x03¶\x03¶\x03¶')
buf.write('\x03¶\x03¶\x03¶\x03¶\x03¶\x03¶\x03¶')
buf.write('\x03·\x03·\x03·\x03·\x03·\x03·\x03·')
buf.write('\x03·\x03¸\x03¸\x03¸\x03¸\x03¹\x03¹')
buf.write('\x03¹\x03¹\x03¹\x03¹\x03¹\x03¹\x03º')
buf.write('\x03º\x03º\x03º\x03º\x03º\x03º\x03º')
buf.write('\x03º\x03º\x03»\x03»\x03»\x03»\x03»')
buf.write('\x03»\x03»\x03»\x03»\x03¼\x03¼\x03¼')
buf.write('\x03¼\x03¼\x03½\x03½\x03½\x03½\x03½')
buf.write('\x03½\x03½\x03½\x03½\x03½\x03½\x03¾')
buf.write('\x03¾\x03¾\x03¿\x03¿\x03¿\x03¿\x03¿')
buf.write('\x03¿\x03¿\x03¿\x03¿\x03¿\x03À\x03À')
buf.write('\x03À\x03À\x03À\x03À\x03À\x03À\x03Á')
buf.write('\x03Á\x03Á\x03Á\x03Á\x03Â\x03Â\x03Â')
buf.write('\x03Â\x03Â\x03Ã\x03Ã\x03Ã\x03Ã\x03Ã')
buf.write('\x03Ä\x03Ä\x03Ä\x03Ä\x03Ä\x03Ä\x03Ä')
buf.write('\x03Ä\x03Ä\x03Å\x03Å\x03Å\x03Å\x03Å')
buf.write('\x03Æ\x03Æ\x03Æ\x03Æ\x03Æ\x03Æ\x03Æ')
buf.write('\x03Æ\x03Æ\x03Æ\x03Æ\x03Ç\x03Ç\x03Ç')
buf.write('\x03Ç\x03Ç\x03Ç\x03Ç\x03Ç\x03È\x03È')
buf.write('\x03È\x03È\x03È\x03É\x03É\x03É\x03É')
buf.write('\x03É\x03É\x03Ê\x03Ê\x03Ê\x03Ê\x03Ê')
buf.write('\x03Ê\x03Ê\x03Ê\x03Ë\x03Ë\x03Ë\x03Ë')
buf.write('\x03Ë\x03Ì\x03Ì\x03Ì\x03Ì\x03Ì\x03Ì')
buf.write('\x03Í\x03Í\x03Í\x03Í\x03Í\x03Í\x03Î')
buf.write('\x03Î\x03Î\x03Î\x03Î\x03Î\x03Ï\x03Ï')
buf.write('\x03Ï\x03Ï\x03Ï\x03Ï\x03Ð\x03Ð\x03Ð')
buf.write('\x03Ð\x03Ð\x03Ð\x03Ñ\x03Ñ\x03Ñ\x03Ñ')
buf.write('\x03Ñ\x03Ò\x03Ò\x03Ò\x03Ò\x03Ò\x03Ò')
buf.write('\x03Ò\x03Ó\x03Ó\x03Ó\x03Ó\x03Ô\x03Ô')
buf.write('\x03Ô\x03Ô\x03Ô\x03Ô\x03Ô\x03Õ\x03Õ')
buf.write('\x03Õ\x03Õ\x03Õ\x03Õ\x03Ö\x03Ö\x03Ö')
buf.write('\x03Ö\x03Ö\x03×\x03×\x03×\x03×\x03×')
buf.write('\x03Ø\x03Ø\x03Ø\x03Ø\x03Ø\x03Ù\x03Ù')
buf.write('\x03Ù\x03Ù\x03Ú\x03Ú\x03Ú\x03Ú\x03Ú')
buf.write('\x03Ú\x03Ú\x03Ú\x03Û\x03Û\x03Û\x03Û')
buf.write('\x03Û\x03Û\x03Û\x03Û\x03Û\x03Ü\x03Ü')
buf.write('\x03Ü\x03Ü\x03Ü\x03Ü\x03Ü\x03Ü\x03Ü')
buf.write('\x03Ý\x03Ý\x03Ý\x03Ý\x03Ý\x03Ý\x03Ý')
buf.write('\x03Þ\x03Þ\x03Þ\x03Þ\x03Þ\x03Þ\x03ß')
buf.write('\x03ß\x03ß\x03ß\x03ß\x03ß\x03à\x03à')
buf.write('\x03à\x03à\x03à\x03à\x03à\x03á\x03á')
buf.write('\x03á\x03á\x03á\x03á\x03á\x03á\x03á')
buf.write('\x03â\x03â\x03â\x03â\x03â\x03â\x03â')
buf.write('\x03â\x03â\x03ã\x03ã\x03ã\x03ã\x03ã')
buf.write('\x03ä\x03ä\x03ä\x03ä\x03ä\x03ä\x03å')
buf.write('\x03å\x03å\x03å\x03å\x03å\x03å\x03æ')
buf.write('\x03æ\x03æ\x03æ\x03æ\x03æ\x03ç\x03ç')
buf.write('\x03ç\x03ç\x03ç\x03ç\x03ç\x03ç\x03ç')
buf.write('\x03è\x03è\x03è\x03è\x03è\x03é\x03é')
buf.write('\x03é\x03é\x03ê\x03ê\x03ê\x03ê\x03ê')
buf.write('\x03ê\x03ê\x03ê\x03ë\x03ë\x03ë\x03ë')
buf.write('\x03ë\x03ë\x03ë\x03ë\x03ë\x03ì\x03ì')
buf.write('\x03ì\x03ì\x03í\x03í\x03í\x03í\x03í')
buf.write('\x03í\x03î\x03î\x03î\x03î\x03î\x03î')
buf.write('\x03î\x03î\x03î\x03ï\x03ï\x03ï\x03ï')
buf.write('\x03ï\x03ï\x03ð\x03ð\x03ð\x03ð\x03ð')
buf.write('\x03ð\x03ð\x03ñ\x03ñ\x03ñ\x03ñ\x03ò')
buf.write('\x03ò\x03ò\x03ó\x03ó\x03ó\x03ó\x03ó')
buf.write('\x03ó\x03ó\x03ó\x03ô\x03ô\x03ô\x03ô')
buf.write('\x03ô\x03ô\x03ô\x03ô\x03õ\x03õ\x03õ')
buf.write('\x03õ\x03õ\x03õ\x03õ\x03ö\x03ö\x03ö')
buf.write('\x03ö\x03ö\x03ö\x03ö\x03ö\x03÷\x03÷')
buf.write('\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷')
buf.write('\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷')
buf.write('\x03÷\x03ø\x03ø\x03ø\x03ø\x03ø\x03ø')
buf.write('\x03ø\x03ø\x03ø\x03ø\x03ø\x03ù\x03ù')
buf.write('\x03ù\x03ù\x03ù\x03ù\x03ù\x03ù\x03ù')
buf.write('\x03ù\x03ù\x03ú\x03ú\x03ú\x03ú\x03ú')
buf.write('\x03û\x03û\x03û\x03û\x03û\x03û\x03û')
buf.write('\x03û\x03ü\x03ü\x03ü\x03ü\x03ü\x03ü')
buf.write('\x03ü\x03ü\x03ü\x03ü\x03ü\x03ü\x03ü')
buf.write('\x03ü\x03ý\x03ý\x03ý\x03ý\x03þ\x03þ')
buf.write('\x03þ\x03þ\x03þ\x03þ\x03þ\x03ÿ\x03ÿ')
buf.write('\x03ÿ\x03ÿ\x03ÿ\x03Ā\x03Ā\x03Ā\x03Ā')
buf.write('\x03Ā\x03Ā\x03ā\x03ā\x03ā\x03ā\x03ā')
buf.write('\x03ā\x03ā\x03Ă\x03Ă\x03Ă\x03Ă\x03Ă')
buf.write('\x03Ă\x03Ă\x03Ă\x03ă\x03ă\x03ă\x03ă')
buf.write('\x03ă\x03ă\x03ă\x03ă\x03ă\x03ă\x03Ą')
buf.write('\x03Ą\x03Ą\x03Ą\x03Ą\x03Ą\x03Ą\x03ą')
buf.write('\x03ą\x03ą\x03Ć\x03Ć\x03Ć\x03Ć\x03ć')
buf.write('\x03ć\x03ć\x03ć\x03Ĉ\x03Ĉ\x03Ĉ\x03Ĉ')
buf.write('\x03ĉ\x03ĉ\x03ĉ\x03Ċ\x03Ċ\x03Ċ\x03Ċ')
buf.write('\x03Ċ\x03ċ\x03ċ\x03ċ\x03ċ\x03ċ\x03Č')
buf.write('\x03Č\x03Č\x03Č\x03Č\x03Č\x03Č\x03č')
buf.write('\x03č\x03č\x03Ď\x03Ď\x03Ď\x03Ď\x03Ď')
buf.write('\x03Ď\x03Ď\x03Ď\x03ď\x03ď\x03ď\x03ď')
buf.write('\x03ď\x03ď\x03Đ\x03Đ\x03Đ\x03Đ\x03Đ')
buf.write('\x03Đ\x03Đ\x03Đ\x03Đ\x03Đ\x03Đ\x03đ')
buf.write('\x03đ\x03đ\x03đ\x03đ\x03đ\x03đ\x03đ')
buf.write('\x03Ē\x03Ē\x03Ē\x03Ē\x03ē\x03ē\x03ē')
buf.write('\x03ē\x03ē\x03ē\x03Ĕ\x03Ĕ\x03Ĕ\x03Ĕ')
buf.write('\x03Ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ')
buf.write('\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03Ė\x03Ė')
buf.write('\x03Ė\x03Ė\x03Ė\x03Ė\x03Ė\x03Ė\x03ė')
buf.write('\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė')
buf.write('\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė')
buf.write('\x03ė\x03Ę\x03Ę\x03Ę\x03Ę\x03Ę\x03Ę')
buf.write('\x03Ę\x03Ę\x03Ę\x03Ę\x03Ę\x03ę\x03ę')
buf.write('\x03ę\x03ę\x03ę\x03ę\x03ę\x03Ě\x03Ě')
buf.write('\x03Ě\x03Ě\x03Ě\x03Ě\x03Ě\x03Ě\x03Ě')
buf.write('\x03Ě\x03ě\x03ě\x03ě\x03ě\x03ě\x03ě')
buf.write('\x03ě\x03ě\x03Ĝ\x03Ĝ\x03Ĝ\x03Ĝ\x03Ĝ')
buf.write('\x03ĝ\x03ĝ\x03ĝ\x03ĝ\x03ĝ\x03ĝ\x03ĝ')
buf.write('\x03ĝ\x03ĝ\x03Ğ\x03Ğ\x03Ğ\x03Ğ\x03Ğ')
buf.write('\x03Ğ\x03ğ\x03ğ\x03ğ\x03ğ\x03ğ\x03ğ')
buf.write('\x03ğ\x03ğ\x03ğ\x03ğ\x03Ġ\x03Ġ\x03Ġ')
buf.write('\x03Ġ\x03Ġ\x03Ġ\x03ġ\x03ġ\x03ġ\x03ġ')
buf.write('\x03ġ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ')
buf.write('\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03ģ')
buf.write('\x03ģ\x03ģ\x03ģ\x03ģ\x03ģ\x03ģ\x03ģ')
buf.write('\x03ģ\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ')
buf.write('\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ\x03ĥ\x03ĥ\x03ĥ')
buf.write('\x03ĥ\x03ĥ\x03ĥ\x03ĥ\x03Ħ\x03Ħ\x03Ħ')
buf.write('\x03Ħ\x03Ħ\x03Ħ\x03Ħ\x03Ħ\x03Ħ\x03Ħ')
buf.write('\x03ħ\x03ħ\x03ħ\x03ħ\x03ħ\x03ħ\x03ħ')
buf.write('\x03ħ\x03ħ\x03ħ\x03Ĩ\x03Ĩ\x03Ĩ\x03Ĩ')
buf.write('\x03Ĩ\x03Ĩ\x03Ĩ\x03Ĩ\x03ĩ\x03ĩ\x03ĩ')
buf.write('\x03ĩ\x03ĩ\x03ĩ\x03Ī\x03Ī\x03Ī\x03Ī')
buf.write('\x03Ī\x03Ī\x03Ī\x03Ī\x03Ī\x03Ī\x03ī')
buf.write('\x03ī\x03ī\x03ī\x03ī\x03ī\x03Ĭ\x03Ĭ')
buf.write('\x03Ĭ\x03Ĭ\x03Ĭ\x03Ĭ\x03ĭ\x03ĭ\x03ĭ')
buf.write('\x03ĭ\x03Į\x03Į\x03Į\x03Į\x03Į\x03į')
buf.write('\x03į\x03į\x03į\x03į\x03İ\x03İ\x03İ')
buf.write('\x03İ\x03İ\x03İ\x03İ\x03ı\x03ı\x03ı')
buf.write('\x03ı\x03IJ\x03IJ\x03IJ\x03IJ\x03IJ\x03IJ')
buf.write('\x03IJ\x03IJ\x03IJ\x03IJ\x03ij\x03ij\x03ij')
buf.write('\x03ij\x03ij\x03ij\x03ij\x03ij\x03ij\x03ij')
buf.write('\x03ij\x03ij\x03Ĵ\x03Ĵ\x03Ĵ\x03Ĵ\x03Ĵ')
buf.write('\x03Ĵ\x03Ĵ\x03ĵ\x03ĵ\x03ĵ\x03ĵ\x03ĵ')
buf.write('\x03ĵ\x03ĵ\x03ĵ\x03ĵ\x03ĵ\x03Ķ\x03Ķ')
buf.write('\x03Ķ\x03Ķ\x03Ķ\x03Ķ\x03Ķ\x03ķ\x03ķ')
buf.write('\x03ķ\x03ķ\x03ķ\x03ķ\x03ķ\x03ķ\x03ĸ')
buf.write('\x03ĸ\x03ĸ\x03ĸ\x03ĸ\x03ĸ\x03ĸ\x03ĸ')
buf.write('\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ')
buf.write('\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ')
buf.write('\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03ĺ')
buf.write('\x03ĺ\x03ĺ\x03ĺ\x03ĺ\x03ĺ\x03ĺ\x03Ļ')
buf.write('\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ')
buf.write('\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03ļ\x03ļ')
buf.write('\x03ļ\x03ļ\x03ļ\x03ļ\x03ļ\x03Ľ\x03Ľ')
buf.write('\x03Ľ\x03Ľ\x03Ľ\x03Ľ\x03Ľ\x03Ľ\x03Ľ')
buf.write('\x03Ľ\x03ľ\x03ľ\x03ľ\x03ľ\x03ľ\x03ľ')
buf.write('\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ')
buf.write('\x03Ŀ\x03ŀ\x03ŀ\x03ŀ\x03ŀ\x03ŀ\x03ŀ')
buf.write('\x03ŀ\x03Ł\x03Ł\x03Ł\x03Ł\x03Ł\x03Ł')
buf.write('\x03ł\x03ł\x03ł\x03ł\x03ł\x03ł\x03ł')
buf.write('\x03ł\x03ł\x03Ń\x03Ń\x03Ń\x03Ń\x03Ń')
buf.write('\x03Ń\x03Ń\x03ń\x03ń\x03ń\x03ń\x03Ņ')
buf.write('\x03Ņ\x03Ņ\x03Ņ\x03Ņ\x03Ņ\x03ņ\x03ņ')
buf.write('\x03ņ\x03ņ\x03ņ\x03Ň\x03Ň\x03Ň\x03Ň')
buf.write('\x03Ň\x03Ň\x03ň\x03ň\x03ň\x03ň\x03ň')
buf.write('\x03ň\x03ň\x03ʼn\x03ʼn\x03ʼn\x03ʼn\x03ʼn')
buf.write('\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ')
buf.write('\x03Ŋ\x03Ŋ\x03Ŋ\x03ŋ\x03ŋ\x03ŋ\x03ŋ')
buf.write('\x03ŋ\x03ŋ\x03ŋ\x03Ō\x03Ō\x03Ō\x03Ō')
buf.write('\x03Ō\x03Ō\x03Ō\x03Ō\x03Ō\x03Ō\x03Ō')
buf.write('\x03Ō\x03ō\x03ō\x03ō\x03ō\x03Ŏ\x03Ŏ')
buf.write('\x03Ŏ\x03Ŏ\x03Ŏ\x03Ŏ\x03Ŏ\x03ŏ\x03ŏ')
buf.write('\x03ŏ\x03ŏ\x03ŏ\x03ŏ\x03ŏ\x03Ő\x03Ő')
buf.write('\x03Ő\x03Ő\x03Ő\x03ő\x03ő\x03ő\x03ő')
buf.write('\x03ő\x03ő\x03ő\x03ő\x03Œ\x03Œ\x03Œ')
buf.write('\x03Œ\x03Œ\x03Œ\x03Œ\x03œ\x03œ\x03œ')
buf.write('\x03œ\x03œ\x03Ŕ\x03Ŕ\x03Ŕ\x03Ŕ\x03Ŕ')
buf.write('\x03Ŕ\x03Ŕ\x03Ŕ\x03Ŕ\x03ŕ\x03ŕ\x03ŕ')
buf.write('\x03ŕ\x03ŕ\x03ŕ\x03ŕ\x03ŕ\x03ŕ\x03ŕ')
buf.write('\x03ŕ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ')
buf.write('\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ')
buf.write('\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ')
buf.write('\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ')
buf.write('\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03Ř\x03Ř\x03Ř')
buf.write('\x03Ř\x03Ř\x03Ř\x03Ř\x03Ř\x03Ř\x03Ř')
buf.write('\x03Ř\x03Ř\x03ř\x03ř\x03ř\x03ř\x03ř')
buf.write('\x03ř\x03ř\x03ř\x03ř\x03ř\x03ř\x03ř')
buf.write('\x03ř\x03ř\x03ř\x03ř\x03Ś\x03Ś\x03Ś')
buf.write('\x03Ś\x03ś\x03ś\x03ś\x03ś\x03ś\x03Ŝ')
buf.write('\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ')
buf.write('\x03Ŝ\x03ŝ\x03ŝ\x03ŝ\x03ŝ\x03ŝ\x03ŝ')
buf.write('\x03Ş\x03Ş\x03Ş\x03Ş\x03Ş\x03ş\x03ş')
buf.write('\x03ş\x03ş\x03ş\x03ş\x03ş\x03ş\x03ş')
buf.write('\x03Š\x03Š\x03Š\x03Š\x03Š\x03Š\x03Š')
buf.write('\x03Š\x03Š\x03š\x03š\x03š\x03š\x03š')
buf.write('\x03š\x03š\x03š\x03š\x03Ţ\x03Ţ\x03Ţ')
buf.write('\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ')
buf.write('\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03ţ\x03ţ')
buf.write('\x03ţ\x03ţ\x03ţ\x03ţ\x03ţ\x03Ť\x03Ť')
buf.write('\x03Ť\x03Ť\x03Ť\x03ť\x03ť\x03ť\x03ť')
buf.write('\x03ť\x03Ŧ\x03Ŧ\x03Ŧ\x03Ŧ\x03Ŧ\x03Ŧ')
buf.write('\x03Ŧ\x03Ŧ\x03Ŧ\x03ŧ\x03ŧ\x03ŧ\x03ŧ')
buf.write('\x03ŧ\x03ŧ\x03ŧ\x03ŧ\x03ŧ\x03Ũ\x03Ũ')
buf.write('\x03Ũ\x03Ũ\x03Ũ\x03ũ\x03ũ\x03ũ\x03ũ')
buf.write('\x03ũ\x03ũ\x03ũ\x03ũ\x03ũ\x03ũ\x03ũ')
buf.write('\x03ũ\x03ũ\x03ũ\x03Ū\x03Ū\x03Ū\x03Ū')
buf.write('\x03Ū\x03Ū\x03Ū\x03Ū\x03ū\x03ū\x03ū')
buf.write('\x03ū\x03ū\x03ū\x03ū\x03ū\x03ū\x03Ŭ')
buf.write('\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ')
buf.write('\x03Ŭ\x03Ŭ\x03Ŭ\x03ŭ\x03ŭ\x03ŭ\x03ŭ')
buf.write('\x03ŭ\x03ŭ\x03Ů\x03Ů\x03Ů\x03Ů\x03Ů')
buf.write('\x03Ů\x03Ů\x03Ů\x03ů\x03ů\x03ů\x03ů')
buf.write('\x03ů\x03ů\x03ů\x03ů\x03ů\x03ů\x03Ű')
buf.write('\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű')
buf.write('\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű\x03ű\x03ű')
buf.write('\x03ű\x03ű\x03ű\x03ű\x03ű\x03Ų\x03Ų')
buf.write('\x03Ų\x03Ų\x03Ų\x03Ų\x03Ų\x03Ų\x03Ų')
buf.write('\x03Ų\x03Ų\x03ų\x03ų\x03ų\x03ų\x03ų')
buf.write('\x03ų\x03ų\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ')
buf.write('\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ')
buf.write('\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ')
buf.write('\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03Ŷ')
buf.write('\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ')
buf.write('\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03ŷ')
buf.write('\x03ŷ\x03ŷ\x03ŷ\x03ŷ\x03ŷ\x03ŷ\x03ŷ')
buf.write('\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ')
buf.write('\x03Ÿ\x03Ź\x03Ź\x03Ź\x03Ź\x03Ź\x03Ź')
buf.write('\x03Ź\x03Ź\x03ź\x03ź\x03ź\x03ź\x03ź')
buf.write('\x03ź\x03Ż\x03Ż\x03Ż\x03Ż\x03ż\x03ż')
buf.write('\x03ż\x03ż\x03ż\x03Ž\x03Ž\x03Ž\x03Ž')
buf.write('\x03Ž\x03ž\x03ž\x03ž\x03ž\x03ž\x03ž')
buf.write('\x03ž\x03ž\x03ž\x03ž\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ')
buf.write('\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ')
buf.write('\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ')
buf.write('\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ƃ')
buf.write('\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ')
buf.write('\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03ƃ')
buf.write('\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ')
buf.write('\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03Ƅ')
buf.write('\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ')
buf.write('\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ')
buf.write('\x03Ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ')
buf.write('\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ')
buf.write('\x03ƅ\x03ƅ\x03ƅ\x03Ɔ\x03Ɔ\x03Ɔ\x03Ƈ')
buf.write('\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ')
buf.write('\x03Ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ')
buf.write('\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03Ɖ')
buf.write('\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ')
buf.write('\x03Ɖ\x03Ɖ\x03Ɗ\x03Ɗ\x03Ɗ\x03Ɗ\x03Ɗ')
buf.write('\x03Ɗ\x03Ƌ\x03Ƌ\x03Ƌ\x03Ƌ\x03Ƌ\x03Ƌ')
buf.write('\x03Ƌ\x03Ƌ\x03ƌ\x03ƌ\x03ƌ\x03ƌ\x03ƌ')
buf.write('\x03ƍ\x03ƍ\x03ƍ\x03ƍ\x03ƍ\x03Ǝ\x03Ǝ')
buf.write('\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ')
buf.write('\x03Ə\x03Ə\x03Ə\x03Ə\x03Ə\x03Ɛ\x03Ɛ')
buf.write('\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ')
buf.write('\x03Ɛ\x03Ƒ\x03Ƒ\x03Ƒ\x03Ƒ\x03Ƒ\x03Ƒ')
buf.write('\x03ƒ\x03ƒ\x03ƒ\x03ƒ\x03ƒ\x03ƒ\x03Ɠ')
buf.write('\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɣ')
buf.write('\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ')
buf.write('\x03Ɣ\x03Ɣ\x03ƕ\x03ƕ\x03ƕ\x03ƕ\x03ƕ')
buf.write('\x03ƕ\x03ƕ\x03ƕ\x03Ɩ\x03Ɩ\x03Ɩ\x03Ɩ')
buf.write('\x03Ɩ\x03Ɩ\x03Ɨ\x03Ɨ\x03Ɨ\x03Ɨ\x03Ɨ')
buf.write('\x03Ɨ\x03Ɨ\x03Ƙ\x03Ƙ\x03Ƙ\x03Ƙ\x03Ƙ')
buf.write('\x03Ƙ\x03Ƙ\x03Ƙ\x03ƙ\x03ƙ\x03ƙ\x03ƙ')
buf.write('\x03ƙ\x03ƙ\x03ƙ\x03ƚ\x03ƚ\x03ƚ\x03ƚ')
buf.write('\x03ƚ\x03ƚ\x03ƚ\x03ƛ\x03ƛ\x03ƛ\x03ƛ')
buf.write('\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɲ')
buf.write('\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ')
buf.write('\x03Ɲ\x03ƞ\x03ƞ\x03ƞ\x03ƞ\x03ƞ\x03ƞ')
buf.write('\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ')
buf.write('\x03Ơ\x03Ơ\x03Ơ\x03Ơ\x03Ơ\x03Ơ\x03Ơ')
buf.write('\x03Ơ\x03ơ\x03ơ\x03ơ\x03ơ\x03ơ\x03ơ')
buf.write('\x03ơ\x03ơ\x03ơ\x03Ƣ\x03Ƣ\x03Ƣ\x03Ƣ')
buf.write('\x03Ƣ\x03Ƣ\x03Ƣ\x03Ƣ\x03Ƣ\x03ƣ\x03ƣ')
buf.write('\x03ƣ\x03ƣ\x03ƣ\x03ƣ\x03ƣ\x03Ƥ\x03Ƥ')
buf.write('\x03Ƥ\x03Ƥ\x03Ƥ\x03Ƥ\x03Ƥ\x03Ƥ\x03ƥ')
buf.write('\x03ƥ\x03ƥ\x03ƥ\x03ƥ\x03ƥ\x03ƥ\x03ƥ')
buf.write('\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ')
buf.write('\x03Ʀ\x03Ʀ\x03Ƨ\x03Ƨ\x03Ƨ\x03Ƨ\x03Ƨ')
buf.write('\x03ƨ\x03ƨ\x03ƨ\x03ƨ\x03ƨ\x03ƨ\x03ƨ')
buf.write('\x03ƨ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ')
buf.write('\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03ƪ\x03ƪ')
buf.write('\x03ƪ\x03ƪ\x03ƪ\x03ƫ\x03ƫ\x03ƫ\x03ƫ')
buf.write('\x03ƫ\x03ƫ\x03ƫ\x03ƫ\x03ƫ\x03Ƭ\x03Ƭ')
buf.write('\x03Ƭ\x03Ƭ\x03Ƭ\x03Ƭ\x03ƭ\x03ƭ\x03ƭ')
buf.write('\x03ƭ\x03ƭ\x03ƭ\x03Ʈ\x03Ʈ\x03Ʈ\x03Ʈ')
buf.write('\x03Ʈ\x03Ư\x03Ư\x03Ư\x03Ư\x03Ư\x03Ư')
buf.write('\x03Ư\x03ư\x03ư\x03ư\x03ư\x03ư\x03Ʊ')
buf.write('\x03Ʊ\x03Ʊ\x03Ʊ\x03Ʊ\x03Ʊ\x03Ʋ\x03Ʋ')
buf.write('\x03Ʋ\x03Ʋ\x03Ƴ\x03Ƴ\x03Ƴ\x03Ƴ\x03Ƴ')
buf.write('\x03Ƴ\x03Ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ')
buf.write('\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ')
buf.write('\x03ƴ\x03ƴ\x03Ƶ\x03Ƶ\x03Ƶ\x03Ƶ\x03Ƶ')
buf.write('\x03Ƶ\x03Ƶ\x03Ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ')
buf.write('\x03ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ')
buf.write('\x03ƶ\x03ƶ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ')
buf.write('\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ƹ')
buf.write('\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ')
buf.write('\x03Ƹ\x03Ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƹ')
buf.write('\x03ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƺ\x03ƺ')
buf.write('\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ')
buf.write('\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƻ\x03ƻ')
buf.write('\x03ƻ\x03ƻ\x03ƻ\x03ƻ\x03ƻ\x03ƻ\x03ƻ')
buf.write('\x03Ƽ\x03Ƽ\x03Ƽ\x03Ƽ\x03Ƽ\x03Ƽ\x03ƽ')
buf.write('\x03ƽ\x03ƽ\x03ƽ\x03ƽ\x03ƽ\x03ƽ\x03ƽ')
buf.write('\x03ƽ\x03ƾ\x03ƾ\x03ƾ\x03ƾ\x03ƾ\x03ƾ')
buf.write('\x03ƾ\x03ƾ\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ')
buf.write('\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ')
buf.write('\x03ƿ\x03ǀ\x03ǀ\x03ǀ\x03ǀ\x03ǀ\x03ǀ')
buf.write('\x03ǀ\x03ǀ\x03ǀ\x03ǁ\x03ǁ\x03ǁ\x03ǁ')
buf.write('\x03ǁ\x03ǂ\x03ǂ\x03ǂ\x03ǂ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03DŽ\x03DŽ\x03DŽ\x03DŽ\x03DŽ')
buf.write('\x03Dž\x03Dž\x03Dž\x03Dž\x03Dž\x03Dž\x03Dž')
buf.write('\x03Dž\x03Dž\x03Dž\x03Dž\x03dž\x03dž\x03dž')
buf.write('\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž')
buf.write('\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž')
buf.write('\x03dž\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ')
buf.write('\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ')
buf.write('\x03LJ\x03LJ\x03LJ\x03Lj\x03Lj\x03Lj\x03Lj')
buf.write('\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj')
buf.write('\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj')
buf.write('\x03Lj\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj')
buf.write('\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj')
buf.write('\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj')
buf.write('\x03lj\x03lj\x03lj\x03NJ\x03NJ\x03NJ\x03NJ')
buf.write('\x03NJ\x03NJ\x03NJ\x03NJ\x03NJ\x03NJ\x03NJ')
buf.write('\x03NJ\x03NJ\x03NJ\x03NJ\x03Nj\x03Nj\x03Nj')
buf.write('\x03Nj\x03Nj\x03Nj\x03Nj\x03Nj\x03Nj\x03Nj')
buf.write('\x03nj\x03nj\x03nj\x03nj\x03nj\x03nj\x03nj')
buf.write('\x03nj\x03nj\x03nj\x03nj\x03Ǎ\x03Ǎ\x03Ǎ')
buf.write('\x03Ǎ\x03Ǎ\x03Ǎ\x03Ǎ\x03Ǎ\x03ǎ\x03ǎ')
buf.write('\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03ǎ')
buf.write('\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03Ǐ\x03Ǐ\x03Ǐ')
buf.write('\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ')
buf.write('\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03ǐ')
buf.write('\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ')
buf.write('\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ')
buf.write('\x03ǐ\x03Ǒ\x03Ǒ\x03Ǒ\x03Ǒ\x03Ǒ\x03ǒ')
buf.write('\x03ǒ\x03ǒ\x03ǒ\x03Ǔ\x03Ǔ\x03Ǔ\x03Ǔ')
buf.write('\x03Ǔ\x03ǔ\x03ǔ\x03ǔ\x03ǔ\x03Ǖ\x03Ǖ')
buf.write('\x03Ǖ\x03Ǖ\x03Ǖ\x03ǖ\x03ǖ\x03ǖ\x03ǖ')
buf.write('\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ')
buf.write('\x03ǘ\x03ǘ\x03ǘ\x03ǘ\x03Ǚ\x03Ǚ\x03Ǚ')
buf.write('\x03Ǚ\x03Ǚ\x03Ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ')
buf.write('\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ')
buf.write('\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03Ǜ\x03Ǜ')
buf.write('\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ')
buf.write('\x03Ǜ\x03Ǜ\x03ǜ\x03ǜ\x03ǜ\x03ǜ\x03ǝ')
buf.write('\x03ǝ\x03ǝ\x03ǝ\x03ǝ\x03ǝ\x03ǝ\x03ǝ')
buf.write('\x03ǝ\x03Ǟ\x03Ǟ\x03Ǟ\x03Ǟ\x03Ǟ\x03Ǟ')
buf.write('\x03ǟ\x03ǟ\x03ǟ\x03ǟ\x03ǟ\x03ǟ\x03ǟ')
buf.write('\x03Ǡ\x03Ǡ\x03Ǡ\x03Ǡ\x03Ǡ\x03ǡ\x03ǡ')
buf.write('\x03ǡ\x03ǡ\x03ǡ\x03ǡ\x03ǡ\x03Ǣ\x03Ǣ')
buf.write('\x03Ǣ\x03Ǣ\x03Ǣ\x03Ǣ\x07Ǣ፨\nǢ')
buf.write('\x0cǢ\x0eǢ፫\x0bǢ\x03Ǣ\x03Ǣ\x03ǣ')
buf.write('\x03ǣ\x03ǣ\x07ǣ፲\nǣ\x0cǣ\x0eǣ')
buf.write('፵\x0bǣ\x03ǣ\x06ǣ፸\nǣ\rǣ')
buf.write('\x0eǣ፹\x03Ǥ\x03Ǥ\x03Ǥ\x07Ǥ\u137f')
buf.write('\nǤ\x0cǤ\x0eǤᎂ\x0bǤ\x03Ǥ\x06Ǥ')
buf.write('ᎅ\nǤ\rǤ\x0eǤᎆ\x03ǥ\x03ǥ')
buf.write('\x03ǥ\x03Ǧ\x03Ǧ\x03ǧ\x03ǧ\x03Ǩ\x03Ǩ')
buf.write('\x03Ǩ\x05Ǩ᎓\nǨ\x03Ǩ\x03Ǩ\x05Ǩ')
buf.write('᎗\nǨ\x05Ǩ᎙\nǨ\x03Ǩ\x03Ǩ\x05')
buf.write('Ǩ\u139d\nǨ\x03ǩ\x03ǩ\x03ǩ\x03ǩ\x03')
buf.write('ǩ\x07ǩᎤ\nǩ\x0cǩ\x0eǩᎧ\x0b')
buf.write('ǩ\x03ǩ\x03ǩ\x03Ǫ\x03Ǫ\x03Ǫ\x03Ǫ')
buf.write('\x03Ǫ\x05ǪᎰ\nǪ\x03Ǫ\x03Ǫ\x03ǫ')
buf.write('\x03ǫ\x03Ǭ\x03Ǭ\x03Ǭ\x07ǬᎹ\nǬ')
buf.write('\x0cǬ\x0eǬᎼ\x0bǬ\x03Ǭ\x03Ǭ\x03Ǭ')
buf.write('\x03ǭ\x03ǭ\x03ǭ\x07ǭᏄ\nǭ\x0cǭ')
buf.write('\x0eǭᏇ\x0bǭ\x03ǭ\x03ǭ\x03ǭ\x03Ǯ')
buf.write('\x03Ǯ\x03Ǯ\x07ǮᏏ\nǮ\x0cǮ\x0eǮ')
buf.write('Ꮢ\x0bǮ\x03Ǯ\x03Ǯ\x03Ǯ\x03ǯ\x03ǯ')
buf.write('\x03ǯ\x07ǯᏚ\nǯ\x0cǯ\x0eǯᏝ')
buf.write('\x0bǯ\x03ǯ\x03ǯ\x03ǯ\x03ǰ\x03ǰ\x03DZ')
buf.write('\x03DZ\x03DZ\x03DZ\x06DZᏨ\nDZ\rDZ')
buf.write('\x0eDZᏩ\x03DZ\x03DZ\x03Dz\x03Dz\x03dz')
buf.write('\x03dz\x03Ǵ\x03Ǵ\x03ǵ\x03ǵ\x03Ƕ\x03Ƕ')
buf.write('\x03Ƕ\x03Ƿ\x03Ƿ\x03Ǹ\x03Ǹ\x03ǹ\x03ǹ')
buf.write('\x03Ǻ\x03Ǻ\x03ǻ\x03ǻ\x03Ǽ\x03Ǽ\x03ǽ')
buf.write('\x03ǽ\x03ǽ\x03Ǿ\x03Ǿ\x03Ǿ\x03Ǿ\x07Ǿ')
buf.write('ᐌ\nǾ\x0cǾ\x0eǾᐏ\x0bǾ\x03Ǿ')
buf.write('\x03Ǿ\x03Ǿ\x03Ǿ\x03Ǿ\x05Ǿᐖ\nǾ')
buf.write('\x03ǿ\x03ǿ\x03Ȁ\x03Ȁ\x03ȁ\x03ȁ\x03ȁ')
buf.write('\x03Ȃ\x03Ȃ\x03ȃ\x03ȃ\x03ȃ\x03Ȅ\x03Ȅ')
buf.write('\x03Ȅ\x03Ȅ\x03Ȅ\x03Ȅ\x03Ȅ\x03Ȅ\x05Ȅ')
buf.write('ᐬ\nȄ\x03ȅ\x03ȅ\x03Ȇ\x03Ȇ\x03ȇ')
buf.write('\x03ȇ\x03Ȉ\x03Ȉ\x03ȉ\x03ȉ\x03Ȋ\x03Ȋ')
buf.write('\x03Ȋ\x03ȋ\x03ȋ\x03Ȍ\x03Ȍ\x03ȍ\x03ȍ')
buf.write('\x03Ȏ\x03Ȏ\x03ȏ\x03ȏ\x03Ȑ\x06Ȑᑆ')
buf.write('\nȐ\rȐ\x0eȐᑇ\x03Ȑ\x03Ȑ\x03ȑ')
buf.write('\x03ȑ\x03Ȓ\x06Ȓᑏ\nȒ\rȒ\x0eȒ')
buf.write('ᑐ\x03ȓ\x07ȓᑔ\nȓ\x0cȓ\x0eȓ')
buf.write('ᑗ\x0bȓ\x03ȓ\x05ȓᑚ\nȓ\x03ȓ')
buf.write('\x06ȓᑝ\nȓ\rȓ\x0eȓᑞ\x03Ȕ')
buf.write('\x03Ȕ\x03Ȕ\x03Ȕ\x07Ȕᑥ\nȔ\x0cȔ')
buf.write('\x0eȔᑨ\x0bȔ\x03Ȕ\x03Ȕ\x05Ȕᑬ')
buf.write('\nȔ\x03Ȕ\x03Ȕ\x03ȕ\x03ȕ\x03ȕ\x03ȕ')
buf.write('\x07ȕᑴ\nȕ\x0cȕ\x0eȕᑷ\x0bȕ')
buf.write('\x03ȕ\x03ȕ\x03ȕ\x03ȕ\x03ȕ\x03Ȗ\x03Ȗ')
buf.write('\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ')
buf.write('\x07Ȗᒇ\nȖ\x0cȖ\x0eȖᒊ\x0bȖ')
buf.write('\x03Ȗ\x03Ȗ\x05Ȗᒎ\nȖ\x03ȗ\x05ȗ')
buf.write('ᒑ\nȗ\x03ȗ\x03ȗ\x03Ș\x03Ș\x03ș')
buf.write('\x03ș\x03ș\x07șᒚ\nș\x0cș\x0eș')
buf.write('ᒝ\x0bș\x03Ț\x03Ț\x03Ț\x03Ț\x03Ț')
buf.write('\x03ț\x03ț\x03Ȝ\x03Ȝ\x03ȝ\x03ȝ\x03Ȟ')
buf.write('\x03Ȟ\x03ȟ\x03ȟ\x03Ƞ\x03Ƞ\x03ȡ\x03ȡ')
buf.write('\x03Ȣ\x03Ȣ\x03ȣ\x03ȣ\x03Ȥ\x03Ȥ\x03ȥ')
buf.write('\x03ȥ\x03Ȧ\x03Ȧ\x03ȧ\x03ȧ\x03Ȩ\x03Ȩ')
buf.write('\x03ȩ\x03ȩ\x03Ȫ\x03Ȫ\x03ȫ\x03ȫ\x03Ȭ')
buf.write('\x03Ȭ\x03ȭ\x03ȭ\x03Ȯ\x03Ȯ\x03ȯ\x03ȯ')
buf.write('\x03Ȱ\x03Ȱ\x03ȱ\x03ȱ\x03Ȳ\x03Ȳ\x03ȳ')
buf.write('\x03ȳ\x03ȴ\x03ȴ\x07ᎺᏅᏐᏛᑵ')
buf.write(
'\x02ȵ\x03\x03\x05\x04\x07\x05\t\x06\x0b\x07\r\x08\x0f\t\x11\n\x13\x0b\x15\x0c'
)
buf.write(
"\x17\r\x19\x0e\x1b\x0f\x1d\x10\x1f\x11!\x12#\x13%\x14'\x15)\x16+\x17"
)
buf.write('-\x18/\x191\x1a3\x1b5\x1c7\x1d9\x1e;\x1f= ?!A"C#E$G%')
buf.write("I&K'M(O)Q*S+U,W-Y.[/]0_1a2c3e4g5i6k7")
buf.write('m8o9q:s;u<w=y>{?}@\x7fA\x81B\x83C\x85D\x87E\x89')
buf.write('F\x8bG\x8dH\x8fI\x91J\x93K\x95L\x97M\x99')
buf.write('N\x9bO\x9dP\x9fQ¡R£S¥T§U©')
buf.write('V«W\xadX¯Y±Z³[µ\\·]¹')
buf.write('^»_½`¿aÁbÃcÅdÇeÉ')
buf.write('fËgÍhÏiÑjÓkÕl×mÙ')
buf.write('nÛoÝpßqárãsåtçué')
buf.write('vëwíxïyñzó{õ|÷}ù')
buf.write('~û\x7fý\x80ÿ\x81ā\x82ă')
buf.write('\x83ą\x84ć\x85ĉ\x86ċ\x87')
buf.write('č\x88ď\x89đ\x8aē\x8bĕ')
buf.write('\x8cė\x8dę\x8eě\x8fĝ\x90')
buf.write('ğ\x91ġ\x92ģ\x93ĥ\x94ħ')
buf.write('\x95ĩ\x96ī\x97ĭ\x98į\x99')
buf.write('ı\x9aij\x9bĵ\x9cķ\x9dĹ')
buf.write('\x9eĻ\x9fĽ\xa0Ŀ¡Ł¢')
buf.write('Ń£Ņ¤Ň¥ʼn¦ŋ')
buf.write('§ō¨ŏ©őªœ«')
buf.write('ŕ¬ŗ\xadř®ś¯ŝ')
buf.write('°ş±š²ţ³ť´')
buf.write('ŧµũ¶ū·ŭ¸ů')
buf.write('¹űºų»ŵ¼ŷ½')
buf.write('Ź¾Ż¿ŽÀſÁƁ')
buf.write('ÂƃÃƅÄƇÅƉÆ')
buf.write('ƋÇƍÈƏÉƑÊƓ')
buf.write('ËƕÌƗÍƙÎƛÏ')
buf.write('ƝÐƟÑơÒƣÓƥ')
buf.write('ÔƧÕƩÖƫ×ƭØ')
buf.write('ƯÙƱÚƳÛƵÜƷ')
buf.write('ÝƹÞƻßƽàƿá')
buf.write('ǁâǃãDžäLJålj')
buf.write('æNjçǍèǏéǑê')
buf.write('ǓëǕìǗíǙîǛ')
buf.write('ïǝðǟñǡòǣó')
buf.write('ǥôǧõǩöǫ÷ǭ')
buf.write('øǯùDZúdzûǵü')
buf.write('ǷýǹþǻÿǽĀǿ')
buf.write('āȁĂȃăȅĄȇą')
buf.write('ȉĆȋćȍĈȏĉȑ')
buf.write('ĊȓċȕČȗčșĎ')
buf.write('țďȝĐȟđȡĒȣ')
buf.write('ēȥĔȧĕȩĖȫė')
buf.write('ȭĘȯęȱĚȳěȵ')
buf.write('ĜȷĝȹĞȻğȽĠ')
buf.write('ȿġɁĢɃģɅĤɇ')
buf.write('ĥɉĦɋħɍĨɏĩ')
buf.write('ɑĪɓīɕĬɗĭə')
buf.write('ĮɛįɝİɟıɡIJ')
buf.write('ɣijɥĴɧĵɩĶɫ')
buf.write('ķɭĸɯĹɱĺɳĻ')
buf.write('ɵļɷĽɹľɻĿɽ')
buf.write('ŀɿŁʁłʃŃʅń')
buf.write('ʇŅʉņʋŇʍňʏ')
buf.write('ʼnʑŊʓŋʕŌʗō')
buf.write('ʙŎʛŏʝŐʟőʡ')
buf.write('ŒʣœʥŔʧŕʩŖ')
buf.write('ʫŗʭŘʯřʱŚʳ')
buf.write('śʵŜʷŝʹŞʻş')
buf.write('ʽŠʿšˁŢ˃ţ˅')
buf.write('ŤˇťˉŦˋŧˍŨ')
buf.write('ˏũˑŪ˓ū˕Ŭ˗')
buf.write('ŭ˙ٲů˝Ű˟ű')
buf.write('ˡŲˣų˥Ŵ˧ŵ˩')
buf.write('Ŷ˫ŷ˭Ÿ˯Ź˱ź')
buf.write('˳Ż˵ż˷Ž˹ž˻')
buf.write('ſ˽ƀ˿Ɓ́Ƃ̃ƃ')
buf.write('̅Ƅ̇ƅ̉Ɔ̋Ƈ̍')
buf.write('ƈ̏Ɖ̑Ɗ̓Ƌ̕ƌ')
buf.write('̗ƍ̙Ǝ̛Ə̝Ɛ̟')
buf.write('Ƒ̡ƒ̣Ɠ̥Ɣ̧ƕ')
buf.write('̩Ɩ̫Ɨ̭Ƙ̯ƙ̱')
buf.write('ƚ̳ƛ̵Ɯ̷Ɲ̹ƞ')
buf.write('̻Ɵ̽Ơ̿ơ\u0341Ƣ̓')
buf.write('ƣͅƤ͇ƥ͉Ʀ͋Ƨ')
buf.write('͍ƨ͏Ʃ͑ƪ͓ƫ͕')
buf.write('Ƭ͗ƭ͙Ʈ͛Ư͝ư')
buf.write('͟Ʊ͡ƲͣƳͥƴͧ')
buf.write('ƵͩƶͫƷͭƸͯƹ')
buf.write('ͱƺͳƻ͵Ƽͷƽ\u0379')
buf.write('ƾͻƿͽǀͿǁ\u0381ǂ')
buf.write('\u0383ǃ΅DŽ·DžΉdž\u038b')
buf.write('LJ\u038dLjΏljΑNJΓNj')
buf.write('ΕnjΗǍΙǎΛǏΝ')
buf.write('ǐΟǑΡǒΣǓΥǔ')
buf.write('ΧǕΩǖΫǗέǘί')
buf.write('ǙαǚγǛεǜηǝ')
buf.write('ιǞλǟνǠοǡρ')
buf.write('ǢσǣυǤχǥωǦ')
buf.write('ϋǧύǨϏǩϑǪϓ')
buf.write('\x02ϕ\x02ϗ\x02ϙ\x02ϛ\x02ϝ\x02ϟ\x02ϡ')
buf.write('ǫϣǬϥǭϧǮϩǯ')
buf.write('ϫǰϭDZϯDzϱdzϳ')
buf.write('ǴϵǵϷǶϹǷϻǸ')
buf.write('ϽǹϿǺЁǻЃǼЅ')
buf.write('ǽЇǾЉǿЋȀЍȁ')
buf.write('ЏȂБ\x02ГȃЕȄЗȅ')
buf.write('ЙȆЛȇНȈПȉС')
buf.write('\x02У\x02Х\x02ЧȊЩȋЫȌ')
buf.write('Э\x02Я\x02бȍгȎе\x02з')
buf.write('\x02й\x02л\x02н\x02п\x02с\x02у\x02х')
buf.write('\x02ч\x02щ\x02ы\x02э\x02я\x02ё\x02ѓ')
buf.write('\x02ѕ\x02ї\x02љ\x02ћ\x02ѝ\x02џ\x02ѡ')
buf.write(
"\x02ѣ\x02ѥ\x02ѧ\x02\x03\x02'\x05\x02\x0c\x0c\x0f\x0f))\x05\x022")
buf.write(
';CHch\x04\x02GGgg\x04\x02--//\t\x02\x0b\x0c\x0f\x0f""**>>]]}}\x05\x02\x0c'
)
buf.write(
'\x0c\x0f\x0f$$\x04\x022;aa\x05\x02\x0b\x0c\x0f\x0f""\x04\x02C\\c|\x04\x02\x0c'
)
buf.write(
'\x0c\x0f\x0f\x04\x02\x0b\x0b""\x05\x02%&2;aa\x04\x02CCcc\x04\x02DDdd\x04\x02'
)
buf.write(
'EEee\x04\x02FFff\x04\x02HHhh\x04\x02IIii\x04\x02JJjj\x04\x02KKkk\x04\x02LLll\x04'
)
buf.write(
'\x02MMmm\x04\x02NNnn\x04\x02OOoo\x04\x02PPpp\x04\x02QQqq\x04\x02RRrr\x04\x02SSs'
)
buf.write(
's\x04\x02TTtt\x04\x02UUuu\x04\x02VVvv\x04\x02WWww\x04\x02XXxx\x04\x02YYyy\x04\x02'
)
buf.write(
'ZZzz\x04\x02[[{{\x04\x02\\\\||\x02ᓝ\x02\x03\x03\x02\x02\x02\x02\x05\x03\x02\x02\x02'
)
buf.write(
'\x02\x07\x03\x02\x02\x02\x02\t\x03\x02\x02\x02\x02\x0b\x03\x02\x02\x02\x02\r\x03\x02\x02\x02\x02\x0f'
)
buf.write(
'\x03\x02\x02\x02\x02\x11\x03\x02\x02\x02\x02\x13\x03\x02\x02\x02\x02\x15\x03\x02\x02\x02\x02\x17\x03'
)
buf.write(
'\x02\x02\x02\x02\x19\x03\x02\x02\x02\x02\x1b\x03\x02\x02\x02\x02\x1d\x03\x02\x02\x02\x02\x1f\x03\x02'
)
buf.write(
"\x02\x02\x02!\x03\x02\x02\x02\x02#\x03\x02\x02\x02\x02%\x03\x02\x02\x02\x02'\x03\x02\x02\x02\x02)\x03"
)
buf.write(
'\x02\x02\x02\x02+\x03\x02\x02\x02\x02-\x03\x02\x02\x02\x02/\x03\x02\x02\x02\x021\x03\x02\x02\x02\x02'
)
buf.write(
'3\x03\x02\x02\x02\x025\x03\x02\x02\x02\x027\x03\x02\x02\x02\x029\x03\x02\x02\x02\x02;\x03'
)
buf.write(
'\x02\x02\x02\x02=\x03\x02\x02\x02\x02?\x03\x02\x02\x02\x02A\x03\x02\x02\x02\x02C\x03\x02\x02\x02\x02E'
)
buf.write(
'\x03\x02\x02\x02\x02G\x03\x02\x02\x02\x02I\x03\x02\x02\x02\x02K\x03\x02\x02\x02\x02M\x03\x02\x02\x02\x02'
)
buf.write(
'O\x03\x02\x02\x02\x02Q\x03\x02\x02\x02\x02S\x03\x02\x02\x02\x02U\x03\x02\x02\x02\x02W\x03\x02\x02\x02'
)
buf.write(
'\x02Y\x03\x02\x02\x02\x02[\x03\x02\x02\x02\x02]\x03\x02\x02\x02\x02_\x03\x02\x02\x02\x02a\x03\x02\x02'
)
buf.write(
'\x02\x02c\x03\x02\x02\x02\x02e\x03\x02\x02\x02\x02g\x03\x02\x02\x02\x02i\x03\x02\x02\x02\x02k\x03\x02'
)
buf.write(
'\x02\x02\x02m\x03\x02\x02\x02\x02o\x03\x02\x02\x02\x02q\x03\x02\x02\x02\x02s\x03\x02\x02\x02\x02u\x03'
)
buf.write(
'\x02\x02\x02\x02w\x03\x02\x02\x02\x02y\x03\x02\x02\x02\x02{\x03\x02\x02\x02\x02}\x03\x02\x02\x02\x02\x7f'
)
buf.write(
'\x03\x02\x02\x02\x02\x81\x03\x02\x02\x02\x02\x83\x03\x02\x02\x02\x02\x85\x03\x02\x02'
)
buf.write(
'\x02\x02\x87\x03\x02\x02\x02\x02\x89\x03\x02\x02\x02\x02\x8b\x03\x02\x02\x02\x02\x8d'
)
buf.write(
'\x03\x02\x02\x02\x02\x8f\x03\x02\x02\x02\x02\x91\x03\x02\x02\x02\x02\x93\x03\x02\x02'
)
buf.write(
'\x02\x02\x95\x03\x02\x02\x02\x02\x97\x03\x02\x02\x02\x02\x99\x03\x02\x02\x02\x02\x9b'
)
buf.write(
'\x03\x02\x02\x02\x02\x9d\x03\x02\x02\x02\x02\x9f\x03\x02\x02\x02\x02¡\x03\x02\x02'
)
buf.write(
'\x02\x02£\x03\x02\x02\x02\x02¥\x03\x02\x02\x02\x02§\x03\x02\x02\x02\x02©'
)
buf.write(
'\x03\x02\x02\x02\x02«\x03\x02\x02\x02\x02\xad\x03\x02\x02\x02\x02¯\x03\x02\x02'
)
buf.write(
'\x02\x02±\x03\x02\x02\x02\x02³\x03\x02\x02\x02\x02µ\x03\x02\x02\x02\x02·'
)
buf.write(
'\x03\x02\x02\x02\x02¹\x03\x02\x02\x02\x02»\x03\x02\x02\x02\x02½\x03\x02\x02'
)
buf.write(
'\x02\x02¿\x03\x02\x02\x02\x02Á\x03\x02\x02\x02\x02Ã\x03\x02\x02\x02\x02Å'
)
buf.write(
'\x03\x02\x02\x02\x02Ç\x03\x02\x02\x02\x02É\x03\x02\x02\x02\x02Ë\x03\x02\x02'
)
buf.write(
'\x02\x02Í\x03\x02\x02\x02\x02Ï\x03\x02\x02\x02\x02Ñ\x03\x02\x02\x02\x02Ó'
)
buf.write(
'\x03\x02\x02\x02\x02Õ\x03\x02\x02\x02\x02×\x03\x02\x02\x02\x02Ù\x03\x02\x02'
)
buf.write(
'\x02\x02Û\x03\x02\x02\x02\x02Ý\x03\x02\x02\x02\x02ß\x03\x02\x02\x02\x02á'
)
buf.write(
'\x03\x02\x02\x02\x02ã\x03\x02\x02\x02\x02å\x03\x02\x02\x02\x02ç\x03\x02\x02'
)
buf.write(
'\x02\x02é\x03\x02\x02\x02\x02ë\x03\x02\x02\x02\x02í\x03\x02\x02\x02\x02ï'
)
buf.write(
'\x03\x02\x02\x02\x02ñ\x03\x02\x02\x02\x02ó\x03\x02\x02\x02\x02õ\x03\x02\x02'
)
buf.write(
'\x02\x02÷\x03\x02\x02\x02\x02ù\x03\x02\x02\x02\x02û\x03\x02\x02\x02\x02ý'
)
buf.write(
'\x03\x02\x02\x02\x02ÿ\x03\x02\x02\x02\x02ā\x03\x02\x02\x02\x02ă\x03\x02\x02'
)
buf.write(
'\x02\x02ą\x03\x02\x02\x02\x02ć\x03\x02\x02\x02\x02ĉ\x03\x02\x02\x02\x02ċ'
)
buf.write(
'\x03\x02\x02\x02\x02č\x03\x02\x02\x02\x02ď\x03\x02\x02\x02\x02đ\x03\x02\x02'
)
buf.write(
'\x02\x02ē\x03\x02\x02\x02\x02ĕ\x03\x02\x02\x02\x02ė\x03\x02\x02\x02\x02ę'
)
buf.write(
'\x03\x02\x02\x02\x02ě\x03\x02\x02\x02\x02ĝ\x03\x02\x02\x02\x02ğ\x03\x02\x02'
)
buf.write(
'\x02\x02ġ\x03\x02\x02\x02\x02ģ\x03\x02\x02\x02\x02ĥ\x03\x02\x02\x02\x02ħ'
)
buf.write(
'\x03\x02\x02\x02\x02ĩ\x03\x02\x02\x02\x02ī\x03\x02\x02\x02\x02ĭ\x03\x02\x02'
)
buf.write(
'\x02\x02į\x03\x02\x02\x02\x02ı\x03\x02\x02\x02\x02ij\x03\x02\x02\x02\x02ĵ'
)
buf.write(
'\x03\x02\x02\x02\x02ķ\x03\x02\x02\x02\x02Ĺ\x03\x02\x02\x02\x02Ļ\x03\x02\x02'
)
buf.write(
'\x02\x02Ľ\x03\x02\x02\x02\x02Ŀ\x03\x02\x02\x02\x02Ł\x03\x02\x02\x02\x02Ń'
)
buf.write(
'\x03\x02\x02\x02\x02Ņ\x03\x02\x02\x02\x02Ň\x03\x02\x02\x02\x02ʼn\x03\x02\x02'
)
buf.write(
'\x02\x02ŋ\x03\x02\x02\x02\x02ō\x03\x02\x02\x02\x02ŏ\x03\x02\x02\x02\x02ő'
)
buf.write(
'\x03\x02\x02\x02\x02œ\x03\x02\x02\x02\x02ŕ\x03\x02\x02\x02\x02ŗ\x03\x02\x02'
)
buf.write(
'\x02\x02ř\x03\x02\x02\x02\x02ś\x03\x02\x02\x02\x02ŝ\x03\x02\x02\x02\x02ş'
)
buf.write(
'\x03\x02\x02\x02\x02š\x03\x02\x02\x02\x02ţ\x03\x02\x02\x02\x02ť\x03\x02\x02'
)
buf.write(
'\x02\x02ŧ\x03\x02\x02\x02\x02ũ\x03\x02\x02\x02\x02ū\x03\x02\x02\x02\x02ŭ'
)
buf.write(
'\x03\x02\x02\x02\x02ů\x03\x02\x02\x02\x02ű\x03\x02\x02\x02\x02ų\x03\x02\x02'
)
buf.write(
'\x02\x02ŵ\x03\x02\x02\x02\x02ŷ\x03\x02\x02\x02\x02Ź\x03\x02\x02\x02\x02Ż'
)
buf.write(
'\x03\x02\x02\x02\x02Ž\x03\x02\x02\x02\x02ſ\x03\x02\x02\x02\x02Ɓ\x03\x02\x02'
)
buf.write(
'\x02\x02ƃ\x03\x02\x02\x02\x02ƅ\x03\x02\x02\x02\x02Ƈ\x03\x02\x02\x02\x02Ɖ'
)
buf.write(
'\x03\x02\x02\x02\x02Ƌ\x03\x02\x02\x02\x02ƍ\x03\x02\x02\x02\x02Ə\x03\x02\x02'
)
buf.write(
'\x02\x02Ƒ\x03\x02\x02\x02\x02Ɠ\x03\x02\x02\x02\x02ƕ\x03\x02\x02\x02\x02Ɨ'
)
buf.write(
'\x03\x02\x02\x02\x02ƙ\x03\x02\x02\x02\x02ƛ\x03\x02\x02\x02\x02Ɲ\x03\x02\x02'
)
buf.write(
'\x02\x02Ɵ\x03\x02\x02\x02\x02ơ\x03\x02\x02\x02\x02ƣ\x03\x02\x02\x02\x02ƥ'
)
buf.write(
'\x03\x02\x02\x02\x02Ƨ\x03\x02\x02\x02\x02Ʃ\x03\x02\x02\x02\x02ƫ\x03\x02\x02'
)
buf.write(
'\x02\x02ƭ\x03\x02\x02\x02\x02Ư\x03\x02\x02\x02\x02Ʊ\x03\x02\x02\x02\x02Ƴ'
)
buf.write(
'\x03\x02\x02\x02\x02Ƶ\x03\x02\x02\x02\x02Ʒ\x03\x02\x02\x02\x02ƹ\x03\x02\x02'
)
buf.write(
'\x02\x02ƻ\x03\x02\x02\x02\x02ƽ\x03\x02\x02\x02\x02ƿ\x03\x02\x02\x02\x02ǁ'
)
buf.write(
'\x03\x02\x02\x02\x02ǃ\x03\x02\x02\x02\x02Dž\x03\x02\x02\x02\x02LJ\x03\x02\x02'
)
buf.write(
'\x02\x02lj\x03\x02\x02\x02\x02Nj\x03\x02\x02\x02\x02Ǎ\x03\x02\x02\x02\x02Ǐ'
)
buf.write(
'\x03\x02\x02\x02\x02Ǒ\x03\x02\x02\x02\x02Ǔ\x03\x02\x02\x02\x02Ǖ\x03\x02\x02'
)
buf.write(
'\x02\x02Ǘ\x03\x02\x02\x02\x02Ǚ\x03\x02\x02\x02\x02Ǜ\x03\x02\x02\x02\x02ǝ'
)
buf.write(
'\x03\x02\x02\x02\x02ǟ\x03\x02\x02\x02\x02ǡ\x03\x02\x02\x02\x02ǣ\x03\x02\x02'
)
buf.write(
'\x02\x02ǥ\x03\x02\x02\x02\x02ǧ\x03\x02\x02\x02\x02ǩ\x03\x02\x02\x02\x02ǫ'
)
buf.write(
'\x03\x02\x02\x02\x02ǭ\x03\x02\x02\x02\x02ǯ\x03\x02\x02\x02\x02DZ\x03\x02\x02'
)
buf.write(
'\x02\x02dz\x03\x02\x02\x02\x02ǵ\x03\x02\x02\x02\x02Ƿ\x03\x02\x02\x02\x02ǹ'
)
buf.write(
'\x03\x02\x02\x02\x02ǻ\x03\x02\x02\x02\x02ǽ\x03\x02\x02\x02\x02ǿ\x03\x02\x02'
)
buf.write(
'\x02\x02ȁ\x03\x02\x02\x02\x02ȃ\x03\x02\x02\x02\x02ȅ\x03\x02\x02\x02\x02ȇ'
)
buf.write(
'\x03\x02\x02\x02\x02ȉ\x03\x02\x02\x02\x02ȋ\x03\x02\x02\x02\x02ȍ\x03\x02\x02'
)
buf.write(
'\x02\x02ȏ\x03\x02\x02\x02\x02ȑ\x03\x02\x02\x02\x02ȓ\x03\x02\x02\x02\x02ȕ'
)
buf.write(
'\x03\x02\x02\x02\x02ȗ\x03\x02\x02\x02\x02ș\x03\x02\x02\x02\x02ț\x03\x02\x02'
)
buf.write(
'\x02\x02ȝ\x03\x02\x02\x02\x02ȟ\x03\x02\x02\x02\x02ȡ\x03\x02\x02\x02\x02ȣ'
)
buf.write(
'\x03\x02\x02\x02\x02ȥ\x03\x02\x02\x02\x02ȧ\x03\x02\x02\x02\x02ȩ\x03\x02\x02'
)
buf.write(
'\x02\x02ȫ\x03\x02\x02\x02\x02ȭ\x03\x02\x02\x02\x02ȯ\x03\x02\x02\x02\x02ȱ'
)
buf.write(
'\x03\x02\x02\x02\x02ȳ\x03\x02\x02\x02\x02ȵ\x03\x02\x02\x02\x02ȷ\x03\x02\x02'
)
buf.write(
'\x02\x02ȹ\x03\x02\x02\x02\x02Ȼ\x03\x02\x02\x02\x02Ƚ\x03\x02\x02\x02\x02ȿ'
)
buf.write(
'\x03\x02\x02\x02\x02Ɂ\x03\x02\x02\x02\x02Ƀ\x03\x02\x02\x02\x02Ʌ\x03\x02\x02'
)
buf.write(
'\x02\x02ɇ\x03\x02\x02\x02\x02ɉ\x03\x02\x02\x02\x02ɋ\x03\x02\x02\x02\x02ɍ'
)
buf.write(
'\x03\x02\x02\x02\x02ɏ\x03\x02\x02\x02\x02ɑ\x03\x02\x02\x02\x02ɓ\x03\x02\x02'
)
buf.write(
'\x02\x02ɕ\x03\x02\x02\x02\x02ɗ\x03\x02\x02\x02\x02ə\x03\x02\x02\x02\x02ɛ'
)
buf.write(
'\x03\x02\x02\x02\x02ɝ\x03\x02\x02\x02\x02ɟ\x03\x02\x02\x02\x02ɡ\x03\x02\x02'
)
buf.write(
'\x02\x02ɣ\x03\x02\x02\x02\x02ɥ\x03\x02\x02\x02\x02ɧ\x03\x02\x02\x02\x02ɩ'
)
buf.write(
'\x03\x02\x02\x02\x02ɫ\x03\x02\x02\x02\x02ɭ\x03\x02\x02\x02\x02ɯ\x03\x02\x02'
)
buf.write(
'\x02\x02ɱ\x03\x02\x02\x02\x02ɳ\x03\x02\x02\x02\x02ɵ\x03\x02\x02\x02\x02ɷ'
)
buf.write(
'\x03\x02\x02\x02\x02ɹ\x03\x02\x02\x02\x02ɻ\x03\x02\x02\x02\x02ɽ\x03\x02\x02'
)
buf.write(
'\x02\x02ɿ\x03\x02\x02\x02\x02ʁ\x03\x02\x02\x02\x02ʃ\x03\x02\x02\x02\x02ʅ'
)
buf.write(
'\x03\x02\x02\x02\x02ʇ\x03\x02\x02\x02\x02ʉ\x03\x02\x02\x02\x02ʋ\x03\x02\x02'
)
buf.write(
'\x02\x02ʍ\x03\x02\x02\x02\x02ʏ\x03\x02\x02\x02\x02ʑ\x03\x02\x02\x02\x02ʓ'
)
buf.write(
'\x03\x02\x02\x02\x02ʕ\x03\x02\x02\x02\x02ʗ\x03\x02\x02\x02\x02ʙ\x03\x02\x02'
)
buf.write(
'\x02\x02ʛ\x03\x02\x02\x02\x02ʝ\x03\x02\x02\x02\x02ʟ\x03\x02\x02\x02\x02ʡ'
)
buf.write(
'\x03\x02\x02\x02\x02ʣ\x03\x02\x02\x02\x02ʥ\x03\x02\x02\x02\x02ʧ\x03\x02\x02'
)
buf.write(
'\x02\x02ʩ\x03\x02\x02\x02\x02ʫ\x03\x02\x02\x02\x02ʭ\x03\x02\x02\x02\x02ʯ'
)
buf.write(
'\x03\x02\x02\x02\x02ʱ\x03\x02\x02\x02\x02ʳ\x03\x02\x02\x02\x02ʵ\x03\x02\x02'
)
buf.write(
'\x02\x02ʷ\x03\x02\x02\x02\x02ʹ\x03\x02\x02\x02\x02ʻ\x03\x02\x02\x02\x02ʽ'
)
buf.write(
'\x03\x02\x02\x02\x02ʿ\x03\x02\x02\x02\x02ˁ\x03\x02\x02\x02\x02˃\x03\x02\x02'
)
buf.write(
'\x02\x02˅\x03\x02\x02\x02\x02ˇ\x03\x02\x02\x02\x02ˉ\x03\x02\x02\x02\x02ˋ'
)
buf.write(
'\x03\x02\x02\x02\x02ˍ\x03\x02\x02\x02\x02ˏ\x03\x02\x02\x02\x02ˑ\x03\x02\x02'
)
buf.write(
'\x02\x02˓\x03\x02\x02\x02\x02˕\x03\x02\x02\x02\x02˗\x03\x02\x02\x02\x02˙'
)
buf.write(
'\x03\x02\x02\x02\x02˛\x03\x02\x02\x02\x02˝\x03\x02\x02\x02\x02˟\x03\x02\x02'
)
buf.write(
'\x02\x02ˡ\x03\x02\x02\x02\x02ˣ\x03\x02\x02\x02\x02˥\x03\x02\x02\x02\x02˧'
)
buf.write(
'\x03\x02\x02\x02\x02˩\x03\x02\x02\x02\x02˫\x03\x02\x02\x02\x02˭\x03\x02\x02'
)
buf.write(
'\x02\x02˯\x03\x02\x02\x02\x02˱\x03\x02\x02\x02\x02˳\x03\x02\x02\x02\x02˵'
)
buf.write(
'\x03\x02\x02\x02\x02˷\x03\x02\x02\x02\x02˹\x03\x02\x02\x02\x02˻\x03\x02\x02'
)
buf.write(
'\x02\x02˽\x03\x02\x02\x02\x02˿\x03\x02\x02\x02\x02́\x03\x02\x02\x02\x02̃'
)
buf.write(
'\x03\x02\x02\x02\x02̅\x03\x02\x02\x02\x02̇\x03\x02\x02\x02\x02̉\x03\x02\x02'
)
buf.write(
'\x02\x02̋\x03\x02\x02\x02\x02̍\x03\x02\x02\x02\x02̏\x03\x02\x02\x02\x02̑'
)
buf.write(
'\x03\x02\x02\x02\x02̓\x03\x02\x02\x02\x02̕\x03\x02\x02\x02\x02̗\x03\x02\x02'
)
buf.write(
'\x02\x02̙\x03\x02\x02\x02\x02̛\x03\x02\x02\x02\x02̝\x03\x02\x02\x02\x02̟'
)
buf.write(
'\x03\x02\x02\x02\x02̡\x03\x02\x02\x02\x02̣\x03\x02\x02\x02\x02̥\x03\x02\x02'
)
buf.write(
'\x02\x02̧\x03\x02\x02\x02\x02̩\x03\x02\x02\x02\x02̫\x03\x02\x02\x02\x02̭'
)
buf.write(
'\x03\x02\x02\x02\x02̯\x03\x02\x02\x02\x02̱\x03\x02\x02\x02\x02̳\x03\x02\x02'
)
buf.write(
'\x02\x02̵\x03\x02\x02\x02\x02̷\x03\x02\x02\x02\x02̹\x03\x02\x02\x02\x02̻'
)
buf.write(
'\x03\x02\x02\x02\x02̽\x03\x02\x02\x02\x02̿\x03\x02\x02\x02\x02́\x03\x02\x02'
)
buf.write(
'\x02\x02̓\x03\x02\x02\x02\x02ͅ\x03\x02\x02\x02\x02͇\x03\x02\x02\x02\x02͉'
)
buf.write(
'\x03\x02\x02\x02\x02͋\x03\x02\x02\x02\x02͍\x03\x02\x02\x02\x02͏\x03\x02\x02'
)
buf.write(
'\x02\x02͑\x03\x02\x02\x02\x02͓\x03\x02\x02\x02\x02͕\x03\x02\x02\x02\x02͗'
)
buf.write(
'\x03\x02\x02\x02\x02͙\x03\x02\x02\x02\x02͛\x03\x02\x02\x02\x02͝\x03\x02\x02'
)
buf.write(
'\x02\x02͟\x03\x02\x02\x02\x02͡\x03\x02\x02\x02\x02ͣ\x03\x02\x02\x02\x02ͥ'
)
buf.write(
'\x03\x02\x02\x02\x02ͧ\x03\x02\x02\x02\x02ͩ\x03\x02\x02\x02\x02ͫ\x03\x02\x02'
)
buf.write(
'\x02\x02ͭ\x03\x02\x02\x02\x02ͯ\x03\x02\x02\x02\x02ͱ\x03\x02\x02\x02\x02ͳ'
)
buf.write(
'\x03\x02\x02\x02\x02͵\x03\x02\x02\x02\x02ͷ\x03\x02\x02\x02\x02\u0379\x03\x02\x02'
)
buf.write(
'\x02\x02ͻ\x03\x02\x02\x02\x02ͽ\x03\x02\x02\x02\x02Ϳ\x03\x02\x02\x02\x02\u0381'
)
buf.write(
'\x03\x02\x02\x02\x02\u0383\x03\x02\x02\x02\x02΅\x03\x02\x02\x02\x02·\x03\x02\x02'
)
buf.write(
'\x02\x02Ή\x03\x02\x02\x02\x02\u038b\x03\x02\x02\x02\x02\u038d\x03\x02\x02\x02\x02Ώ'
)
buf.write(
'\x03\x02\x02\x02\x02Α\x03\x02\x02\x02\x02Γ\x03\x02\x02\x02\x02Ε\x03\x02\x02'
)
buf.write(
'\x02\x02Η\x03\x02\x02\x02\x02Ι\x03\x02\x02\x02\x02Λ\x03\x02\x02\x02\x02Ν'
)
buf.write(
'\x03\x02\x02\x02\x02Ο\x03\x02\x02\x02\x02Ρ\x03\x02\x02\x02\x02Σ\x03\x02\x02'
)
buf.write(
'\x02\x02Υ\x03\x02\x02\x02\x02Χ\x03\x02\x02\x02\x02Ω\x03\x02\x02\x02\x02Ϋ'
)
buf.write(
'\x03\x02\x02\x02\x02έ\x03\x02\x02\x02\x02ί\x03\x02\x02\x02\x02α\x03\x02\x02'
)
buf.write(
'\x02\x02γ\x03\x02\x02\x02\x02ε\x03\x02\x02\x02\x02η\x03\x02\x02\x02\x02ι'
)
buf.write(
'\x03\x02\x02\x02\x02λ\x03\x02\x02\x02\x02ν\x03\x02\x02\x02\x02ο\x03\x02\x02'
)
buf.write(
'\x02\x02ρ\x03\x02\x02\x02\x02σ\x03\x02\x02\x02\x02υ\x03\x02\x02\x02\x02χ'
)
buf.write(
'\x03\x02\x02\x02\x02ω\x03\x02\x02\x02\x02ϋ\x03\x02\x02\x02\x02ύ\x03\x02\x02'
)
buf.write(
'\x02\x02Ϗ\x03\x02\x02\x02\x02ϑ\x03\x02\x02\x02\x02ϓ\x03\x02\x02\x02\x02ϡ'
)
buf.write(
'\x03\x02\x02\x02\x02ϣ\x03\x02\x02\x02\x02ϥ\x03\x02\x02\x02\x02ϧ\x03\x02\x02'
)
buf.write(
'\x02\x02ϩ\x03\x02\x02\x02\x02ϫ\x03\x02\x02\x02\x02ϭ\x03\x02\x02\x02\x02ϯ'
)
buf.write(
'\x03\x02\x02\x02\x02ϱ\x03\x02\x02\x02\x02ϳ\x03\x02\x02\x02\x02ϵ\x03\x02\x02'
)
buf.write(
'\x02\x02Ϸ\x03\x02\x02\x02\x02Ϲ\x03\x02\x02\x02\x02ϻ\x03\x02\x02\x02\x02Ͻ'
)
buf.write(
'\x03\x02\x02\x02\x02Ͽ\x03\x02\x02\x02\x02Ё\x03\x02\x02\x02\x02Ѓ\x03\x02\x02'
)
buf.write(
'\x02\x02Ѕ\x03\x02\x02\x02\x02Ї\x03\x02\x02\x02\x02Љ\x03\x02\x02\x02\x02Ћ'
)
buf.write(
'\x03\x02\x02\x02\x02Ѝ\x03\x02\x02\x02\x02Џ\x03\x02\x02\x02\x02Г\x03\x02\x02'
)
buf.write(
'\x02\x02Е\x03\x02\x02\x02\x02З\x03\x02\x02\x02\x02Й\x03\x02\x02\x02\x02Л'
)
buf.write(
'\x03\x02\x02\x02\x02Н\x03\x02\x02\x02\x02П\x03\x02\x02\x02\x02Ч\x03\x02\x02'
)
buf.write(
'\x02\x02Щ\x03\x02\x02\x02\x02Ы\x03\x02\x02\x02\x02б\x03\x02\x02\x02\x02г'
)
buf.write(
'\x03\x02\x02\x02\x03ѩ\x03\x02\x02\x02\x05Ѭ\x03\x02\x02\x02\x07Ѯ\x03\x02\x02'
)
buf.write(
'\x02\tѲ\x03\x02\x02\x02\x0bѸ\x03\x02\x02\x02\rѾ\x03\x02\x02\x02\x0f'
)
buf.write(
'҈\x03\x02\x02\x02\x11Ҍ\x03\x02\x02\x02\x13Ғ\x03\x02\x02\x02\x15Қ')
buf.write(
'\x03\x02\x02\x02\x17Ҟ\x03\x02\x02\x02\x19Ң\x03\x02\x02\x02\x1bҨ\x03'
)
buf.write(
'\x02\x02\x02\x1dҫ\x03\x02\x02\x02\x1fҲ\x03\x02\x02\x02!ҹ\x03\x02\x02'
)
buf.write(
"\x02#ҽ\x03\x02\x02\x02%Ӈ\x03\x02\x02\x02'ӊ\x03\x02\x02\x02)Ӕ")
buf.write(
'\x03\x02\x02\x02+Ӛ\x03\x02\x02\x02-ӡ\x03\x02\x02\x02/Ӧ\x03\x02\x02\x02'
)
buf.write('1Ӱ\x03\x02\x02\x023ԇ\x03\x02\x02\x025ԍ\x03\x02\x02\x027')
buf.write('Ԕ\x03\x02\x02\x029Ԛ\x03\x02\x02\x02;Ԣ\x03\x02\x02\x02=Ԩ\x03'
)
buf.write(
'\x02\x02\x02?Զ\x03\x02\x02\x02AՃ\x03\x02\x02\x02CՒ\x03\x02\x02\x02E\u0557'
)
buf.write(
'\x03\x02\x02\x02G՝\x03\x02\x02\x02Iբ\x03\x02\x02\x02Kժ\x03\x02\x02\x02'
)
buf.write(
'Mկ\x03\x02\x02\x02Oշ\x03\x02\x02\x02Qռ\x03\x02\x02\x02Sտ\x03')
buf.write(
'\x02\x02\x02Uք\x03\x02\x02\x02Wֆ\x03\x02\x02\x02Y\u058c\x03\x02\x02\x02[֑'
)
buf.write(
'\x03\x02\x02\x02]֛\x03\x02\x02\x02_֣\x03\x02\x02\x02a֨\x03\x02\x02\x02'
)
buf.write(
'c֭\x03\x02\x02\x02eֲ\x03\x02\x02\x02gֺ\x03\x02\x02\x02iׄ\x03')
buf.write(
'\x02\x02\x02k\u05ca\x03\x02\x02\x02m\u05ce\x03\x02\x02\x02oד\x03\x02\x02\x02qי'
)
buf.write(
'\x03\x02\x02\x02sס\x03\x02\x02\x02uש\x03\x02\x02\x02wױ\x03\x02\x02\x02'
)
buf.write(
'y\u05f9\x03\x02\x02\x02{\u0600\x03\x02\x02\x02}؊\x03\x02\x02\x02\x7fؘ'
)
buf.write(
'\x03\x02\x02\x02\x81ؠ\x03\x02\x02\x02\x83ة\x03\x02\x02\x02\x85')
buf.write('ر\x03\x02\x02\x02\x87ف\x03\x02\x02\x02\x89ي\x03\x02\x02\x02'
)
buf.write('\x8bٕ\x03\x02\x02\x02\x8d١\x03\x02\x02\x02\x8f٭\x03')
buf.write('\x02\x02\x02\x91ٵ\x03\x02\x02\x02\x93ٽ\x03\x02\x02\x02\x95چ'
)
buf.write(
'\x03\x02\x02\x02\x97ڎ\x03\x02\x02\x02\x99ښ\x03\x02\x02\x02\x9b')
buf.write('ڪ\x03\x02\x02\x02\x9dگ\x03\x02\x02\x02\x9fڵ\x03\x02\x02\x02'
)
buf.write('¡ڼ\x03\x02\x02\x02£ۂ\x03\x02\x02\x02¥ۇ\x03')
buf.write('\x02\x02\x02§ۏ\x03\x02\x02\x02©ۜ\x03\x02\x02\x02«ۣ')
buf.write('\x03\x02\x02\x02\xadۯ\x03\x02\x02\x02¯۵\x03\x02\x02\x02±')
buf.write('ۺ\x03\x02\x02\x02³܃\x03\x02\x02\x02µ܈\x03\x02\x02\x02')
buf.write('·܌\x03\x02\x02\x02¹ܛ\x03\x02\x02\x02»ܦ\x03')
buf.write('\x02\x02\x02½ܪ\x03\x02\x02\x02¿ܰ\x03\x02\x02\x02Áܴ')
buf.write('\x03\x02\x02\x02Ãܼ\x03\x02\x02\x02Å݄\x03\x02\x02\x02Ç')
buf.write('ݎ\x03\x02\x02\x02Éݘ\x03\x02\x02\x02Ëݠ\x03\x02\x02\x02')
buf.write('Íݩ\x03\x02\x02\x02Ïݲ\x03\x02\x02\x02Ñݺ\x03')
buf.write('\x02\x02\x02Óށ\x03\x02\x02\x02Õއ\x03\x02\x02\x02×ތ')
buf.write('\x03\x02\x02\x02Ùޚ\x03\x02\x02\x02Ûޤ\x03\x02\x02\x02Ý')
buf.write('ެ\x03\x02\x02\x02ß\u07b9\x03\x02\x02\x02á߂\x03\x02\x02\x02')
buf.write('ãߋ\x03\x02\x02\x02åߒ\x03\x02\x02\x02çߗ\x03')
buf.write('\x02\x02\x02é߰\x03\x02\x02\x02ëߵ\x03\x02\x02\x02í߽')
buf.write('\x03\x02\x02\x02ïࠂ\x03\x02\x02\x02ñࠈ\x03\x02\x02\x02ó')
buf.write('ࠎ\x03\x02\x02\x02õࠕ\x03\x02\x02\x02÷ࠞ\x03\x02\x02\x02')
buf.write('ùࠢ\x03\x02\x02\x02û࠱\x03\x02\x02\x02ý࠵\x03')
buf.write('\x02\x02\x02ÿ࠼\x03\x02\x02\x02āࡃ\x03\x02\x02\x02ăࡌ')
buf.write('\x03\x02\x02\x02ąࡓ\x03\x02\x02\x02ć\u085d\x03\x02\x02\x02ĉ')
buf.write('\u086c\x03\x02\x02\x02ċࡷ\x03\x02\x02\x02čࡿ\x03\x02\x02\x02')
buf.write('ďࢉ\x03\x02\x02\x02đ\u0891\x03\x02\x02\x02ē࢘\x03')
buf.write('\x02\x02\x02ĕ࢝\x03\x02\x02\x02ėࢥ\x03\x02\x02\x02ęࢮ')
buf.write('\x03\x02\x02\x02ěࢶ\x03\x02\x02\x02ĝࢾ\x03\x02\x02\x02ğ')
buf.write('ࣄ\x03\x02\x02\x02ġ࣊\x03\x02\x02\x02ģ࣐\x03\x02\x02\x02')
buf.write('ĥࣖ\x03\x02\x02\x02ħ\u08e2\x03\x02\x02\x02ĩࣨ\x03')
buf.write('\x02\x02\x02īࣲ\x03\x02\x02\x02ĭࣺ\x03\x02\x02\x02įࣾ')
buf.write('\x03\x02\x02\x02ıअ\x03\x02\x02\x02ijऋ\x03\x02\x02\x02ĵ')
buf.write('ऐ\x03\x02\x02\x02ķक\x03\x02\x02\x02Ĺञ\x03\x02\x02\x02')
buf.write('Ļण\x03\x02\x02\x02Ľऩ\x03\x02\x02\x02Ŀय\x03')
buf.write('\x02\x02\x02Łस\x03\x02\x02\x02Ńऽ\x03\x02\x02\x02Ņॄ')
buf.write('\x03\x02\x02\x02Ňॉ\x03\x02\x02\x02ʼnॎ\x03\x02\x02\x02ŋ')
buf.write('॑\x03\x02\x02\x02ōक़\x03\x02\x02\x02ŏॢ\x03\x02\x02\x02')
buf.write('ő॥\x03\x02\x02\x02œ७\x03\x02\x02\x02ŕॷ\x03')
buf.write('\x02\x02\x02ŗঁ\x03\x02\x02\x02řঈ\x03\x02\x02\x02ś\u098e')
buf.write('\x03\x02\x02\x02ŝখ\x03\x02\x02\x02şঠ\x03\x02\x02\x02š')
buf.write('ন\x03\x02\x02\x02ţ\u09b1\x03\x02\x02\x02ťস\x03\x02\x02\x02')
buf.write('ŧা\x03\x02\x02\x02ũৄ\x03\x02\x02\x02ūো\x03')
buf.write(
'\x02\x02\x02ŭ\u09d8\x03\x02\x02\x02ůৠ\x03\x02\x02\x02ű\u09e4')
buf.write('\x03\x02\x02\x02ų৬\x03\x02\x02\x02ŵ৶\x03\x02\x02\x02ŷ')
buf.write(
'\u09ff\x03\x02\x02\x02Ź\u0a04\x03\x02\x02\x02Żਏ\x03\x02\x02\x02')
buf.write('Ž\u0a12\x03\x02\x02\x02ſਜ\x03\x02\x02\x02Ɓਤ\x03')
buf.write('\x02\x02\x02ƃ\u0a29\x03\x02\x02\x02ƅਮ\x03\x02\x02\x02Ƈਲ਼')
buf.write('\x03\x02\x02\x02Ɖ਼\x03\x02\x02\x02Ƌੁ\x03\x02\x02\x02ƍ')
buf.write('ੌ\x03\x02\x02\x02Ə\u0a54\x03\x02\x02\x02Ƒਖ਼\x03\x02\x02\x02')
buf.write('Ɠ\u0a5f\x03\x02\x02\x02ƕ੧\x03\x02\x02\x02Ɨ੬\x03')
buf.write(
'\x02\x02\x02ƙੲ\x03\x02\x02\x02ƛ\u0a78\x03\x02\x02\x02Ɲ\u0a7e')
buf.write('\x03\x02\x02\x02Ɵ\u0a84\x03\x02\x02\x02ơઊ\x03\x02\x02\x02ƣ')
buf.write('એ\x03\x02\x02\x02ƥખ\x03\x02\x02\x02Ƨચ\x03\x02\x02\x02')
buf.write('Ʃડ\x03\x02\x02\x02ƫધ\x03\x02\x02\x02ƭબ\x03')
buf.write(
'\x02\x02\x02Ư\u0ab1\x03\x02\x02\x02Ʊશ\x03\x02\x02\x02Ƴ\u0aba')
buf.write('\x03\x02\x02\x02Ƶૂ\x03\x02\x02\x02Ʒો\x03\x02\x02\x02ƹ')
buf.write(
'\u0ad4\x03\x02\x02\x02ƻ\u0adb\x03\x02\x02\x02ƽૡ\x03\x02\x02\x02')
buf.write('ƿ૧\x03\x02\x02\x02ǁ૮\x03\x02\x02\x02ǃ\u0af7\x03')
buf.write('\x02\x02\x02Dž\u0b00\x03\x02\x02\x02LJଅ\x03\x02\x02\x02ljଋ')
buf.write('\x03\x02\x02\x02Nj\u0b12\x03\x02\x02\x02Ǎଘ\x03\x02\x02\x02Ǐ')
buf.write('ଡ\x03\x02\x02\x02Ǒଦ\x03\x02\x02\x02Ǔପ\x03\x02\x02\x02')
buf.write('Ǖଲ\x03\x02\x02\x02Ǘ\u0b3b\x03\x02\x02\x02Ǚି\x03')
buf.write(
'\x02\x02\x02Ǜ\u0b45\x03\x02\x02\x02ǝ\u0b4e\x03\x02\x02\x02ǟ\u0b54'
)
buf.write('\x03\x02\x02\x02ǡ\u0b5b\x03\x02\x02\x02ǣୟ\x03\x02\x02\x02ǥ')
buf.write('ୢ\x03\x02\x02\x02ǧ୪\x03\x02\x02\x02ǩ୲\x03\x02\x02\x02')
buf.write('ǫ\u0b79\x03\x02\x02\x02ǭ\u0b81\x03\x02\x02\x02ǯஒ\x03')
buf.write(
'\x02\x02\x02DZ\u0b9d\x03\x02\x02\x02dzந\x03\x02\x02\x02ǵ\u0bad')
buf.write('\x03\x02\x02\x02Ƿவ\x03\x02\x02\x02ǹ\u0bc3\x03\x02\x02\x02ǻ')
buf.write(
'ே\x03\x02\x02\x02ǽ\u0bce\x03\x02\x02\x02ǿ\u0bd3\x03\x02\x02\x02')
buf.write('ȁ\u0bd9\x03\x02\x02\x02ȃ\u0be0\x03\x02\x02\x02ȅ௨\x03')
buf.write('\x02\x02\x02ȇ௲\x03\x02\x02\x02ȉ௹\x03\x02\x02\x02ȋ\u0bfc')
buf.write('\x03\x02\x02\x02ȍఀ\x03\x02\x02\x02ȏఄ\x03\x02\x02\x02ȑ')
buf.write('ఈ\x03\x02\x02\x02ȓఋ\x03\x02\x02\x02ȕఐ\x03\x02\x02\x02')
buf.write('ȗక\x03\x02\x02\x02șజ\x03\x02\x02\x02țట\x03')
buf.write('\x02\x02\x02ȝధ\x03\x02\x02\x02ȟభ\x03\x02\x02\x02ȡస')
buf.write('\x03\x02\x02\x02ȣీ\x03\x02\x02\x02ȥౄ\x03\x02\x02\x02ȧ')
buf.write('ొ\x03\x02\x02\x02ȩ\u0c4f\x03\x02\x02\x02ȫౚ\x03\x02\x02\x02')
buf.write('ȭౢ\x03\x02\x02\x02ȯ\u0c72\x03\x02\x02\x02ȱ౽\x03')
buf.write('\x02\x02\x02ȳ಄\x03\x02\x02\x02ȵಎ\x03\x02\x02\x02ȷಖ')
buf.write('\x03\x02\x02\x02ȹಛ\x03\x02\x02\x02Ȼತ\x03\x02\x02\x02Ƚ')
buf.write(
'ಪ\x03\x02\x02\x02ȿ\u0cb4\x03\x02\x02\x02Ɂ\u0cba\x03\x02\x02\x02')
buf.write('Ƀಿ\x03\x02\x02\x02Ʌೋ\x03\x02\x02\x02ɇ\u0cd4\x03')
buf.write('\x02\x02\x02ɉೞ\x03\x02\x02\x02ɋ\u0ce5\x03\x02\x02\x02ɍ೯')
buf.write('\x03\x02\x02\x02ɏ\u0cf9\x03\x02\x02\x02ɑഁ\x03\x02\x02\x02ɓ')
buf.write('ഇ\x03\x02\x02\x02ɕ\u0d11\x03\x02\x02\x02ɗഗ\x03\x02\x02\x02')
buf.write('əഝ\x03\x02\x02\x02ɛഡ\x03\x02\x02\x02ɝദ\x03')
buf.write('\x02\x02\x02ɟഫ\x03\x02\x02\x02ɡല\x03\x02\x02\x02ɣശ')
buf.write('\x03\x02\x02\x02ɥീ\x03\x02\x02\x02ɧൌ\x03\x02\x02\x02ɩ')
buf.write(
'\u0d53\x03\x02\x02\x02ɫ൝\x03\x02\x02\x02ɭ\u0d64\x03\x02\x02\x02')
buf.write('ɯ൬\x03\x02\x02\x02ɱ൴\x03\x02\x02\x02ɳඈ\x03')
buf.write('\x02\x02\x02ɵඏ\x03\x02\x02\x02ɷග\x03\x02\x02\x02ɹඣ')
buf.write('\x03\x02\x02\x02ɻත\x03\x02\x02\x02ɽඳ\x03\x02\x02\x02ɿ')
buf.write('ර\x03\x02\x02\x02ʁෂ\x03\x02\x02\x02ʃ\u0dc8\x03\x02\x02\x02')
buf.write('ʅෑ\x03\x02\x02\x02ʇෘ\x03\x02\x02\x02ʉො\x03')
buf.write('\x02\x02\x02ʋ\u0de2\x03\x02\x02\x02ʍ෧\x03\x02\x02\x02ʏ෭')
buf.write('\x03\x02\x02\x02ʑ෴\x03\x02\x02\x02ʓ\u0df9\x03\x02\x02\x02ʕ')
buf.write('ฃ\x03\x02\x02\x02ʗช\x03\x02\x02\x02ʙถ\x03\x02\x02\x02')
buf.write('ʛบ\x03\x02\x02\x02ʝม\x03\x02\x02\x02ʟศ\x03')
buf.write('\x02\x02\x02ʡอ\x03\x02\x02\x02ʣี\x03\x02\x02\x02ʥ\u0e3c')
buf.write('\x03\x02\x02\x02ʧแ\x03\x02\x02\x02ʩ๊\x03\x02\x02\x02ʫ')
buf.write(
'๕\x03\x02\x02\x02ʭ\u0e62\x03\x02\x02\x02ʯ\u0e74\x03\x02\x02\x02')
buf.write('ʱ\u0e80\x03\x02\x02\x02ʳຐ\x03\x02\x02\x02ʵດ\x03')
buf.write('\x02\x02\x02ʷນ\x03\x02\x02\x02ʹຢ\x03\x02\x02\x02ʻຨ')
buf.write('\x03\x02\x02\x02ʽອ\x03\x02\x02\x02ʿຶ\x03\x02\x02\x02ˁ')
buf.write('\u0ebf\x03\x02\x02\x02˃່\x03\x02\x02\x02˅໗\x03\x02\x02\x02')
buf.write('ˇໞ\x03\x02\x02\x02ˉ\u0ee3\x03\x02\x02\x02ˋ\u0ee8\x03')
buf.write(
'\x02\x02\x02ˍ\u0ef1\x03\x02\x02\x02ˏ\u0efa\x03\x02\x02\x02ˑ\u0eff'
)
buf.write('\x03\x02\x02\x02˓།\x03\x02\x02\x02˕༕\x03\x02\x02\x02˗')
buf.write('༞\x03\x02\x02\x02˙༩\x03\x02\x02\x02˛༯\x03\x02\x02\x02')
buf.write('˝༷\x03\x02\x02\x02˟ཁ\x03\x02\x02\x02ˡཎ\x03')
buf.write('\x02\x02\x02ˣཕ\x03\x02\x02\x02˥འ\x03\x02\x02\x02˧ཧ')
buf.write('\x03\x02\x02\x02˩ཱི\x03\x02\x02\x02˫ྀ\x03\x02\x02\x02˭')
buf.write('ྎ\x03\x02\x02\x02˯ྖ\x03\x02\x02\x02˱ྞ\x03\x02\x02\x02')
buf.write('˳ྦ\x03\x02\x02\x02˵ྫྷ\x03\x02\x02\x02˷ྰ\x03')
buf.write('\x02\x02\x02˹ྵ\x03\x02\x02\x02˻ྺ\x03\x02\x02\x02˽࿄')
buf.write(
'\x03\x02\x02\x02˿\u0fe0\x03\x02\x02\x02́\u0ffb\x03\x02\x02\x02̃')
buf.write('ဓ\x03\x02\x02\x02̅အ\x03\x02\x02\x02̇ု\x03\x02\x02\x02')
buf.write('̉ဿ\x03\x02\x02\x02̋၏\x03\x02\x02\x02̍ၒ\x03')
buf.write('\x02\x02\x02̏ၛ\x03\x02\x02\x02̑ၧ\x03\x02\x02\x02̓ၱ')
buf.write('\x03\x02\x02\x02̕ၷ\x03\x02\x02\x02̗ၿ\x03\x02\x02\x02̙')
buf.write('ႄ\x03\x02\x02\x02̛ႉ\x03\x02\x02\x02̝႒\x03\x02\x02\x02')
buf.write('̟႗\x03\x02\x02\x02̡Ⴁ\x03\x02\x02\x02̣Ⴇ\x03')
buf.write('\x02\x02\x02̥Ⴍ\x03\x02\x02\x02̧Ⴔ\x03\x02\x02\x02̩Ⴞ')
buf.write(
'\x03\x02\x02\x02̫\u10c6\x03\x02\x02\x02̭\u10cc\x03\x02\x02\x02̯')
buf.write('დ\x03\x02\x02\x02̱მ\x03\x02\x02\x02̳ტ\x03\x02\x02\x02')
buf.write('̵ჩ\x03\x02\x02\x02̷ჭ\x03\x02\x02\x02̹ჳ\x03')
buf.write('\x02\x02\x02̻ჼ\x03\x02\x02\x02̽ᄂ\x03\x02\x02\x02̿ᄉ')
buf.write('\x03\x02\x02\x02́ᄑ\x03\x02\x02\x02̓ᄚ\x03\x02\x02\x02ͅ')
buf.write('ᄣ\x03\x02\x02\x02͇ᄪ\x03\x02\x02\x02͉ᄲ\x03\x02\x02\x02')
buf.write('͋ᄺ\x03\x02\x02\x02͍ᅃ\x03\x02\x02\x02͏ᅈ\x03')
buf.write('\x02\x02\x02͑ᅐ\x03\x02\x02\x02͓ᅛ\x03\x02\x02\x02͕ᅠ')
buf.write('\x03\x02\x02\x02͗ᅩ\x03\x02\x02\x02͙ᅯ\x03\x02\x02\x02͛')
buf.write('ᅵ\x03\x02\x02\x02͝ᅺ\x03\x02\x02\x02͟ᆁ\x03\x02\x02\x02')
buf.write('͡ᆆ\x03\x02\x02\x02ͣᆌ\x03\x02\x02\x02ͥᆐ\x03')
buf.write('\x02\x02\x02ͧᆗ\x03\x02\x02\x02ͩᆥ\x03\x02\x02\x02ͫᆭ')
buf.write('\x03\x02\x02\x02ͭᆺ\x03\x02\x02\x02ͯᇅ\x03\x02\x02\x02ͱ')
buf.write('ᇏ\x03\x02\x02\x02ͳᇙ\x03\x02\x02\x02͵ᇧ\x03\x02\x02\x02')
buf.write('ͷᇰ\x03\x02\x02\x02\u0379ᇶ\x03\x02\x02\x02ͻᇿ\x03')
buf.write('\x02\x02\x02ͽሇ\x03\x02\x02\x02Ϳሔ\x03\x02\x02\x02\u0381ም')
buf.write('\x03\x02\x02\x02\u0383ሢ\x03\x02\x02\x02΅ሦ\x03\x02\x02\x02·')
buf.write(
'ሿ\x03\x02\x02\x02Ήቄ\x03\x02\x02\x02\u038b\u124f\x03\x02\x02\x02')
buf.write('\u038dቡ\x03\x02\x02\x02Ώቱ\x03\x02\x02\x02Αኄ\x03')
buf.write('\x02\x02\x02Γኛ\x03\x02\x02\x02Εኪ\x03\x02\x02\x02Ηኴ')
buf.write(
'\x03\x02\x02\x02Ι\u12bf\x03\x02\x02\x02Λ\u12c7\x03\x02\x02\x02Ν')
buf.write('ዔ\x03\x02\x02\x02Οዤ\x03\x02\x02\x02Ρዴ\x03\x02\x02\x02')
buf.write('Σዹ\x03\x02\x02\x02Υዽ\x03\x02\x02\x02Χጂ\x03')
buf.write('\x02\x02\x02Ωጆ\x03\x02\x02\x02Ϋጋ\x03\x02\x02\x02έጏ')
buf.write('\x03\x02\x02\x02ί\u1316\x03\x02\x02\x02αጚ\x03\x02\x02\x02γ')
buf.write('ጠ\x03\x02\x02\x02εጰ\x03\x02\x02\x02ηጻ\x03\x02\x02\x02')
buf.write('ιጿ\x03\x02\x02\x02λፈ\x03\x02\x02\x02νፎ\x03')
buf.write('\x02\x02\x02οፕ\x03\x02\x02\x02ρፚ\x03\x02\x02\x02σ፡')
buf.write('\x03\x02\x02\x02υ፮\x03\x02\x02\x02χ፻\x03\x02\x02\x02ω')
buf.write('ᎈ\x03\x02\x02\x02ϋᎋ\x03\x02\x02\x02ύᎍ\x03\x02\x02\x02')
buf.write('Ϗᎏ\x03\x02\x02\x02ϑ\u139e\x03\x02\x02\x02ϓᎪ\x03')
buf.write('\x02\x02\x02ϕᎳ\x03\x02\x02\x02ϗᎵ\x03\x02\x02\x02ϙᏀ')
buf.write('\x03\x02\x02\x02ϛᏋ\x03\x02\x02\x02ϝᏖ\x03\x02\x02\x02ϟ')
buf.write('Ꮱ\x03\x02\x02\x02ϡᏣ\x03\x02\x02\x02ϣᏭ\x03\x02\x02\x02')
buf.write('ϥᏯ\x03\x02\x02\x02ϧᏱ\x03\x02\x02\x02ϩᏳ\x03')
buf.write('\x02\x02\x02ϫᏵ\x03\x02\x02\x02ϭᏸ\x03\x02\x02\x02ϯᏺ')
buf.write('\x03\x02\x02\x02ϱᏼ\x03\x02\x02\x02ϳ\u13fe\x03\x02\x02\x02ϵ')
buf.write('᐀\x03\x02\x02\x02Ϸᐂ\x03\x02\x02\x02Ϲᐄ\x03\x02\x02\x02')
buf.write('ϻᐕ\x03\x02\x02\x02Ͻᐗ\x03\x02\x02\x02Ͽᐙ\x03')
buf.write('\x02\x02\x02Ёᐛ\x03\x02\x02\x02Ѓᐞ\x03\x02\x02\x02Ѕᐠ')
buf.write('\x03\x02\x02\x02Їᐫ\x03\x02\x02\x02Љᐭ\x03\x02\x02\x02Ћ')
buf.write('ᐯ\x03\x02\x02\x02Ѝᐱ\x03\x02\x02\x02Џᐳ\x03\x02\x02\x02')
buf.write('Бᐵ\x03\x02\x02\x02Гᐷ\x03\x02\x02\x02Еᐺ\x03')
buf.write('\x02\x02\x02Зᐼ\x03\x02\x02\x02Йᐾ\x03\x02\x02\x02Лᑀ')
buf.write('\x03\x02\x02\x02Нᑂ\x03\x02\x02\x02Пᑅ\x03\x02\x02\x02С')
buf.write('ᑋ\x03\x02\x02\x02Уᑎ\x03\x02\x02\x02Хᑕ\x03\x02\x02\x02')
buf.write('Чᑠ\x03\x02\x02\x02Щᑯ\x03\x02\x02\x02Ыᑽ\x03')
buf.write('\x02\x02\x02Эᒐ\x03\x02\x02\x02Яᒔ\x03\x02\x02\x02бᒖ')
buf.write('\x03\x02\x02\x02гᒞ\x03\x02\x02\x02еᒣ\x03\x02\x02\x02з')
buf.write('ᒥ\x03\x02\x02\x02йᒧ\x03\x02\x02\x02лᒩ\x03\x02\x02\x02')
buf.write('нᒫ\x03\x02\x02\x02пᒭ\x03\x02\x02\x02сᒯ\x03')
buf.write('\x02\x02\x02уᒱ\x03\x02\x02\x02хᒳ\x03\x02\x02\x02чᒵ')
buf.write('\x03\x02\x02\x02щᒷ\x03\x02\x02\x02ыᒹ\x03\x02\x02\x02э')
buf.write('ᒻ\x03\x02\x02\x02яᒽ\x03\x02\x02\x02ёᒿ\x03\x02\x02\x02')
buf.write('ѓᓁ\x03\x02\x02\x02ѕᓃ\x03\x02\x02\x02їᓅ\x03')
buf.write('\x02\x02\x02љᓇ\x03\x02\x02\x02ћᓉ\x03\x02\x02\x02ѝᓋ')
buf.write('\x03\x02\x02\x02џᓍ\x03\x02\x02\x02ѡᓏ\x03\x02\x02\x02ѣ')
buf.write('ᓑ\x03\x02\x02\x02ѥᓓ\x03\x02\x02\x02ѧᓕ\x03\x02\x02\x02')
buf.write('ѩѪ\x070\x02\x02Ѫѫ\x070\x02\x02ѫ\x04\x03\x02')
buf.write('\x02\x02Ѭѭ\x05еț\x02ѭ\x06\x03\x02\x02\x02Ѯ')
buf.write('ѯ\x05еț\x02ѯѰ\x05лȞ\x02Ѱ')
buf.write('ѱ\x05лȞ\x02ѱ\x08\x03\x02\x02\x02Ѳѳ\x05е')
buf.write('ț\x02ѳѴ\x05пȠ\x02Ѵѵ\x05ћ')
buf.write('Ȯ\x02ѵѶ\x05нȟ\x02Ѷѷ\x05ї')
buf.write('Ȭ\x02ѷ\n\x03\x02\x02\x02Ѹѹ\x05еț\x02ѹ')
buf.write('Ѻ\x05сȡ\x02Ѻѻ\x05нȟ\x02ѻ')
buf.write('Ѽ\x05яȨ\x02Ѽѽ\x05ћȮ\x02ѽ')
buf.write('\x0c\x03\x02\x02\x02Ѿѿ\x05еț\x02ѿҀ\x05с')
buf.write('ȡ\x02Ҁҁ\x05сȡ\x02ҁ҂\x05ї')
buf.write('Ȭ\x02҂҃\x05нȟ\x02҃҄\x05с')
buf.write('ȡ\x02҄҅\x05еț\x02҅҆\x05ћ')
buf.write('Ȯ\x02҆҇\x05нȟ\x02҇\x0e\x03\x02\x02\x02')
buf.write('҈҉\x05еț\x02҉Ҋ\x05ыȦ')
buf.write('\x02Ҋҋ\x05ыȦ\x02ҋ\x10\x03\x02\x02\x02Ҍ')
buf.write('ҍ\x05еț\x02ҍҎ\x05ыȦ\x02Ҏ')
buf.write('ҏ\x05ћȮ\x02ҏҐ\x05нȟ\x02Ґ')
buf.write('ґ\x05їȬ\x02ґ\x12\x03\x02\x02\x02Ғғ\x05')
buf.write('еț\x02ғҔ\x05яȨ\x02Ҕҕ')
buf.write('\x05еț\x02ҕҖ\x05ыȦ\x02Җҗ')
buf.write('\x05ѥȳ\x02җҘ\x05ѧȴ\x02Ҙҙ')
buf.write('\x05нȟ\x02ҙ\x14\x03\x02\x02\x02Ққ\x05е')
buf.write('ț\x02қҜ\x05яȨ\x02Ҝҝ\x05л')
buf.write('Ȟ\x02ҝ\x16\x03\x02\x02\x02Ҟҟ\x05еț\x02')
buf.write('ҟҠ\x05яȨ\x02Ҡҡ\x05ѥȳ')
buf.write('\x02ҡ\x18\x03\x02\x02\x02Ңң\x05еț\x02ң')
buf.write('Ҥ\x05їȬ\x02Ҥҥ\x05їȬ\x02ҥ')
buf.write('Ҧ\x05еț\x02Ҧҧ\x05ѥȳ\x02ҧ')
buf.write('\x1a\x03\x02\x02\x02Ҩҩ\x05еț\x02ҩҪ\x05')
buf.write('љȭ\x02Ҫ\x1c\x03\x02\x02\x02ҫҬ\x05еț')
buf.write('\x02Ҭҭ\x05љȭ\x02ҭҮ\x05љȭ')
buf.write('\x02Үү\x05ѝȯ\x02үҰ\x05эȧ')
buf.write('\x02Ұұ\x05нȟ\x02ұ\x1e\x03\x02\x02\x02Ҳ')
buf.write('ҳ\x05еț\x02ҳҴ\x05љȭ\x02Ҵ')
buf.write('ҵ\x05љȭ\x02ҵҶ\x05нȟ\x02Ҷ')
buf.write('ҷ\x05їȬ\x02ҷҸ\x05ћȮ\x02Ҹ')
buf.write(' \x03\x02\x02\x02ҹҺ\x05еț\x02Һһ\x05љ')
buf.write('ȭ\x02һҼ\x05йȝ\x02Ҽ"\x03\x02\x02\x02ҽ')
buf.write('Ҿ\x05еț\x02Ҿҿ\x05љȭ\x02ҿ')
buf.write('Ӏ\x05љȭ\x02ӀӁ\x05ёȩ\x02Ӂ')
buf.write('ӂ\x05йȝ\x02ӂӃ\x05хȣ\x02Ӄ')
buf.write('ӄ\x05еț\x02ӄӅ\x05ћȮ\x02Ӆ')
buf.write('ӆ\x05нȟ\x02ӆ$\x03\x02\x02\x02Ӈӈ\x05е')
buf.write('ț\x02ӈӉ\x05ћȮ\x02Ӊ&\x03\x02\x02\x02ӊ')
buf.write('Ӌ\x05еț\x02Ӌӌ\x05ћȮ\x02ӌ')
buf.write('Ӎ\x05ћȮ\x02Ӎӎ\x05їȬ\x02ӎ')
buf.write('ӏ\x05хȣ\x02ӏӐ\x05зȜ\x02Ӑ')
buf.write('ӑ\x05ѝȯ\x02ӑӒ\x05ћȮ\x02Ӓ')
buf.write('ӓ\x05нȟ\x02ӓ(\x03\x02\x02\x02Ӕӕ\x05е')
buf.write('ț\x02ӕӖ\x05ѝȯ\x02Ӗӗ\x05л')
buf.write('Ȟ\x02ӗӘ\x05хȣ\x02Әә\x05ћ')
buf.write('Ȯ\x02ә*\x03\x02\x02\x02Ӛӛ\x05еț\x02ӛ')
buf.write('Ӝ\x05ѝȯ\x02Ӝӝ\x05ћȮ\x02ӝ')
buf.write('Ӟ\x05уȢ\x02Ӟӟ\x05хȣ\x02ӟ')
buf.write('Ӡ\x05лȞ\x02Ӡ,\x03\x02\x02\x02ӡӢ\x05е')
buf.write('ț\x02Ӣӣ\x05ѝȯ\x02ӣӤ\x05ћ')
buf.write('Ȯ\x02Ӥӥ\x05ёȩ\x02ӥ.\x03\x02\x02\x02Ӧ')
buf.write('ӧ\x05еț\x02ӧӨ\x05ѝȯ\x02Ө')
buf.write('ө\x05ћȮ\x02өӪ\x05ёȩ\x02Ӫ')
buf.write('ӫ\x05эȧ\x02ӫӬ\x05еț\x02Ӭ')
buf.write('ӭ\x05ћȮ\x02ӭӮ\x05хȣ\x02Ӯ')
buf.write('ӯ\x05йȝ\x02ӯ0\x03\x02\x02\x02Ӱӱ\x05')
buf.write('еț\x02ӱӲ\x05ѝȯ\x02Ӳӳ')
buf.write('\x05ћȮ\x02ӳӴ\x05ёȩ\x02Ӵӵ')
buf.write('\x05яȨ\x02ӵӶ\x05ёȩ\x02Ӷӷ')
buf.write('\x05эȧ\x02ӷӸ\x05ёȩ\x02Ӹӹ')
buf.write('\x05ѝȯ\x02ӹӺ\x05љȭ\x02Ӻӻ')
buf.write('\x07a\x02\x02ӻӼ\x05ћȮ\x02Ӽӽ\x05ї')
buf.write('Ȭ\x02ӽӾ\x05еț\x02Ӿӿ\x05я')
buf.write('Ȩ\x02ӿԀ\x05љȭ\x02Ԁԁ\x05е')
buf.write('ț\x02ԁԂ\x05йȝ\x02Ԃԃ\x05ћ')
buf.write('Ȯ\x02ԃԄ\x05хȣ\x02Ԅԅ\x05ё')
buf.write('ȩ\x02ԅԆ\x05яȨ\x02Ԇ2\x03\x02\x02\x02')
buf.write('ԇԈ\x05зȜ\x02Ԉԉ\x05еț')
buf.write('\x02ԉԊ\x05ћȮ\x02Ԋԋ\x05йȝ')
buf.write('\x02ԋԌ\x05уȢ\x02Ԍ4\x03\x02\x02\x02ԍ')
buf.write('Ԏ\x05зȜ\x02Ԏԏ\x05нȟ\x02ԏ')
buf.write('Ԑ\x05пȠ\x02Ԑԑ\x05ёȩ\x02ԑ')
buf.write('Ԓ\x05їȬ\x02Ԓԓ\x05нȟ\x02ԓ')
buf.write('6\x03\x02\x02\x02Ԕԕ\x05зȜ\x02ԕԖ\x05')
buf.write('нȟ\x02Ԗԗ\x05сȡ\x02ԗԘ')
buf.write('\x05хȣ\x02Ԙԙ\x05яȨ\x02ԙ8\x03')
buf.write('\x02\x02\x02Ԛԛ\x05зȜ\x02ԛԜ\x05н')
buf.write('ȟ\x02Ԝԝ\x05ћȮ\x02ԝԞ\x05ѡ')
buf.write('ȱ\x02Ԟԟ\x05нȟ\x02ԟԠ\x05н')
buf.write('ȟ\x02Ԡԡ\x05яȨ\x02ԡ:\x03\x02\x02\x02Ԣ')
buf.write('ԣ\x05зȜ\x02ԣԤ\x05пȠ\x02Ԥ')
buf.write('ԥ\x05хȣ\x02ԥԦ\x05ыȦ\x02Ԧ')
buf.write('ԧ\x05нȟ\x02ԧ<\x03\x02\x02\x02Ԩԩ\x05з')
buf.write('Ȝ\x02ԩԪ\x05хȣ\x02Ԫԫ\x05я')
buf.write('Ȩ\x02ԫԬ\x05еț\x02Ԭԭ\x05ї')
buf.write('Ȭ\x02ԭԮ\x05ѥȳ\x02Ԯԯ\x07a\x02')
buf.write('\x02ԯ\u0530\x05лȞ\x02\u0530Ա\x05ёȩ')
buf.write('\x02ԱԲ\x05ѝȯ\x02ԲԳ\x05зȜ')
buf.write('\x02ԳԴ\x05ыȦ\x02ԴԵ\x05нȟ')
buf.write('\x02Ե>\x03\x02\x02\x02ԶԷ\x05зȜ\x02ԷԸ')
buf.write('\x05хȣ\x02ԸԹ\x05яȨ\x02ԹԺ')
buf.write('\x05еț\x02ԺԻ\x05їȬ\x02ԻԼ')
buf.write('\x05ѥȳ\x02ԼԽ\x07a\x02\x02ԽԾ\x05п')
buf.write('Ƞ\x02ԾԿ\x05ыȦ\x02ԿՀ\x05ё')
buf.write('ȩ\x02ՀՁ\x05еț\x02ՁՂ\x05ћ')
buf.write('Ȯ\x02Ղ@\x03\x02\x02\x02ՃՄ\x05зȜ\x02Մ')
buf.write('Յ\x05хȣ\x02ՅՆ\x05яȨ\x02Ն')
buf.write('Շ\x05еț\x02ՇՈ\x05їȬ\x02Ո')
buf.write('Չ\x05ѥȳ\x02ՉՊ\x07a\x02\x02ՊՋ')
buf.write('\x05хȣ\x02ՋՌ\x05яȨ\x02ՌՍ')
buf.write('\x05ћȮ\x02ՍՎ\x05нȟ\x02ՎՏ')
buf.write('\x05сȡ\x02ՏՐ\x05нȟ\x02ՐՑ')
buf.write('\x05їȬ\x02ՑB\x03\x02\x02\x02ՒՓ\x05зȜ')
buf.write('\x02ՓՔ\x05ыȦ\x02ՔՕ\x05ёȩ')
buf.write('\x02ՕՖ\x05зȜ\x02ՖD\x03\x02\x02\x02\u0557\u0558')
buf.write('\x05зȜ\x02\u0558ՙ\x05ыȦ\x02ՙ՚')
buf.write('\x05ёȩ\x02՚՛\x05йȝ\x02՛՜')
buf.write('\x05щȥ\x02՜F\x03\x02\x02\x02՝՞\x05зȜ')
buf.write('\x02՞՟\x05ёȩ\x02՟ՠ\x05лȞ')
buf.write('\x02ՠա\x05ѥȳ\x02աH\x03\x02\x02\x02բգ')
buf.write('\x05зȜ\x02գդ\x05ёȩ\x02դե')
buf.write('\x05ёȩ\x02եզ\x05ыȦ\x02զէ')
buf.write('\x05нȟ\x02էը\x05еț\x02ըթ')
buf.write('\x05яȨ\x02թJ\x03\x02\x02\x02ժի\x05зȜ')
buf.write('\x02իլ\x05ёȩ\x02լխ\x05ћȮ')
buf.write('\x02խծ\x05уȢ\x02ծL\x03\x02\x02\x02կհ')
buf.write('\x05зȜ\x02հձ\x05їȬ\x02ձղ')
buf.write('\x05нȟ\x02ղճ\x05еț\x02ճմ')
buf.write('\x05лȞ\x02մյ\x05ћȮ\x02յն')
buf.write('\x05уȢ\x02նN\x03\x02\x02\x02շո\x05зȜ')
buf.write('\x02ոչ\x05ѝȯ\x02չպ\x05ыȦ')
buf.write('\x02պջ\x05щȥ\x02ջP\x03\x02\x02\x02ռս')
buf.write('\x05зȜ\x02սվ\x05ѥȳ\x02վR\x03')
buf.write('\x02\x02\x02տր\x05зȜ\x02րց\x05ѥ')
buf.write('ȳ\x02ցւ\x05ћȮ\x02ւփ\x05н')
buf.write('ȟ\x02փT\x03\x02\x02\x02քօ\x05йȝ\x02օ')
buf.write('V\x03\x02\x02\x02ֆև\x05йȝ\x02ևֈ\x05е')
buf.write('ț\x02ֈ։\x05йȝ\x02։֊\x05у')
buf.write('Ȣ\x02֊\u058b\x05нȟ\x02\u058bX\x03\x02\x02\x02\u058c')
buf.write('֍\x05йȝ\x02֍֎\x05еț\x02֎')
buf.write('֏\x05ыȦ\x02֏\u0590\x05ыȦ\x02\u0590')
buf.write('Z\x03\x02\x02\x02֑֒\x05йȝ\x02֒֓\x05е')
buf.write('ț\x02֓֔\x05яȨ\x02֔֕\x05ё')
buf.write('ȩ\x02֖֕\x05яȨ\x02֖֗\x05х')
buf.write('ȣ\x02֗֘\x05йȝ\x02֘֙\x05е')
buf.write('ț\x02֚֙\x05ыȦ\x02֚\\\x03\x02\x02\x02֛')
buf.write('֜\x05йȝ\x02֜֝\x05еț\x02֝')
buf.write('֞\x05љȭ\x02֞֟\x05йȝ\x02֟')
buf.write('֠\x05еț\x02֠֡\x05лȞ\x02֡')
buf.write('֢\x05нȟ\x02֢^\x03\x02\x02\x02֣֤\x05й')
buf.write('ȝ\x02֤֥\x05еț\x02֥֦\x05љ')
buf.write('ȭ\x02֦֧\x05нȟ\x02֧`\x03\x02\x02\x02֨')
buf.write('֩\x05йȝ\x02֪֩\x05еț\x02֪')
buf.write('֫\x05љȭ\x02֫֬\x05ћȮ\x02֬')
buf.write('b\x03\x02\x02\x02֭֮\x05йȝ\x02֮֯\x05у')
buf.write('Ȣ\x02ְ֯\x05еț\x02ְֱ\x05ї')
buf.write('Ȭ\x02ֱd\x03\x02\x02\x02ֲֳ\x05йȝ\x02ֳ')
buf.write('ִ\x05уȢ\x02ִֵ\x05еț\x02ֵ')
buf.write('ֶ\x05їȬ\x02ֶַ\x07a\x02\x02ַָ')
buf.write('\x05йȝ\x02ָֹ\x05љȭ\x02ֹf\x03')
buf.write('\x02\x02\x02ֺֻ\x05йȝ\x02ֻּ\x05у')
buf.write('Ȣ\x02ּֽ\x05еț\x02ֽ־\x05ї')
buf.write('Ȭ\x02־ֿ\x05еț\x02ֿ׀\x05й')
buf.write('ȝ\x02׀ׁ\x05ћȮ\x02ׁׂ\x05н')
buf.write('ȟ\x02ׂ׃\x05їȬ\x02׃h\x03\x02\x02\x02ׄ')
buf.write('ׅ\x05йȝ\x02ׅ׆\x05уȢ\x02׆')
buf.write('ׇ\x05нȟ\x02ׇ\u05c8\x05йȝ\x02\u05c8')
buf.write('\u05c9\x05щȥ\x02\u05c9j\x03\x02\x02\x02\u05ca\u05cb\x05й')
buf.write('ȝ\x02\u05cb\u05cc\x05уȢ\x02\u05cc\u05cd\x05ї')
buf.write('Ȭ\x02\u05cdl\x03\x02\x02\x02\u05ce\u05cf\x05йȝ\x02\u05cf')
buf.write('א\x05ыȦ\x02אב\x05ёȩ\x02ב')
buf.write('ג\x05зȜ\x02גn\x03\x02\x02\x02דה\x05й')
buf.write('ȝ\x02הו\x05ыȦ\x02וז\x05ё')
buf.write('ȩ\x02זח\x05љȭ\x02חט\x05н')
buf.write('ȟ\x02טp\x03\x02\x02\x02יך\x05йȝ\x02ך')
buf.write('כ\x05ыȦ\x02כל\x05ѝȯ\x02ל')
buf.write('ם\x05љȭ\x02םמ\x05ћȮ\x02מ')
buf.write('ן\x05нȟ\x02ןנ\x05їȬ\x02נ')
buf.write('r\x03\x02\x02\x02סע\x05йȝ\x02עף\x05ё')
buf.write('ȩ\x02ףפ\x05ыȦ\x02פץ\x05ы')
buf.write('Ȧ\x02ץצ\x05нȟ\x02צק\x05й')
buf.write('ȝ\x02קר\x05ћȮ\x02רt\x03\x02\x02\x02ש')
buf.write('ת\x05йȝ\x02ת\u05eb\x05ёȩ\x02\u05eb')
buf.write('\u05ec\x05ыȦ\x02\u05ec\u05ed\x05ѝȯ\x02\u05ed')
buf.write('\u05ee\x05эȧ\x02\u05eeׯ\x05яȨ\x02ׯ')
buf.write('װ\x05љȭ\x02װv\x03\x02\x02\x02ױײ\x05й')
buf.write('ȝ\x02ײ׳\x05ёȩ\x02׳״\x05э')
buf.write('ȧ\x02״\u05f5\x05эȧ\x02\u05f5\u05f6\x05н')
buf.write('ȟ\x02\u05f6\u05f7\x05яȨ\x02\u05f7\u05f8\x05ћ')
buf.write('Ȯ\x02\u05f8x\x03\x02\x02\x02\u05f9\u05fa\x05йȝ\x02\u05fa')
buf.write('\u05fb\x05ёȩ\x02\u05fb\u05fc\x05эȧ\x02\u05fc')
buf.write('\u05fd\x05эȧ\x02\u05fd\u05fe\x05хȣ\x02\u05fe')
buf.write('\u05ff\x05ћȮ\x02\u05ffz\x03\x02\x02\x02\u0600\u0601\x05й')
buf.write('ȝ\x02\u0601\u0602\x05ёȩ\x02\u0602\u0603\x05э')
buf.write('ȧ\x02\u0603\u0604\x05эȧ\x02\u0604\u0605\x05х')
buf.write('ȣ\x02\u0605؆\x05ћȮ\x02؆؇\x05ћ')
buf.write('Ȯ\x02؇؈\x05нȟ\x02؈؉\x05л')
buf.write('Ȟ\x02؉|\x03\x02\x02\x02؊؋\x05йȝ\x02؋')
buf.write('،\x05ёȩ\x02،؍\x05эȧ\x02؍')
buf.write('؎\x05ѓȪ\x02؎؏\x05еț\x02؏')
buf.write('ؐ\x05ћȮ\x02ؐؑ\x05хȣ\x02ؑ')
buf.write('ؒ\x05зȜ\x02ؒؓ\x05хȣ\x02ؓ')
buf.write('ؔ\x05ыȦ\x02ؔؕ\x05хȣ\x02ؕ')
buf.write('ؖ\x05ћȮ\x02ؖؗ\x05ѥȳ\x02ؗ')
buf.write('~\x03\x02\x02\x02ؘؙ\x05йȝ\x02ؙؚ\x05ё')
buf.write('ȩ\x02ؚ؛\x05эȧ\x02؛\u061c\x05ѓ')
buf.write('Ȫ\x02\u061c؝\x05хȣ\x02؝؞\x05ы')
buf.write('Ȧ\x02؞؟\x05нȟ\x02؟\x80\x03\x02\x02')
buf.write('\x02ؠء\x05йȝ\x02ءآ\x05ёȩ')
buf.write('\x02آأ\x05эȧ\x02أؤ\x05ѓȪ')
buf.write('\x02ؤإ\x05ёȩ\x02إئ\x05ѝȯ')
buf.write('\x02ئا\x05яȨ\x02اب\x05лȞ')
buf.write('\x02ب\x82\x03\x02\x02\x02ةت\x05йȝ\x02ت')
buf.write('ث\x05ёȩ\x02ثج\x05яȨ\x02ج')
buf.write('ح\x05яȨ\x02حخ\x05нȟ\x02خ')
buf.write('د\x05йȝ\x02دذ\x05ћȮ\x02ذ')
buf.write('\x84\x03\x02\x02\x02رز\x05йȝ\x02زس')
buf.write('\x05ёȩ\x02سش\x05яȨ\x02شص')
buf.write('\x05яȨ\x02صض\x05нȟ\x02ضط')
buf.write('\x05йȝ\x02طظ\x05ћȮ\x02ظع')
buf.write('\x07a\x02\x02عغ\x05зȜ\x02غػ\x05ѥ')
buf.write('ȳ\x02ػؼ\x07a\x02\x02ؼؽ\x05їȬ')
buf.write('\x02ؽؾ\x05ёȩ\x02ؾؿ\x05ёȩ')
buf.write('\x02ؿـ\x05ћȮ\x02ـ\x86\x03\x02\x02\x02ف')
buf.write('ق\x05йȝ\x02قك\x05ёȩ\x02ك')
buf.write('ل\x05яȨ\x02لم\x05љȭ\x02م')
buf.write('ن\x05ћȮ\x02نه\x05еț\x02ه')
buf.write('و\x05яȨ\x02وى\x05ћȮ\x02ى')
buf.write('\x88\x03\x02\x02\x02يً\x05йȝ\x02ًٌ')
buf.write('\x05ёȩ\x02ٌٍ\x05яȨ\x02ٍَ')
buf.write('\x05љȭ\x02َُ\x05ћȮ\x02ُِ')
buf.write('\x05їȬ\x02ِّ\x05еț\x02ّْ')
buf.write('\x05хȣ\x02ْٓ\x05яȨ\x02ٓٔ')
buf.write('\x05ћȮ\x02ٔ\x8a\x03\x02\x02\x02ٕٖ\x05й')
buf.write('ȝ\x02ٖٗ\x05ёȩ\x02ٗ٘\x05я')
buf.write('Ȩ\x02٘ٙ\x05љȭ\x02ٙٚ\x05ћ')
buf.write('Ȯ\x02ٚٛ\x05їȬ\x02ٜٛ\x05е')
buf.write('ț\x02ٜٝ\x05хȣ\x02ٝٞ\x05я')
buf.write('Ȩ\x02ٟٞ\x05ћȮ\x02ٟ٠\x05љ')
buf.write('ȭ\x02٠\x8c\x03\x02\x02\x02١٢\x05йȝ')
buf.write('\x02٢٣\x05ёȩ\x02٣٤\x05яȨ')
buf.write('\x02٤٥\x05љȭ\x02٥٦\x05ћȮ')
buf.write('\x02٦٧\x05їȬ\x02٧٨\x05ѝȯ')
buf.write('\x02٨٩\x05йȝ\x02٩٪\x05ћȮ')
buf.write('\x02٪٫\x05ёȩ\x02٫٬\x05їȬ')
buf.write('\x02٬\x8e\x03\x02\x02\x02٭ٮ\x05йȝ\x02ٮ')
buf.write('ٯ\x05ёȩ\x02ٯٰ\x05яȨ\x02ٰ')
buf.write('ٱ\x05ћȮ\x02ٱٲ\x05нȟ\x02ٲ')
buf.write('ٳ\x05яȨ\x02ٳٴ\x05ћȮ\x02ٴ')
buf.write('\x90\x03\x02\x02\x02ٵٶ\x05йȝ\x02ٶٷ')
buf.write('\x05ёȩ\x02ٷٸ\x05яȨ\x02ٸٹ')
buf.write('\x05ћȮ\x02ٹٺ\x05нȟ\x02ٺٻ')
buf.write('\x05ѣȲ\x02ٻټ\x05ћȮ\x02ټ\x92')
buf.write('\x03\x02\x02\x02ٽپ\x05йȝ\x02پٿ\x05ё')
buf.write('ȩ\x02ٿڀ\x05яȨ\x02ڀځ\x05ћ')
buf.write('Ȯ\x02ځڂ\x05хȣ\x02ڂڃ\x05я')
buf.write('Ȩ\x02ڃڄ\x05ѝȯ\x02ڄڅ\x05н')
buf.write('ȟ\x02څ\x94\x03\x02\x02\x02چڇ\x05йȝ')
buf.write('\x02ڇڈ\x05ёȩ\x02ڈډ\x05яȨ')
buf.write('\x02ډڊ\x05џȰ\x02ڊڋ\x05нȟ')
buf.write('\x02ڋڌ\x05їȬ\x02ڌڍ\x05ћȮ')
buf.write('\x02ڍ\x96\x03\x02\x02\x02ڎڏ\x05йȝ\x02ڏ')
buf.write('ڐ\x05ёȩ\x02ڐڑ\x05їȬ\x02ڑ')
buf.write('ڒ\x05їȬ\x02ڒړ\x05ѝȯ\x02ړ')
buf.write('ڔ\x05ѓȪ\x02ڔڕ\x05ћȮ\x02ڕ')
buf.write('ږ\x07a\x02\x02ږڗ\x05ѣȲ\x02ڗژ')
buf.write('\x05хȣ\x02ژڙ\x05лȞ\x02ڙ\x98')
buf.write('\x03\x02\x02\x02ښڛ\x05йȝ\x02ڛڜ\x05ё')
buf.write('ȩ\x02ڜڝ\x05їȬ\x02ڝڞ\x05ї')
buf.write('Ȭ\x02ڞڟ\x05ѝȯ\x02ڟڠ\x05ѓ')
buf.write('Ȫ\x02ڠڡ\x05ћȮ\x02ڡڢ\x07a\x02')
buf.write('\x02ڢڣ\x05ѣȲ\x02ڣڤ\x05хȣ')
buf.write('\x02ڤڥ\x05лȞ\x02ڥڦ\x07a\x02\x02ڦ')
buf.write('ڧ\x05еț\x02ڧڨ\x05ыȦ\x02ڨ')
buf.write('ک\x05ыȦ\x02ک\x9a\x03\x02\x02\x02ڪګ')
buf.write('\x05йȝ\x02ګڬ\x05ёȩ\x02ڬڭ')
buf.write('\x05љȭ\x02ڭڮ\x05ћȮ\x02ڮ\x9c')
buf.write('\x03\x02\x02\x02گڰ\x05йȝ\x02ڰڱ\x05ё')
buf.write('ȩ\x02ڱڲ\x05ѝȯ\x02ڲڳ\x05я')
buf.write('Ȩ\x02ڳڴ\x05ћȮ\x02ڴ\x9e\x03\x02\x02')
buf.write('\x02ڵڶ\x05йȝ\x02ڶڷ\x05їȬ')
buf.write('\x02ڷڸ\x05нȟ\x02ڸڹ\x05еț')
buf.write('\x02ڹں\x05ћȮ\x02ںڻ\x05нȟ')
buf.write('\x02ڻ\xa0\x03\x02\x02\x02ڼڽ\x05йȝ\x02ڽ')
buf.write('ھ\x05їȬ\x02ھڿ\x05ёȩ\x02ڿ')
buf.write('ۀ\x05љȭ\x02ۀہ\x05љȭ\x02ہ')
buf.write('¢\x03\x02\x02\x02ۂۃ\x05йȝ\x02ۃۄ')
buf.write('\x05ѝȯ\x02ۄۅ\x05зȜ\x02ۅۆ')
buf.write('\x05нȟ\x02ۆ¤\x03\x02\x02\x02ۇۈ\x05й')
buf.write('ȝ\x02ۈۉ\x05ѝȯ\x02ۉۊ\x05ї')
buf.write('Ȭ\x02ۊۋ\x05їȬ\x02ۋی\x05н')
buf.write('ȟ\x02یۍ\x05яȨ\x02ۍێ\x05ћ')
buf.write('Ȯ\x02ێ¦\x03\x02\x02\x02ۏې\x05йȝ')
buf.write('\x02ېۑ\x05ѝȯ\x02ۑے\x05їȬ')
buf.write('\x02ےۓ\x05їȬ\x02ۓ۔\x05нȟ')
buf.write('\x02۔ە\x05яȨ\x02ەۖ\x05ћȮ')
buf.write('\x02ۖۗ\x07a\x02\x02ۗۘ\x05ѝȯ\x02ۘ')
buf.write('ۙ\x05љȭ\x02ۙۚ\x05нȟ\x02ۚ')
buf.write('ۛ\x05їȬ\x02ۛ¨\x03\x02\x02\x02ۜ\u06dd')
buf.write('\x05йȝ\x02\u06dd۞\x05ѝȯ\x02۞۟')
buf.write('\x05їȬ\x02۟۠\x05љȭ\x02۠ۡ')
buf.write('\x05ёȩ\x02ۡۢ\x05їȬ\x02ۢª')
buf.write('\x03\x02\x02\x02ۣۤ\x05йȝ\x02ۤۥ\x05ѝ')
buf.write('ȯ\x02ۥۦ\x05љȭ\x02ۦۧ\x05ћ')
buf.write('Ȯ\x02ۧۨ\x05ёȩ\x02ۨ۩\x05э')
buf.write('ȧ\x02۩۪\x05лȞ\x02۪۫\x05е')
buf.write('ț\x02۫۬\x05ћȮ\x02ۭ۬\x05ѝ')
buf.write('ȯ\x02ۭۮ\x05эȧ\x02ۮ¬\x03\x02\x02')
buf.write('\x02ۯ۰\x05йȝ\x02۰۱\x05ѥȳ')
buf.write('\x02۱۲\x05йȝ\x02۲۳\x05ыȦ')
buf.write('\x02۳۴\x05нȟ\x02۴®\x03\x02\x02\x02۵')
buf.write('۶\x05лȞ\x02۶۷\x05еț\x02۷')
buf.write('۸\x05ћȮ\x02۸۹\x05еț\x02۹')
buf.write('°\x03\x02\x02\x02ۺۻ\x05лȞ\x02ۻۼ')
buf.write('\x05еț\x02ۼ۽\x05ћȮ\x02۽۾')
buf.write('\x05еț\x02۾ۿ\x05зȜ\x02ۿ܀')
buf.write('\x05еț\x02܀܁\x05љȭ\x02܁܂')
buf.write('\x05нȟ\x02܂²\x03\x02\x02\x02܃܄\x05л')
buf.write('Ȟ\x02܄܅\x05еț\x02܅܆\x05ћ')
buf.write('Ȯ\x02܆܇\x05нȟ\x02܇´\x03\x02\x02')
buf.write('\x02܈܉\x05лȞ\x02܉܊\x05еț')
buf.write('\x02܊܋\x05ѥȳ\x02܋¶\x03\x02\x02\x02܌')
buf.write('܍\x05лȞ\x02܍\u070e\x05зȜ\x02\u070e')
buf.write('\u070f\x07a\x02\x02\u070fܐ\x05їȬ\x02ܐܑ')
buf.write('\x05ёȩ\x02ܑܒ\x05ыȦ\x02ܒܓ')
buf.write('\x05нȟ\x02ܓܔ\x07a\x02\x02ܔܕ\x05й')
buf.write('ȝ\x02ܕܖ\x05уȢ\x02ܖܗ\x05е')
buf.write('ț\x02ܗܘ\x05яȨ\x02ܘܙ\x05с')
buf.write('ȡ\x02ܙܚ\x05нȟ\x02ܚ¸\x03\x02\x02')
buf.write('\x02ܛܜ\x05лȞ\x02ܜܝ\x05зȜ')
buf.write('\x02ܝܞ\x05ћȮ\x02ܞܟ\x05хȣ')
buf.write('\x02ܟܠ\x05эȧ\x02ܠܡ\x05нȟ')
buf.write('\x02ܡܢ\x05ѧȴ\x02ܢܣ\x05ёȩ')
buf.write('\x02ܣܤ\x05яȨ\x02ܤܥ\x05нȟ')
buf.write('\x02ܥº\x03\x02\x02\x02ܦܧ\x05лȞ\x02ܧ')
buf.write('ܨ\x05лȞ\x02ܨܩ\x05ыȦ\x02ܩ')
buf.write('¼\x03\x02\x02\x02ܪܫ\x05лȞ\x02ܫܬ')
buf.write('\x05нȟ\x02ܬܭ\x05зȜ\x02ܭܮ')
buf.write('\x05ѝȯ\x02ܮܯ\x05сȡ\x02ܯ¾')
buf.write('\x03\x02\x02\x02ܱܰ\x05лȞ\x02ܱܲ\x05н')
buf.write('ȟ\x02ܲܳ\x05йȝ\x02ܳÀ\x03\x02\x02')
buf.write('\x02ܴܵ\x05лȞ\x02ܵܶ\x05нȟ')
buf.write('\x02ܷܶ\x05йȝ\x02ܷܸ\x05хȣ')
buf.write('\x02ܸܹ\x05эȧ\x02ܹܺ\x05еț')
buf.write('\x02ܻܺ\x05ыȦ\x02ܻÂ\x03\x02\x02\x02ܼ')
buf.write('ܽ\x05лȞ\x02ܾܽ\x05нȟ\x02ܾ')
buf.write('ܿ\x05йȝ\x02ܿ݀\x05ыȦ\x02݀')
buf.write('݁\x05еț\x02݂݁\x05їȬ\x02݂')
buf.write('݃\x05нȟ\x02݃Ä\x03\x02\x02\x02݄݅')
buf.write('\x05лȞ\x02݆݅\x05нȟ\x02݆݇')
buf.write('\x05йȝ\x02݈݇\x05ёȩ\x02݈݉')
buf.write('\x05эȧ\x02݉݊\x05ѓȪ\x02݊\u074b')
buf.write('\x05ёȩ\x02\u074b\u074c\x05љȭ\x02\u074cݍ')
buf.write('\x05нȟ\x02ݍÆ\x03\x02\x02\x02ݎݏ\x05л')
buf.write('Ȟ\x02ݏݐ\x05нȟ\x02ݐݑ\x05й')
buf.write('ȝ\x02ݑݒ\x05їȬ\x02ݒݓ\x05н')
buf.write('ȟ\x02ݓݔ\x05эȧ\x02ݔݕ\x05н')
buf.write('ȟ\x02ݕݖ\x05яȨ\x02ݖݗ\x05ћ')
buf.write('Ȯ\x02ݗÈ\x03\x02\x02\x02ݘݙ\x05лȞ')
buf.write('\x02ݙݚ\x05нȟ\x02ݚݛ\x05пȠ')
buf.write('\x02ݛݜ\x05еț\x02ݜݝ\x05ѝȯ')
buf.write('\x02ݝݞ\x05ыȦ\x02ݞݟ\x05ћȮ')
buf.write('\x02ݟÊ\x03\x02\x02\x02ݠݡ\x05лȞ\x02ݡ')
buf.write('ݢ\x05нȟ\x02ݢݣ\x05пȠ\x02ݣ')
buf.write('ݤ\x05еț\x02ݤݥ\x05ѝȯ\x02ݥ')
buf.write('ݦ\x05ыȦ\x02ݦݧ\x05ћȮ\x02ݧ')
buf.write('ݨ\x05љȭ\x02ݨÌ\x03\x02\x02\x02ݩݪ')
buf.write('\x05лȞ\x02ݪݫ\x05нȟ\x02ݫݬ')
buf.write('\x05пȠ\x02ݬݭ\x05нȟ\x02ݭݮ')
buf.write('\x05їȬ\x02ݮݯ\x05їȬ\x02ݯݰ')
buf.write('\x05нȟ\x02ݰݱ\x05лȞ\x02ݱÎ')
buf.write('\x03\x02\x02\x02ݲݳ\x05лȞ\x02ݳݴ\x05н')
buf.write('ȟ\x02ݴݵ\x05пȠ\x02ݵݶ\x05х')
buf.write('ȣ\x02ݶݷ\x05яȨ\x02ݷݸ\x05н')
buf.write('ȟ\x02ݸݹ\x05їȬ\x02ݹÐ\x03\x02\x02')
buf.write('\x02ݺݻ\x05лȞ\x02ݻݼ\x05нȟ')
buf.write('\x02ݼݽ\x05ыȦ\x02ݽݾ\x05нȟ')
buf.write('\x02ݾݿ\x05ћȮ\x02ݿހ\x05нȟ')
buf.write('\x02ހÒ\x03\x02\x02\x02ށނ\x05лȞ\x02ނ')
buf.write('ރ\x05нȟ\x02ރބ\x05ѓȪ\x02ބ')
buf.write('ޅ\x05ћȮ\x02ޅކ\x05уȢ\x02ކ')
buf.write('Ô\x03\x02\x02\x02އވ\x05лȞ\x02ވމ')
buf.write('\x05нȟ\x02މފ\x05љȭ\x02ފދ')
buf.write('\x05йȝ\x02ދÖ\x03\x02\x02\x02ތލ\x05л')
buf.write('Ȟ\x02ލގ\x05нȟ\x02ގޏ\x05ћ')
buf.write('Ȯ\x02ޏސ\x05нȟ\x02ސޑ\x05ї')
buf.write('Ȭ\x02ޑޒ\x05эȧ\x02ޒޓ\x05х')
buf.write('ȣ\x02ޓޔ\x05яȨ\x02ޔޕ\x05х')
buf.write('ȣ\x02ޕޖ\x05љȭ\x02ޖޗ\x05ћ')
buf.write('Ȯ\x02ޗޘ\x05хȣ\x02ޘޙ\x05й')
buf.write('ȝ\x02ޙØ\x03\x02\x02\x02ޚޛ\x05лȞ')
buf.write('\x02ޛޜ\x05хȣ\x02ޜޝ\x05эȧ')
buf.write('\x02ޝޞ\x05нȟ\x02ޞޟ\x05яȨ')
buf.write('\x02ޟޠ\x05љȭ\x02ޠޡ\x05хȣ')
buf.write('\x02ޡޢ\x05ёȩ\x02ޢޣ\x05яȨ')
buf.write('\x02ޣÚ\x03\x02\x02\x02ޤޥ\x05лȞ\x02ޥ')
buf.write('ަ\x05хȣ\x02ަާ\x05љȭ\x02ާ')
buf.write('ި\x05еț\x02ިީ\x05зȜ\x02ީ')
buf.write('ު\x05ыȦ\x02ުޫ\x05нȟ\x02ޫ')
buf.write('Ü\x03\x02\x02\x02ެޭ\x05лȞ\x02ޭޮ')
buf.write('\x05хȣ\x02ޮޯ\x05љȭ\x02ޯް')
buf.write('\x05еț\x02ްޱ\x05љȭ\x02ޱ\u07b2')
buf.write('\x05љȭ\x02\u07b2\u07b3\x05ёȩ\x02\u07b3\u07b4')
buf.write('\x05йȝ\x02\u07b4\u07b5\x05хȣ\x02\u07b5\u07b6')
buf.write('\x05еț\x02\u07b6\u07b7\x05ћȮ\x02\u07b7\u07b8')
buf.write('\x05нȟ\x02\u07b8Þ\x03\x02\x02\x02\u07b9\u07ba\x05л')
buf.write('Ȟ\x02\u07ba\u07bb\x05хȣ\x02\u07bb\u07bc\x05љ')
buf.write('ȭ\x02\u07bc\u07bd\x05ћȮ\x02\u07bd\u07be\x05х')
buf.write('ȣ\x02\u07be\u07bf\x05яȨ\x02\u07bf߀\x05й')
buf.write('ȝ\x02߀߁\x05ћȮ\x02߁à\x03\x02\x02')
buf.write('\x02߂߃\x05лȞ\x02߃߄\x05ёȩ')
buf.write('\x02߄߅\x05йȝ\x02߅߆\x05ѝȯ')
buf.write('\x02߆߇\x05эȧ\x02߇߈\x05нȟ')
buf.write('\x02߈߉\x05яȨ\x02߉ߊ\x05ћȮ')
buf.write('\x02ߊâ\x03\x02\x02\x02ߋߌ\x05лȞ\x02ߌ')
buf.write('ߍ\x05ёȩ\x02ߍߎ\x05ѝȯ\x02ߎ')
buf.write('ߏ\x05зȜ\x02ߏߐ\x05ыȦ\x02ߐ')
buf.write('ߑ\x05нȟ\x02ߑä\x03\x02\x02\x02ߒߓ')
buf.write('\x05лȞ\x02ߓߔ\x05їȬ\x02ߔߕ')
buf.write('\x05ёȩ\x02ߕߖ\x05ѓȪ\x02ߖæ')
buf.write('\x03\x02\x02\x02ߗߘ\x05лȞ\x02ߘߙ\x05љ')
buf.write('ȭ\x02ߙߚ\x05хȣ\x02ߚߛ\x05я')
buf.write('Ȩ\x02ߛߜ\x05ћȮ\x02ߜߝ\x05н')
buf.write('ȟ\x02ߝߞ\x05їȬ\x02ߞߟ\x05џ')
buf.write('Ȱ\x02ߟߠ\x05еț\x02ߠߡ\x05ы')
buf.write('Ȧ\x02ߡߢ\x07a\x02\x02ߢߣ\x05ѝȯ')
buf.write('\x02ߣߤ\x05яȨ\x02ߤߥ\x05йȝ')
buf.write('\x02ߥߦ\x05ёȩ\x02ߦߧ\x05яȨ')
buf.write('\x02ߧߨ\x05љȭ\x02ߨߩ\x05ћȮ')
buf.write('\x02ߩߪ\x05їȬ\x02ߪ߫\x05еț')
buf.write('\x02߫߬\x05хȣ\x02߬߭\x05яȨ')
buf.write('\x02߭߮\x05нȟ\x02߮߯\x05лȞ')
buf.write('\x02߯è\x03\x02\x02\x02߰߱\x05нȟ\x02߱')
buf.write('߲\x05еț\x02߲߳\x05йȝ\x02߳')
buf.write('ߴ\x05уȢ\x02ߴê\x03\x02\x02\x02ߵ߶')
buf.write('\x05нȟ\x02߶߷\x05ыȦ\x02߷߸')
buf.write('\x05нȟ\x02߸߹\x05эȧ\x02߹ߺ')
buf.write('\x05нȟ\x02ߺ\u07fb\x05яȨ\x02\u07fb\u07fc')
buf.write('\x05ћȮ\x02\u07fcì\x03\x02\x02\x02߽߾\x05н')
buf.write('ȟ\x02߾߿\x05ыȦ\x02߿ࠀ\x05љ')
buf.write('ȭ\x02ࠀࠁ\x05нȟ\x02ࠁî\x03\x02\x02')
buf.write('\x02ࠂࠃ\x05нȟ\x02ࠃࠄ\x05ыȦ')
buf.write('\x02ࠄࠅ\x05љȭ\x02ࠅࠆ\x05хȣ')
buf.write('\x02ࠆࠇ\x05пȠ\x02ࠇð\x03\x02\x02\x02ࠈ')
buf.write('ࠉ\x05нȟ\x02ࠉࠊ\x05эȧ\x02ࠊ')
buf.write('ࠋ\x05ѓȪ\x02ࠋࠌ\x05ћȮ\x02ࠌ')
buf.write('ࠍ\x05ѥȳ\x02ࠍò\x03\x02\x02\x02ࠎࠏ')
buf.write('\x05нȟ\x02ࠏࠐ\x05яȨ\x02ࠐࠑ')
buf.write('\x05еț\x02ࠑࠒ\x05зȜ\x02ࠒࠓ')
buf.write('\x05ыȦ\x02ࠓࠔ\x05нȟ\x02ࠔô')
buf.write('\x03\x02\x02\x02ࠕࠖ\x05нȟ\x02ࠖࠗ\x05я')
buf.write('Ȩ\x02ࠗ࠘\x05йȝ\x02࠘࠙\x05ё')
buf.write('ȩ\x02࠙ࠚ\x05лȞ\x02ࠚࠛ\x05х')
buf.write('ȣ\x02ࠛࠜ\x05яȨ\x02ࠜࠝ\x05с')
buf.write('ȡ\x02ࠝö\x03\x02\x02\x02ࠞࠟ\x05нȟ')
buf.write('\x02ࠟࠠ\x05яȨ\x02ࠠࠡ\x05лȞ')
buf.write('\x02ࠡø\x03\x02\x02\x02ࠢࠣ\x05нȟ\x02ࠣ')
buf.write('ࠤ\x05яȨ\x02ࠤࠥ\x05ћȮ\x02ࠥ')
buf.write('ࠦ\x05хȣ\x02ࠦࠧ\x05ћȮ\x02ࠧ')
buf.write('ࠨ\x05ѥȳ\x02ࠨࠩ\x05нȟ\x02ࠩ')
buf.write('ࠪ\x05љȭ\x02ࠪࠫ\x05йȝ\x02ࠫ')
buf.write('ࠬ\x05еț\x02ࠬ࠭\x05ѓȪ\x02࠭')
buf.write('\u082e\x05хȣ\x02\u082e\u082f\x05яȨ\x02\u082f')
buf.write('࠰\x05сȡ\x02࠰ú\x03\x02\x02\x02࠱࠲')
buf.write('\x05нȟ\x02࠲࠳\x05їȬ\x02࠳࠴')
buf.write('\x05їȬ\x02࠴ü\x03\x02\x02\x02࠵࠶\x05н')
buf.write('ȟ\x02࠶࠷\x05їȬ\x02࠷࠸\x05ї')
buf.write('Ȭ\x02࠸࠹\x05ёȩ\x02࠹࠺\x05ї')
buf.write('Ȭ\x02࠺࠻\x05љȭ\x02࠻þ\x03\x02\x02')
buf.write('\x02࠼࠽\x05нȟ\x02࠽࠾\x05љȭ')
buf.write('\x02࠾\u083f\x05йȝ\x02\u083fࡀ\x05еț')
buf.write('\x02ࡀࡁ\x05ѓȪ\x02ࡁࡂ\x05нȟ')
buf.write('\x02ࡂĀ\x03\x02\x02\x02ࡃࡄ\x05нȟ\x02ࡄ')
buf.write('ࡅ\x05џȰ\x02ࡅࡆ\x05еț\x02ࡆ')
buf.write('ࡇ\x05ыȦ\x02ࡇࡈ\x05яȨ\x02ࡈ')
buf.write('ࡉ\x05еț\x02ࡉࡊ\x05эȧ\x02ࡊ')
buf.write('ࡋ\x05нȟ\x02ࡋĂ\x03\x02\x02\x02ࡌࡍ')
buf.write('\x05нȟ\x02ࡍࡎ\x05ѣȲ\x02ࡎࡏ')
buf.write('\x05йȝ\x02ࡏࡐ\x05нȟ\x02ࡐࡑ')
buf.write('\x05ѓȪ\x02ࡑࡒ\x05ћȮ\x02ࡒĄ')
buf.write('\x03\x02\x02\x02ࡓࡔ\x05нȟ\x02ࡔࡕ\x05ѣ')
buf.write('Ȳ\x02ࡕࡖ\x05йȝ\x02ࡖࡗ\x05н')
buf.write('ȟ\x02ࡗࡘ\x05ѓȪ\x02ࡘ࡙\x05ћ')
buf.write('Ȯ\x02࡙࡚\x05хȣ\x02࡚࡛\x05ё')
buf.write('ȩ\x02࡛\u085c\x05яȨ\x02\u085cĆ\x03\x02\x02')
buf.write('\x02\u085d࡞\x05нȟ\x02࡞\u085f\x05ѣȲ')
buf.write('\x02\u085fࡠ\x05йȝ\x02ࡠࡡ\x05нȟ')
buf.write('\x02ࡡࡢ\x05ѓȪ\x02ࡢࡣ\x05ћȮ')
buf.write('\x02ࡣࡤ\x05хȣ\x02ࡤࡥ\x05ёȩ')
buf.write('\x02ࡥࡦ\x05яȨ\x02ࡦࡧ\x07a\x02\x02ࡧ')
buf.write('ࡨ\x05хȣ\x02ࡨࡩ\x05яȨ\x02ࡩ')
buf.write('ࡪ\x05хȣ\x02ࡪ\u086b\x05ћȮ\x02\u086b')
buf.write('Ĉ\x03\x02\x02\x02\u086c\u086d\x05нȟ\x02\u086d\u086e')
buf.write('\x05ѣȲ\x02\u086e\u086f\x05йȝ\x02\u086fࡰ')
buf.write('\x05нȟ\x02ࡰࡱ\x05ѓȪ\x02ࡱࡲ')
buf.write('\x05ћȮ\x02ࡲࡳ\x05хȣ\x02ࡳࡴ')
buf.write('\x05ёȩ\x02ࡴࡵ\x05яȨ\x02ࡵࡶ')
buf.write('\x05љȭ\x02ࡶĊ\x03\x02\x02\x02ࡷࡸ\x05н')
buf.write('ȟ\x02ࡸࡹ\x05ѣȲ\x02ࡹࡺ\x05й')
buf.write('ȝ\x02ࡺࡻ\x05ыȦ\x02ࡻࡼ\x05ѝ')
buf.write('ȯ\x02ࡼࡽ\x05лȞ\x02ࡽࡾ\x05н')
buf.write('ȟ\x02ࡾČ\x03\x02\x02\x02ࡿࢀ\x05нȟ')
buf.write('\x02ࢀࢁ\x05ѣȲ\x02ࢁࢂ\x05йȝ')
buf.write('\x02ࢂࢃ\x05ыȦ\x02ࢃࢄ\x05ѝȯ')
buf.write('\x02ࢄࢅ\x05љȭ\x02ࢅࢆ\x05хȣ')
buf.write('\x02ࢆࢇ\x05џȰ\x02ࢇ࢈\x05нȟ')
buf.write('\x02࢈Ď\x03\x02\x02\x02ࢉࢊ\x05нȟ\x02ࢊ')
buf.write('ࢋ\x05ѣȲ\x02ࢋࢌ\x05нȟ\x02ࢌ')
buf.write('ࢍ\x05йȝ\x02ࢍࢎ\x05ѝȯ\x02ࢎ')
buf.write('\u088f\x05ћȮ\x02\u088f\u0890\x05нȟ\x02\u0890')
buf.write('Đ\x03\x02\x02\x02\u0891\u0892\x05нȟ\x02\u0892\u0893')
buf.write('\x05ѣȲ\x02\u0893\u0894\x05хȣ\x02\u0894\u0895')
buf.write('\x05љȭ\x02\u0895\u0896\x05ћȮ\x02\u0896\u0897')
buf.write('\x05љȭ\x02\u0897Ē\x03\x02\x02\x02࢙࢘\x05н')
buf.write('ȟ\x02࢙࢚\x05ѣȲ\x02࢚࢛\x05х')
buf.write('ȣ\x02࢛࢜\x05ћȮ\x02࢜Ĕ\x03\x02\x02')
buf.write('\x02࢝࢞\x05нȟ\x02࢞࢟\x05ѣȲ')
buf.write('\x02࢟ࢠ\x05ѓȪ\x02ࢠࢡ\x05ыȦ')
buf.write('\x02ࢡࢢ\x05еț\x02ࢢࢣ\x05хȣ')
buf.write('\x02ࢣࢤ\x05яȨ\x02ࢤĖ\x03\x02\x02\x02ࢥ')
buf.write('ࢦ\x05нȟ\x02ࢦࢧ\x05ѣȲ\x02ࢧ')
buf.write('ࢨ\x05ћȮ\x02ࢨࢩ\x05нȟ\x02ࢩ')
buf.write('ࢪ\x05їȬ\x02ࢪࢫ\x05яȨ\x02ࢫ')
buf.write('ࢬ\x05еț\x02ࢬࢭ\x05ыȦ\x02ࢭ')
buf.write('Ę\x03\x02\x02\x02ࢮࢯ\x05нȟ\x02ࢯࢰ')
buf.write('\x05ѣȲ\x02ࢰࢱ\x05ћȮ\x02ࢱࢲ')
buf.write('\x05їȬ\x02ࢲࢳ\x05еț\x02ࢳࢴ')
buf.write('\x05йȝ\x02ࢴࢵ\x05ћȮ\x02ࢵĚ')
buf.write('\x03\x02\x02\x02ࢶࢷ\x05пȠ\x02ࢷࢸ\x05е')
buf.write('ț\x02ࢸࢹ\x05хȣ\x02ࢹࢺ\x05ы')
buf.write('Ȧ\x02ࢺࢻ\x05ѝȯ\x02ࢻࢼ\x05ї')
buf.write('Ȭ\x02ࢼࢽ\x05нȟ\x02ࢽĜ\x03\x02\x02')
buf.write('\x02ࢾࢿ\x05пȠ\x02ࢿࣀ\x05еț')
buf.write('\x02ࣀࣁ\x05ыȦ\x02ࣁࣂ\x05љȭ')
buf.write('\x02ࣂࣃ\x05нȟ\x02ࣃĞ\x03\x02\x02\x02ࣄ')
buf.write('ࣅ\x05пȠ\x02ࣅࣆ\x05нȟ\x02ࣆ')
buf.write('ࣇ\x05ћȮ\x02ࣇࣈ\x05йȝ\x02ࣈ')
buf.write('ࣉ\x05уȢ\x02ࣉĠ\x03\x02\x02\x02࣊࣋')
buf.write('\x05пȠ\x02࣋࣌\x05хȣ\x02࣌࣍')
buf.write('\x05яȨ\x02࣍࣎\x05еț\x02࣏࣎')
buf.write('\x05ыȦ\x02࣏Ģ\x03\x02\x02\x02࣐࣑\x05п')
buf.write('Ƞ\x02࣑࣒\x05хȣ\x02࣒࣓\x05ї')
buf.write('Ȭ\x02࣓ࣔ\x05љȭ\x02ࣔࣕ\x05ћ')
buf.write('Ȯ\x02ࣕĤ\x03\x02\x02\x02ࣖࣗ\x05пȠ')
buf.write('\x02ࣗࣘ\x05хȣ\x02ࣘࣙ\x05їȬ')
buf.write('\x02ࣙࣚ\x05љȭ\x02ࣚࣛ\x05ћȮ')
buf.write('\x02ࣛࣜ\x07a\x02\x02ࣜࣝ\x05џȰ\x02ࣝ')
buf.write('ࣞ\x05еț\x02ࣞࣟ\x05ыȦ\x02ࣟ')
buf.write('࣠\x05ѝȯ\x02࣠࣡\x05нȟ\x02࣡')
buf.write('Ħ\x03\x02\x02\x02\u08e2ࣣ\x05пȠ\x02ࣣࣤ')
buf.write('\x05ыȦ\x02ࣤࣥ\x05ёȩ\x02ࣦࣥ')
buf.write('\x05еț\x02ࣦࣧ\x05ћȮ\x02ࣧĨ')
buf.write('\x03\x02\x02\x02ࣩࣨ\x05пȠ\x02ࣩ࣪\x05ё')
buf.write('ȩ\x02࣪࣫\x05ыȦ\x02࣫࣬\x05ы')
buf.write('Ȧ\x02࣭࣬\x05ёȩ\x02࣭࣮\x05ѡ')
buf.write('ȱ\x02࣮࣯\x05хȣ\x02ࣰ࣯\x05я')
buf.write('Ȩ\x02ࣰࣱ\x05сȡ\x02ࣱĪ\x03\x02\x02')
buf.write('\x02ࣲࣳ\x05пȠ\x02ࣳࣴ\x05ёȩ')
buf.write('\x02ࣴࣵ\x05ыȦ\x02ࣶࣵ\x05ыȦ')
buf.write('\x02ࣶࣷ\x05ёȩ\x02ࣷࣸ\x05ѡȱ')
buf.write('\x02ࣹࣸ\x05љȭ\x02ࣹĬ\x03\x02\x02\x02ࣺ')
buf.write('ࣻ\x05пȠ\x02ࣻࣼ\x05ёȩ\x02ࣼ')
buf.write('ࣽ\x05їȬ\x02ࣽĮ\x03\x02\x02\x02ࣾࣿ')
buf.write('\x05пȠ\x02ࣿऀ\x05ёȩ\x02ऀँ')
buf.write('\x05їȬ\x02ँं\x05еț\x02ंः')
buf.write('\x05ыȦ\x02ःऄ\x05ыȦ\x02ऄİ')
buf.write('\x03\x02\x02\x02अआ\x05пȠ\x02आइ\x05ё')
buf.write('ȩ\x02इई\x05їȬ\x02ईउ\x05й')
buf.write('ȝ\x02उऊ\x05нȟ\x02ऊIJ\x03\x02\x02')
buf.write('\x02ऋऌ\x05пȠ\x02ऌऍ\x05їȬ')
buf.write('\x02ऍऎ\x05ёȩ\x02ऎए\x05эȧ')
buf.write('\x02एĴ\x03\x02\x02\x02ऐऑ\x05пȠ\x02ऑ')
buf.write('ऒ\x05ѝȯ\x02ऒओ\x05ыȦ\x02ओ')
buf.write('औ\x05ыȦ\x02औĶ\x03\x02\x02\x02कख')
buf.write('\x05пȠ\x02खग\x05ѝȯ\x02गघ')
buf.write('\x05яȨ\x02घङ\x05йȝ\x02ङच')
buf.write('\x05ћȮ\x02चछ\x05хȣ\x02छज')
buf.write('\x05ёȩ\x02जझ\x05яȨ\x02झĸ')
buf.write('\x03\x02\x02\x02ञट\x05сȡ\x02टठ\x05ё')
buf.write('ȩ\x02ठड\x05ћȮ\x02डढ\x05ё')
buf.write('ȩ\x02ढĺ\x03\x02\x02\x02णत\x05сȡ')
buf.write('\x02तथ\x05їȬ\x02थद\x05еț')
buf.write('\x02दध\x05яȨ\x02धन\x05ћȮ')
buf.write('\x02नļ\x03\x02\x02\x02ऩप\x05сȡ\x02प')
buf.write('फ\x05їȬ\x02फब\x05ёȩ\x02ब')
buf.write('भ\x05ѝȯ\x02भम\x05ѓȪ\x02म')
buf.write('ľ\x03\x02\x02\x02यर\x05сȡ\x02रऱ')
buf.write('\x05їȬ\x02ऱल\x05ёȩ\x02लळ')
buf.write('\x05ѝȯ\x02ळऴ\x05ѓȪ\x02ऴव')
buf.write('\x05хȣ\x02वश\x05яȨ\x02शष')
buf.write('\x05сȡ\x02षŀ\x03\x02\x02\x02सह\x05у')
buf.write('Ȣ\x02हऺ\x05еț\x02ऺऻ\x05љ')
buf.write('ȭ\x02ऻ़\x05уȢ\x02़ł\x03\x02\x02')
buf.write('\x02ऽा\x05уȢ\x02ाि\x05еț')
buf.write('\x02िी\x05џȰ\x02ीु\x05хȣ')
buf.write('\x02ुू\x05яȨ\x02ूृ\x05сȡ')
buf.write('\x02ृń\x03\x02\x02\x02ॄॅ\x05уȢ\x02ॅ')
buf.write('ॆ\x05хȣ\x02ॆे\x05лȞ\x02े')
buf.write('ै\x05нȟ\x02ैņ\x03\x02\x02\x02ॉॊ')
buf.write('\x05уȢ\x02ॊो\x05ёȩ\x02ोौ')
buf.write('\x05ѝȯ\x02ौ्\x05їȬ\x02्ň')
buf.write('\x03\x02\x02\x02ॎॏ\x05хȣ\x02ॏॐ\x05п')
buf.write('Ƞ\x02ॐŊ\x03\x02\x02\x02॒॑\x05хȣ')
buf.write('\x02॒॓\x05сȡ\x02॓॔\x05яȨ')
buf.write('\x02॔ॕ\x05ёȩ\x02ॕॖ\x05їȬ')
buf.write('\x02ॖॗ\x05нȟ\x02ॗŌ\x03\x02\x02\x02क़')
buf.write('ख़\x05хȣ\x02ख़ग़\x05эȧ\x02ग़')
buf.write('ज़\x05эȧ\x02ज़ड़\x05нȟ\x02ड़')
buf.write('ढ़\x05лȞ\x02ढ़फ़\x05хȣ\x02फ़')
buf.write('य़\x05еț\x02य़ॠ\x05ћȮ\x02ॠ')
buf.write('ॡ\x05нȟ\x02ॡŎ\x03\x02\x02\x02ॢॣ')
buf.write('\x05хȣ\x02ॣ।\x05яȨ\x02।Ő')
buf.write('\x03\x02\x02\x02॥०\x05хȣ\x02०१\x05я')
buf.write('Ȩ\x02१२\x05йȝ\x02२३\x05ы')
buf.write('Ȧ\x02३४\x05ѝȯ\x02४५\x05л')
buf.write('Ȟ\x02५६\x05нȟ\x02६Œ\x03\x02\x02')
buf.write('\x02७८\x05хȣ\x02८९\x05яȨ')
buf.write('\x02९॰\x05йȝ\x02॰ॱ\x05ыȦ')
buf.write('\x02ॱॲ\x05ѝȯ\x02ॲॳ\x05лȞ')
buf.write('\x02ॳॴ\x05хȣ\x02ॴॵ\x05яȨ')
buf.write('\x02ॵॶ\x05сȡ\x02ॶŔ\x03\x02\x02\x02ॷ')
buf.write('ॸ\x05хȣ\x02ॸॹ\x05яȨ\x02ॹ')
buf.write('ॺ\x05йȝ\x02ॺॻ\x05їȬ\x02ॻ')
buf.write('ॼ\x05нȟ\x02ॼॽ\x05эȧ\x02ॽ')
buf.write('ॾ\x05нȟ\x02ॾॿ\x05яȨ\x02ॿ')
buf.write('ঀ\x05ћȮ\x02ঀŖ\x03\x02\x02\x02ঁং')
buf.write('\x05хȣ\x02ংঃ\x05яȨ\x02ঃ\u0984')
buf.write('\x05лȞ\x02\u0984অ\x05нȟ\x02অআ')
buf.write('\x05яȨ\x02আই\x05ћȮ\x02ইŘ')
buf.write('\x03\x02\x02\x02ঈউ\x05хȣ\x02উঊ\x05я')
buf.write('Ȩ\x02ঊঋ\x05лȞ\x02ঋঌ\x05н')
buf.write('ȟ\x02ঌ\u098d\x05ѣȲ\x02\u098dŚ\x03\x02\x02')
buf.write('\x02\u098eএ\x05хȣ\x02এঐ\x05яȨ')
buf.write('\x02ঐ\u0991\x05лȞ\x02\u0991\u0992\x05нȟ')
buf.write('\x02\u0992ও\x05ѣȲ\x02ওঔ\x05нȟ')
buf.write('\x02ঔক\x05лȞ\x02কŜ\x03\x02\x02\x02খ')
buf.write('গ\x05хȣ\x02গঘ\x05яȨ\x02ঘ')
buf.write('ঙ\x05лȞ\x02ঙচ\x05хȣ\x02চ')
buf.write('ছ\x05йȝ\x02ছজ\x05еț\x02জ')
buf.write('ঝ\x05ћȮ\x02ঝঞ\x05ёȩ\x02ঞ')
buf.write('ট\x05їȬ\x02টŞ\x03\x02\x02\x02ঠড')
buf.write('\x05хȣ\x02ডঢ\x05яȨ\x02ঢণ')
buf.write('\x05лȞ\x02ণত\x05хȣ\x02তথ')
buf.write('\x05йȝ\x02থদ\x05нȟ\x02দধ')
buf.write('\x05љȭ\x02ধŠ\x03\x02\x02\x02ন\u09a9\x05х')
buf.write('ȣ\x02\u09a9প\x05яȨ\x02পফ\x05п')
buf.write('Ƞ\x02ফব\x05хȣ\x02বভ\x05я')
buf.write('Ȩ\x02ভম\x05хȣ\x02ময\x05ћ')
buf.write('Ȯ\x02যর\x05нȟ\x02রŢ\x03\x02\x02')
buf.write('\x02\u09b1ল\x05хȣ\x02ল\u09b3\x05яȨ')
buf.write('\x02\u09b3\u09b4\x05ыȦ\x02\u09b4\u09b5\x05хȣ')
buf.write('\x02\u09b5শ\x05яȨ\x02শষ\x05нȟ')
buf.write('\x02ষŤ\x03\x02\x02\x02সহ\x05хȣ\x02হ')
buf.write('\u09ba\x05яȨ\x02\u09ba\u09bb\x05яȨ\x02\u09bb')
buf.write('়\x05нȟ\x02়ঽ\x05їȬ\x02ঽ')
buf.write('Ŧ\x03\x02\x02\x02াি\x05хȣ\x02িী')
buf.write('\x05яȨ\x02ীু\x05ёȩ\x02ুূ')
buf.write('\x05ѝȯ\x02ূৃ\x05ћȮ\x02ৃŨ')
buf.write('\x03\x02\x02\x02ৄ\u09c5\x05хȣ\x02\u09c5\u09c6\x05я')
buf.write('Ȩ\x02\u09c6ে\x05љȭ\x02েৈ\x05н')
buf.write('ȟ\x02ৈ\u09c9\x05їȬ\x02\u09c9\u09ca\x05ћ')
buf.write('Ȯ\x02\u09caŪ\x03\x02\x02\x02োৌ\x05хȣ')
buf.write('\x02ৌ্\x05яȨ\x02্ৎ\x05љȭ')
buf.write('\x02ৎ\u09cf\x05ћȮ\x02\u09cf\u09d0\x05еț')
buf.write('\x02\u09d0\u09d1\x05яȨ\x02\u09d1\u09d2\x05ћȮ')
buf.write('\x02\u09d2\u09d3\x05хȣ\x02\u09d3\u09d4\x05еț')
buf.write('\x02\u09d4\u09d5\x05зȜ\x02\u09d5\u09d6\x05ыȦ')
buf.write('\x02\u09d6ৗ\x05нȟ\x02ৗŬ\x03\x02\x02\x02\u09d8')
buf.write('\u09d9\x05хȣ\x02\u09d9\u09da\x05яȨ\x02\u09da')
buf.write('\u09db\x05љȭ\x02\u09dbড়\x05ћȮ\x02ড়')
buf.write('ঢ়\x05нȟ\x02ঢ়\u09de\x05еț\x02\u09de')
buf.write('য়\x05лȞ\x02য়Ů\x03\x02\x02\x02ৠৡ')
buf.write('\x05хȣ\x02ৡৢ\x05яȨ\x02ৢৣ')
buf.write('\x05ћȮ\x02ৣŰ\x03\x02\x02\x02\u09e4\u09e5\x05х')
buf.write('ȣ\x02\u09e5০\x05яȨ\x02০১\x05ћ')
buf.write('Ȯ\x02১২\x05нȟ\x02২৩\x05с')
buf.write('ȡ\x02৩৪\x05нȟ\x02৪৫\x05ї')
buf.write('Ȭ\x02৫Ų\x03\x02\x02\x02৬৭\x05хȣ')
buf.write('\x02৭৮\x05яȨ\x02৮৯\x05ћȮ')
buf.write('\x02৯ৰ\x05нȟ\x02ৰৱ\x05їȬ')
buf.write('\x02ৱ৲\x05љȭ\x02৲৳\x05нȟ')
buf.write('\x02৳৴\x05йȝ\x02৴৵\x05ћȮ')
buf.write('\x02৵Ŵ\x03\x02\x02\x02৶৷\x05хȣ\x02৷')
buf.write('৸\x05яȨ\x02৸৹\x05ћȮ\x02৹')
buf.write('৺\x05нȟ\x02৺৻\x05їȬ\x02৻')
buf.write('ৼ\x05џȰ\x02ৼ৽\x05еț\x02৽')
buf.write('৾\x05ыȦ\x02৾Ŷ\x03\x02\x02\x02\u09ff\u0a00')
buf.write('\x05хȣ\x02\u0a00ਁ\x05яȨ\x02ਁਂ')
buf.write('\x05ћȮ\x02ਂਃ\x05ёȩ\x02ਃŸ')
buf.write('\x03\x02\x02\x02\u0a04ਅ\x05хȣ\x02ਅਆ\x05я')
buf.write('Ȩ\x02ਆਇ\x05џȰ\x02ਇਈ\x05е')
buf.write('ț\x02ਈਉ\x05ыȦ\x02ਉਊ\x05х')
buf.write('ȣ\x02ਊ\u0a0b\x05лȞ\x02\u0a0b\u0a0c\x05е')
buf.write('ț\x02\u0a0c\u0a0d\x05ћȮ\x02\u0a0d\u0a0e\x05н')
buf.write('ȟ\x02\u0a0eź\x03\x02\x02\x02ਏਐ\x05хȣ')
buf.write('\x02ਐ\u0a11\x05љȭ\x02\u0a11ż\x03\x02\x02\x02\u0a12')
buf.write('ਓ\x05хȣ\x02ਓਔ\x05љȭ\x02ਔ')
buf.write('ਕ\x05ёȩ\x02ਕਖ\x05ыȦ\x02ਖ')
buf.write('ਗ\x05еț\x02ਗਘ\x05ћȮ\x02ਘ')
buf.write('ਙ\x05хȣ\x02ਙਚ\x05ёȩ\x02ਚ')
buf.write('ਛ\x05яȨ\x02ਛž\x03\x02\x02\x02ਜਝ')
buf.write('\x05хȣ\x02ਝਞ\x05ћȮ\x02ਞਟ')
buf.write('\x05нȟ\x02ਟਠ\x05їȬ\x02ਠਡ')
buf.write('\x05еț\x02ਡਢ\x05ћȮ\x02ਢਣ')
buf.write('\x05нȟ\x02ਣƀ\x03\x02\x02\x02ਤਥ\x05ч')
buf.write('Ȥ\x02ਥਦ\x05еț\x02ਦਧ\x05џ')
buf.write('Ȱ\x02ਧਨ\x05еț\x02ਨƂ\x03\x02\x02')
buf.write('\x02\u0a29ਪ\x05чȤ\x02ਪਫ\x05ёȩ')
buf.write('\x02ਫਬ\x05хȣ\x02ਬਭ\x05яȨ')
buf.write('\x02ਭƄ\x03\x02\x02\x02ਮਯ\x05щȥ\x02ਯ')
buf.write('ਰ\x05нȟ\x02ਰ\u0a31\x05нȟ\x02\u0a31')
buf.write('ਲ\x05ѓȪ\x02ਲƆ\x03\x02\x02\x02ਲ਼\u0a34')
buf.write('\x05ыȦ\x02\u0a34ਵ\x05еț\x02ਵਸ਼')
buf.write('\x05яȨ\x02ਸ਼\u0a37\x05сȡ\x02\u0a37ਸ')
buf.write('\x05ѝȯ\x02ਸਹ\x05еț\x02ਹ\u0a3a')
buf.write('\x05сȡ\x02\u0a3a\u0a3b\x05нȟ\x02\u0a3bƈ')
buf.write('\x03\x02\x02\x02਼\u0a3d\x05ыȦ\x02\u0a3dਾ\x05е')
buf.write('ț\x02ਾਿ\x05љȭ\x02ਿੀ\x05ћ')
buf.write('Ȯ\x02ੀƊ\x03\x02\x02\x02ੁੂ\x05ыȦ')
buf.write('\x02ੂ\u0a43\x05еț\x02\u0a43\u0a44\x05љȭ')
buf.write('\x02\u0a44\u0a45\x05ћȮ\x02\u0a45\u0a46\x07a\x02\x02\u0a46')
buf.write('ੇ\x05џȰ\x02ੇੈ\x05еț\x02ੈ')
buf.write('\u0a49\x05ыȦ\x02\u0a49\u0a4a\x05ѝȯ\x02\u0a4a')
buf.write('ੋ\x05нȟ\x02ੋƌ\x03\x02\x02\x02ੌ੍')
buf.write('\x05ыȦ\x02੍\u0a4e\x05нȟ\x02\u0a4e\u0a4f')
buf.write('\x05еț\x02\u0a4f\u0a50\x05лȞ\x02\u0a50ੑ')
buf.write('\x05хȣ\x02ੑ\u0a52\x05яȨ\x02\u0a52\u0a53')
buf.write('\x05сȡ\x02\u0a53Ǝ\x03\x02\x02\x02\u0a54\u0a55\x05ы')
buf.write('Ȧ\x02\u0a55\u0a56\x05нȟ\x02\u0a56\u0a57\x05п')
buf.write('Ƞ\x02\u0a57\u0a58\x05ћȮ\x02\u0a58Ɛ\x03\x02\x02')
buf.write('\x02ਖ਼ਗ਼\x05ыȦ\x02ਗ਼ਜ਼\x05нȟ')
buf.write('\x02ਜ਼ੜ\x05џȰ\x02ੜ\u0a5d\x05нȟ')
buf.write('\x02\u0a5dਫ਼\x05ыȦ\x02ਫ਼ƒ\x03\x02\x02\x02\u0a5f')
buf.write('\u0a60\x05ыȦ\x02\u0a60\u0a61\x05хȣ\x02\u0a61')
buf.write('\u0a62\x05зȜ\x02\u0a62\u0a63\x05їȬ\x02\u0a63')
buf.write('\u0a64\x05еț\x02\u0a64\u0a65\x05їȬ\x02\u0a65')
buf.write('੦\x05ѥȳ\x02੦Ɣ\x03\x02\x02\x02੧੨')
buf.write('\x05ыȦ\x02੨੩\x05хȣ\x02੩੪')
buf.write('\x05щȥ\x02੪੫\x05нȟ\x02੫Ɩ')
buf.write('\x03\x02\x02\x02੬੭\x05ыȦ\x02੭੮\x05х')
buf.write('ȣ\x02੮੯\x05щȥ\x02੯ੰ\x05н')
buf.write('ȟ\x02ੰੱ\x074\x02\x02ੱƘ\x03\x02\x02\x02ੲ')
buf.write('ੳ\x05ыȦ\x02ੳੴ\x05хȣ\x02ੴ')
buf.write('ੵ\x05щȥ\x02ੵ੶\x05нȟ\x02੶')
buf.write('\u0a77\x076\x02\x02\u0a77ƚ\x03\x02\x02\x02\u0a78\u0a79\x05ы'
)
buf.write('Ȧ\x02\u0a79\u0a7a\x05хȣ\x02\u0a7a\u0a7b\x05щ')
buf.write('ȥ\x02\u0a7b\u0a7c\x05нȟ\x02\u0a7c\u0a7d\x05й')
buf.write('ȝ\x02\u0a7dƜ\x03\x02\x02\x02\u0a7e\u0a7f\x05ыȦ')
buf.write('\x02\u0a7f\u0a80\x05хȣ\x02\u0a80ઁ\x05эȧ')
buf.write('\x02ઁં\x05хȣ\x02ંઃ\x05ћȮ')
buf.write('\x02ઃƞ\x03\x02\x02\x02\u0a84અ\x05ыȦ\x02અ')
buf.write('આ\x05ёȩ\x02આઇ\x05йȝ\x02ઇ')
buf.write('ઈ\x05еț\x02ઈઉ\x05ыȦ\x02ઉ')
buf.write('Ơ\x03\x02\x02\x02ઊઋ\x05ыȦ\x02ઋઌ')
buf.write('\x05ёȩ\x02ઌઍ\x05йȝ\x02ઍ\u0a8e')
buf.write('\x05щȥ\x02\u0a8eƢ\x03\x02\x02\x02એઐ\x05ы')
buf.write('Ȧ\x02ઐઑ\x05ёȩ\x02ઑ\u0a92\x05й')
buf.write('ȝ\x02\u0a92ઓ\x05щȥ\x02ઓઔ\x05н')
buf.write('ȟ\x02ઔક\x05лȞ\x02કƤ\x03\x02\x02')
buf.write('\x02ખગ\x05ыȦ\x02ગઘ\x05ёȩ')
buf.write('\x02ઘઙ\x05сȡ\x02ઙƦ\x03\x02\x02\x02ચ')
buf.write('છ\x05ыȦ\x02છજ\x05ёȩ\x02જ')
buf.write('ઝ\x05сȡ\x02ઝઞ\x05ёȩ\x02ઞ')
buf.write('ટ\x05пȠ\x02ટઠ\x05пȠ\x02ઠ')
buf.write('ƨ\x03\x02\x02\x02ડઢ\x05ыȦ\x02ઢણ')
buf.write('\x05ёȩ\x02ણત\x05сȡ\x02તથ')
buf.write('\x05ёȩ\x02થદ\x05яȨ\x02દƪ')
buf.write('\x03\x02\x02\x02ધન\x05ыȦ\x02ન\u0aa9\x05ё')
buf.write('ȩ\x02\u0aa9પ\x05яȨ\x02પફ\x05с')
buf.write('ȡ\x02ફƬ\x03\x02\x02\x02બભ\x05ыȦ')
buf.write('\x02ભમ\x05ёȩ\x02મય\x05ёȩ')
buf.write('\x02યર\x05ѓȪ\x02રƮ\x03\x02\x02\x02\u0ab1')
buf.write('લ\x05эȧ\x02લળ\x05еț\x02ળ')
buf.write('\u0ab4\x05хȣ\x02\u0ab4વ\x05яȨ\x02વ')
buf.write('ư\x03\x02\x02\x02શષ\x05эȧ\x02ષસ')
buf.write('\x05еț\x02સહ\x05ѓȪ\x02હƲ')
buf.write('\x03\x02\x02\x02\u0aba\u0abb\x05эȧ\x02\u0abb઼\x05е')
buf.write('ț\x02઼ઽ\x05ћȮ\x02ઽા\x05й')
buf.write('ȝ\x02ાિ\x05уȢ\x02િી\x05н')
buf.write('ȟ\x02ીુ\x05лȞ\x02ુƴ\x03\x02\x02')
buf.write('\x02ૂૃ\x05эȧ\x02ૃૄ\x05еț')
buf.write('\x02ૄૅ\x05ѣȲ\x02ૅ\u0ac6\x05џȰ')
buf.write('\x02\u0ac6ે\x05еț\x02ેૈ\x05ыȦ')
buf.write('\x02ૈૉ\x05ѝȯ\x02ૉ\u0aca\x05нȟ')
buf.write('\x02\u0acaƶ\x03\x02\x02\x02ોૌ\x05эȧ\x02ૌ')
buf.write('્\x05нȟ\x02્\u0ace\x05еț\x02\u0ace')
buf.write('\u0acf\x05љȭ\x02\u0acfૐ\x05ѝȯ\x02ૐ')
buf.write('\u0ad1\x05їȬ\x02\u0ad1\u0ad2\x05нȟ\x02\u0ad2')
buf.write('\u0ad3\x05љȭ\x02\u0ad3Ƹ\x03\x02\x02\x02\u0ad4\u0ad5')
buf.write('\x05эȧ\x02\u0ad5\u0ad6\x05нȟ\x02\u0ad6\u0ad7')
buf.write('\x05эȧ\x02\u0ad7\u0ad8\x05зȜ\x02\u0ad8\u0ad9')
buf.write('\x05нȟ\x02\u0ad9\u0ada\x05їȬ\x02\u0adaƺ')
buf.write('\x03\x02\x02\x02\u0adb\u0adc\x05эȧ\x02\u0adc\u0add\x05н')
buf.write('ȟ\x02\u0add\u0ade\x05їȬ\x02\u0ade\u0adf\x05с')
buf.write('ȡ\x02\u0adfૠ\x05нȟ\x02ૠƼ\x03\x02\x02')
buf.write('\x02ૡૢ\x05эȧ\x02ૢૣ\x05хȣ')
buf.write('\x02ૣ\u0ae4\x05яȨ\x02\u0ae4\u0ae5\x05ѝȯ')
buf.write('\x02\u0ae5૦\x05љȭ\x02૦ƾ\x03\x02\x02\x02૧')
buf.write('૨\x05эȧ\x02૨૩\x05хȣ\x02૩')
buf.write('૪\x05яȨ\x02૪૫\x05ѝȯ\x02૫')
buf.write('૬\x05ћȮ\x02૬૭\x05нȟ\x02૭')
buf.write('ǀ\x03\x02\x02\x02૮૯\x05эȧ\x02૯૰')
buf.write('\x05хȣ\x02૰૱\x05яȨ\x02૱\u0af2')
buf.write('\x05џȰ\x02\u0af2\u0af3\x05еț\x02\u0af3\u0af4')
buf.write('\x05ыȦ\x02\u0af4\u0af5\x05ѝȯ\x02\u0af5\u0af6')
buf.write('\x05нȟ\x02\u0af6ǂ\x03\x02\x02\x02\u0af7\u0af8\x05э')
buf.write('ȧ\x02\u0af8ૹ\x05ыȦ\x02ૹૺ\x05љ')
buf.write('ȭ\x02ૺૻ\x05ыȦ\x02ૻૼ\x05е')
buf.write('ț\x02ૼ૽\x05зȜ\x02૽૾\x05н')
buf.write('ȟ\x02૾૿\x05ыȦ\x02૿DŽ\x03\x02\x02')
buf.write('\x02\u0b00ଁ\x05эȧ\x02ଁଂ\x05ёȩ')
buf.write('\x02ଂଃ\x05лȞ\x02ଃ\u0b04\x05нȟ')
buf.write('\x02\u0b04dž\x03\x02\x02\x02ଅଆ\x05эȧ\x02ଆ')
buf.write('ଇ\x05ёȩ\x02ଇଈ\x05лȞ\x02ଈ')
buf.write('ଉ\x05нȟ\x02ଉଊ\x05ыȦ\x02ଊ')
buf.write('Lj\x03\x02\x02\x02ଋଌ\x05эȧ\x02ଌ\u0b0d')
buf.write('\x05ёȩ\x02\u0b0d\u0b0e\x05лȞ\x02\u0b0eଏ')
buf.write('\x05хȣ\x02ଏଐ\x05пȠ\x02ଐ\u0b11')
buf.write('\x05ѥȳ\x02\u0b11NJ\x03\x02\x02\x02\u0b12ଓ\x05э')
buf.write('ȧ\x02ଓଔ\x05ёȩ\x02ଔକ\x05я')
buf.write('Ȩ\x02କଖ\x05ћȮ\x02ଖଗ\x05у')
buf.write('Ȣ\x02ଗnj\x03\x02\x02\x02ଘଙ\x05эȧ')
buf.write('\x02ଙଚ\x05ѝȯ\x02ଚଛ\x05ыȦ')
buf.write('\x02ଛଜ\x05ћȮ\x02ଜଝ\x05хȣ')
buf.write('\x02ଝଞ\x05љȭ\x02ଞଟ\x05нȟ')
buf.write('\x02ଟଠ\x05ћȮ\x02ଠǎ\x03\x02\x02\x02ଡ')
buf.write('ଢ\x05яȨ\x02ଢଣ\x05еț\x02ଣ')
buf.write('ତ\x05эȧ\x02ତଥ\x05нȟ\x02ଥ')
buf.write('ǐ\x03\x02\x02\x02ଦଧ\x05яȨ\x02ଧନ')
buf.write('\x05еț\x02ନ\u0b29\x05яȨ\x02\u0b29ǒ')
buf.write('\x03\x02\x02\x02ପଫ\x05яȨ\x02ଫବ\x05е')
buf.write('ț\x02ବଭ\x05ћȮ\x02ଭମ\x05ѝ')
buf.write('ȯ\x02ମଯ\x05їȬ\x02ଯର\x05е')
buf.write('ț\x02ର\u0b31\x05ыȦ\x02\u0b31ǔ\x03\x02\x02')
buf.write('\x02ଲଳ\x05яȨ\x02ଳ\u0b34\x05еț')
buf.write('\x02\u0b34ଵ\x05ћȮ\x02ଵଶ\x05ѝȯ')
buf.write('\x02ଶଷ\x05їȬ\x02ଷସ\x05еț')
buf.write('\x02ସହ\x05ыȦ\x02ହ\u0b3a\x05яȨ')
buf.write('\x02\u0b3aǖ\x03\x02\x02\x02\u0b3b଼\x05яȨ\x02଼')
buf.write('ଽ\x05еț\x02ଽା\x05џȰ\x02ା')
buf.write('ǘ\x03\x02\x02\x02ିୀ\x05яȨ\x02ୀୁ')
buf.write('\x05йȝ\x02ୁୂ\x05уȢ\x02ୂୃ')
buf.write('\x05еț\x02ୃୄ\x05їȬ\x02ୄǚ')
buf.write('\x03\x02\x02\x02\u0b45\u0b46\x05яȨ\x02\u0b46େ\x05й')
buf.write('ȝ\x02େୈ\x05уȢ\x02ୈ\u0b49\x05е')
buf.write('ț\x02\u0b49\u0b4a\x05їȬ\x02\u0b4aୋ\x07a\x02')
buf.write('\x02ୋୌ\x05йȝ\x02ୌ୍\x05љȭ')
buf.write('\x02୍ǜ\x03\x02\x02\x02\u0b4e\u0b4f\x05яȨ\x02\u0b4f')
buf.write('\u0b50\x05йȝ\x02\u0b50\u0b51\x05ыȦ\x02\u0b51')
buf.write('\u0b52\x05ёȩ\x02\u0b52\u0b53\x05зȜ\x02\u0b53')
buf.write('Ǟ\x03\x02\x02\x02\u0b54୕\x05яȨ\x02୕ୖ')
buf.write('\x05нȟ\x02ୖୗ\x05љȭ\x02ୗ\u0b58')
buf.write('\x05ћȮ\x02\u0b58\u0b59\x05нȟ\x02\u0b59\u0b5a')
buf.write('\x05лȞ\x02\u0b5aǠ\x03\x02\x02\x02\u0b5bଡ଼\x05я')
buf.write('Ȩ\x02ଡ଼ଢ଼\x05нȟ\x02ଢ଼\u0b5e\x05ѡ')
buf.write('ȱ\x02\u0b5eǢ\x03\x02\x02\x02ୟୠ\x05яȨ')
buf.write('\x02ୠୡ\x05ёȩ\x02ୡǤ\x03\x02\x02\x02ୢ')
buf.write('ୣ\x05яȨ\x02ୣ\u0b64\x05ёȩ\x02\u0b64')
buf.write('\u0b65\x05еț\x02\u0b65୦\x05ѝȯ\x02୦')
buf.write('୧\x05лȞ\x02୧୨\x05хȣ\x02୨')
buf.write('୩\x05ћȮ\x02୩Ǧ\x03\x02\x02\x02୪୫')
buf.write('\x05яȨ\x02୫୬\x05ёȩ\x02୬୭')
buf.write('\x05йȝ\x02୭୮\x05еț\x02୮୯')
buf.write('\x05йȝ\x02୯୰\x05уȢ\x02୰ୱ')
buf.write('\x05нȟ\x02ୱǨ\x03\x02\x02\x02୲୳\x05я')
buf.write('Ȩ\x02୳୴\x05ёȩ\x02୴୵\x05й')
buf.write('ȝ\x02୵୶\x05ёȩ\x02୶୷\x05ѓ')
buf.write('Ȫ\x02୷\u0b78\x05ѥȳ\x02\u0b78Ǫ\x03\x02\x02')
buf.write('\x02\u0b79\u0b7a\x05яȨ\x02\u0b7a\u0b7b\x05ёȩ')
buf.write('\x02\u0b7b\u0b7c\x05йȝ\x02\u0b7c\u0b7d\x05ѥȳ')
buf.write('\x02\u0b7d\u0b7e\x05йȝ\x02\u0b7e\u0b7f\x05ыȦ')
buf.write('\x02\u0b7f\u0b80\x05нȟ\x02\u0b80Ǭ\x03\x02\x02\x02\u0b81')
buf.write('ஂ\x05яȨ\x02ஂஃ\x05ёȩ\x02ஃ')
buf.write('\u0b84\x05нȟ\x02\u0b84அ\x05яȨ\x02அ')
buf.write('ஆ\x05ћȮ\x02ஆஇ\x05хȣ\x02இ')
buf.write('ஈ\x05ћȮ\x02ஈஉ\x05ѥȳ\x02உ')
buf.write('ஊ\x05нȟ\x02ஊ\u0b8b\x05љȭ\x02\u0b8b')
buf.write('\u0b8c\x05йȝ\x02\u0b8c\u0b8d\x05еț\x02\u0b8d')
buf.write('எ\x05ѓȪ\x02எஏ\x05хȣ\x02ஏ')
buf.write('ஐ\x05яȨ\x02ஐ\u0b91\x05сȡ\x02\u0b91')
buf.write('Ǯ\x03\x02\x02\x02ஒஓ\x05яȨ\x02ஓஔ')
buf.write('\x05ёȩ\x02ஔக\x05эȧ\x02க\u0b96')
buf.write('\x05еț\x02\u0b96\u0b97\x05ѣȲ\x02\u0b97\u0b98')
buf.write('\x05џȰ\x02\u0b98ங\x05еț\x02ஙச')
buf.write('\x05ыȦ\x02ச\u0b9b\x05ѝȯ\x02\u0b9bஜ')
buf.write('\x05нȟ\x02ஜǰ\x03\x02\x02\x02\u0b9dஞ\x05я')
buf.write('Ȩ\x02ஞட\x05ёȩ\x02ட\u0ba0\x05э')
buf.write('ȧ\x02\u0ba0\u0ba1\x05хȣ\x02\u0ba1\u0ba2\x05я')
buf.write('Ȩ\x02\u0ba2ண\x05џȰ\x02ணத\x05е')
buf.write('ț\x02த\u0ba5\x05ыȦ\x02\u0ba5\u0ba6\x05ѝ')
buf.write('ȯ\x02\u0ba6\u0ba7\x05нȟ\x02\u0ba7Dz\x03\x02\x02')
buf.write('\x02நன\x05яȨ\x02னப\x05ёȩ')
buf.write('\x02ப\u0bab\x05яȨ\x02\u0bab\u0bac\x05нȟ')
buf.write('\x02\u0bacǴ\x03\x02\x02\x02\u0badம\x05яȨ\x02ம')
buf.write('ய\x05ёȩ\x02யர\x05ёȩ\x02ர')
buf.write('ற\x05їȬ\x02றல\x05лȞ\x02ல')
buf.write('ள\x05нȟ\x02ளழ\x05їȬ\x02ழ')
buf.write('Ƕ\x03\x02\x02\x02வஶ\x05яȨ\x02ஶஷ')
buf.write('\x05ёȩ\x02ஷஸ\x05љȭ\x02ஸஹ')
buf.write('\x05йȝ\x02ஹ\u0bba\x05уȢ\x02\u0bba\u0bbb')
buf.write('\x05нȟ\x02\u0bbb\u0bbc\x05эȧ\x02\u0bbc\u0bbd')
buf.write('\x05еț\x02\u0bbdா\x05йȝ\x02ாி')
buf.write('\x05уȢ\x02ிீ\x05нȟ\x02ீு')
buf.write('\x05йȝ\x02ுூ\x05щȥ\x02ூǸ')
buf.write('\x03\x02\x02\x02\u0bc3\u0bc4\x05яȨ\x02\u0bc4\u0bc5\x05ё')
buf.write('ȩ\x02\u0bc5ெ\x05ћȮ\x02ெǺ\x03\x02\x02')
buf.write('\x02ேை\x05яȨ\x02ை\u0bc9\x05ёȩ')
buf.write('\x02\u0bc9ொ\x05ѡȱ\x02ொோ\x05еț')
buf.write('\x02ோௌ\x05хȣ\x02ௌ்\x05ћȮ')
buf.write('\x02்Ǽ\x03\x02\x02\x02\u0bce\u0bcf\x05яȨ\x02\u0bcf')
buf.write('ௐ\x05ѝȯ\x02ௐ\u0bd1\x05ыȦ\x02\u0bd1')
buf.write('\u0bd2\x05ыȦ\x02\u0bd2Ǿ\x03\x02\x02\x02\u0bd3\u0bd4')
buf.write('\x05яȨ\x02\u0bd4\u0bd5\x05ѝȯ\x02\u0bd5\u0bd6')
buf.write('\x05ыȦ\x02\u0bd6ௗ\x05ыȦ\x02ௗ\u0bd8')
buf.write('\x05љȭ\x02\u0bd8Ȁ\x03\x02\x02\x02\u0bd9\u0bda\x05я')
buf.write('Ȩ\x02\u0bda\u0bdb\x05ѝȯ\x02\u0bdb\u0bdc\x05э')
buf.write('ȧ\x02\u0bdc\u0bdd\x05зȜ\x02\u0bdd\u0bde\x05н')
buf.write('ȟ\x02\u0bde\u0bdf\x05їȬ\x02\u0bdfȂ\x03\x02\x02')
buf.write('\x02\u0be0\u0be1\x05яȨ\x02\u0be1\u0be2\x05ѝȯ')
buf.write('\x02\u0be2\u0be3\x05эȧ\x02\u0be3\u0be4\x05нȟ')
buf.write('\x02\u0be4\u0be5\x05їȬ\x02\u0be5௦\x05хȣ')
buf.write('\x02௦௧\x05йȝ\x02௧Ȅ\x03\x02\x02\x02௨')
buf.write('௩\x05яȨ\x02௩௪\x05џȰ\x02௪')
buf.write('௫\x05еț\x02௫௬\x05їȬ\x02௬')
buf.write('௭\x05йȝ\x02௭௮\x05уȢ\x02௮')
buf.write('௯\x05еț\x02௯௰\x05їȬ\x02௰')
buf.write('௱\x074\x02\x02௱Ȇ\x03\x02\x02\x02௲௳\x05ё')
buf.write('ȩ\x02௳௴\x05зȜ\x02௴௵\x05ч')
buf.write('Ȥ\x02௵௶\x05нȟ\x02௶௷\x05й')
buf.write('ȝ\x02௷௸\x05ћȮ\x02௸Ȉ\x03\x02\x02')
buf.write('\x02௹௺\x05ёȩ\x02௺\u0bfb\x05пȠ')
buf.write('\x02\u0bfbȊ\x03\x02\x02\x02\u0bfc\u0bfd\x05ёȩ\x02\u0bfd')
buf.write('\u0bfe\x05пȠ\x02\u0bfe\u0bff\x05пȠ\x02\u0bff')
buf.write('Ȍ\x03\x02\x02\x02ఀఁ\x05ёȩ\x02ఁం')
buf.write('\x05хȣ\x02ంః\x05лȞ\x02ఃȎ')
buf.write('\x03\x02\x02\x02ఄఅ\x05ёȩ\x02అఆ\x05ы')
buf.write('Ȧ\x02ఆఇ\x05лȞ\x02ఇȐ\x03\x02\x02')
buf.write('\x02ఈఉ\x05ёȩ\x02ఉఊ\x05яȨ')
buf.write('\x02ఊȒ\x03\x02\x02\x02ఋఌ\x05ёȩ\x02ఌ')
buf.write('\u0c0d\x05яȨ\x02\u0c0dఎ\x05ыȦ\x02ఎ')
buf.write('ఏ\x05ѥȳ\x02ఏȔ\x03\x02\x02\x02ఐ\u0c11')
buf.write('\x05ёȩ\x02\u0c11ఒ\x05ѓȪ\x02ఒఓ')
buf.write('\x05нȟ\x02ఓఔ\x05яȨ\x02ఔȖ')
buf.write('\x03\x02\x02\x02కఖ\x05ёȩ\x02ఖగ\x05ѓ')
buf.write('Ȫ\x02గఘ\x05ћȮ\x02ఘఙ\x05х')
buf.write('ȣ\x02ఙచ\x05ёȩ\x02చఛ\x05я')
buf.write('Ȩ\x02ఛȘ\x03\x02\x02\x02జఝ\x05ёȩ')
buf.write('\x02ఝఞ\x05їȬ\x02ఞȚ\x03\x02\x02\x02ట')
buf.write('ఠ\x05ёȩ\x02ఠడ\x05їȬ\x02డ')
buf.write('ఢ\x05еț\x02ఢణ\x05лȞ\x02ణ')
buf.write('త\x05еț\x02తథ\x05ћȮ\x02థ')
buf.write('ద\x05еț\x02దȜ\x03\x02\x02\x02ధన')
buf.write('\x05ёȩ\x02న\u0c29\x05їȬ\x02\u0c29ప')
buf.write('\x05лȞ\x02పఫ\x05нȟ\x02ఫబ')
buf.write('\x05їȬ\x02బȞ\x03\x02\x02\x02భమ\x05ё')
buf.write('ȩ\x02మయ\x05їȬ\x02యర\x05л')
buf.write('Ȟ\x02రఱ\x05хȣ\x02ఱల\x05я')
buf.write('Ȩ\x02లళ\x05еț\x02ళఴ\x05ы')
buf.write('Ȧ\x02ఴవ\x05хȣ\x02వశ\x05ћ')
buf.write('Ȯ\x02శష\x05ѥȳ\x02షȠ\x03\x02\x02')
buf.write('\x02సహ\x05ёȩ\x02హ\u0c3a\x05љȭ')
buf.write('\x02\u0c3a\u0c3b\x05нȟ\x02\u0c3b఼\x05їȬ')
buf.write('\x02఼ఽ\x05їȬ\x02ఽా\x05ёȩ')
buf.write('\x02ాి\x05їȬ\x02ిȢ\x03\x02\x02\x02ీ')
buf.write('ు\x05ёȩ\x02ుూ\x05ѝȯ\x02ూ')
buf.write('ృ\x05ћȮ\x02ృȤ\x03\x02\x02\x02ౄ\u0c45')
buf.write('\x05ёȩ\x02\u0c45ె\x05ѝȯ\x02ెే')
buf.write('\x05ћȮ\x02ేై\x05нȟ\x02ై\u0c49')
buf.write('\x05їȬ\x02\u0c49Ȧ\x03\x02\x02\x02ొో\x05ё')
buf.write('ȩ\x02ోౌ\x05џȰ\x02ౌ్\x05н')
buf.write('ȟ\x02్\u0c4e\x05їȬ\x02\u0c4eȨ\x03\x02\x02')
buf.write('\x02\u0c4f\u0c50\x05ёȩ\x02\u0c50\u0c51\x05џȰ')
buf.write('\x02\u0c51\u0c52\x05нȟ\x02\u0c52\u0c53\x05їȬ')
buf.write('\x02\u0c53\u0c54\x05їȬ\x02\u0c54ౕ\x05хȣ')
buf.write('\x02ౕౖ\x05лȞ\x02ౖ\u0c57\x05хȣ')
buf.write('\x02\u0c57ౘ\x05яȨ\x02ౘౙ\x05сȡ')
buf.write('\x02ౙȪ\x03\x02\x02\x02ౚ\u0c5b\x05ѓȪ\x02\u0c5b')
buf.write('\u0c5c\x05еț\x02\u0c5cౝ\x05йȝ\x02ౝ')
buf.write('\u0c5e\x05щȥ\x02\u0c5e\u0c5f\x05еț\x02\u0c5f')
buf.write('ౠ\x05сȡ\x02ౠౡ\x05нȟ\x02ౡ')
buf.write('Ȭ\x03\x02\x02\x02ౢౣ\x05ѓȪ\x02ౣ\u0c64')
buf.write('\x05еț\x02\u0c64\u0c65\x05їȬ\x02\u0c65౦')
buf.write('\x05еț\x02౦౧\x05ыȦ\x02౧౨')
buf.write('\x05ыȦ\x02౨౩\x05нȟ\x02౩౪')
buf.write('\x05ыȦ\x02౪౫\x07a\x02\x02౫౬\x05н')
buf.write('ȟ\x02౬౭\x05яȨ\x02౭౮\x05е')
buf.write('ț\x02౮౯\x05зȜ\x02౯\u0c70\x05ы')
buf.write('Ȧ\x02\u0c70\u0c71\x05нȟ\x02\u0c71Ȯ\x03\x02\x02')
buf.write('\x02\u0c72\u0c73\x05ѓȪ\x02\u0c73\u0c74\x05еț')
buf.write('\x02\u0c74\u0c75\x05їȬ\x02\u0c75\u0c76\x05еț')
buf.write('\x02\u0c76౷\x05эȧ\x02౷౸\x05нȟ')
buf.write('\x02౸౹\x05ћȮ\x02౹౺\x05нȟ')
buf.write('\x02౺౻\x05їȬ\x02౻౼\x05љȭ')
buf.write('\x02౼Ȱ\x03\x02\x02\x02౽౾\x05ѓȪ\x02౾')
buf.write('౿\x05еț\x02౿ಀ\x05їȬ\x02ಀ')
buf.write('ಁ\x05нȟ\x02ಁಂ\x05яȨ\x02ಂ')
buf.write('ಃ\x05ћȮ\x02ಃȲ\x03\x02\x02\x02಄ಅ')
buf.write('\x05ѓȪ\x02ಅಆ\x05еț\x02ಆಇ')
buf.write('\x05їȬ\x02ಇಈ\x05ћȮ\x02ಈಉ')
buf.write('\x05хȣ\x02ಉಊ\x05ћȮ\x02ಊಋ')
buf.write('\x05хȣ\x02ಋಌ\x05ёȩ\x02ಌ\u0c8d')
buf.write('\x05яȨ\x02\u0c8dȴ\x03\x02\x02\x02ಎಏ\x05ѓ')
buf.write('Ȫ\x02ಏಐ\x05еț\x02ಐ\u0c91\x05љ')
buf.write('ȭ\x02\u0c91ಒ\x05љȭ\x02ಒಓ\x05х')
buf.write('ȣ\x02ಓಔ\x05яȨ\x02ಔಕ\x05с')
buf.write('ȡ\x02ಕȶ\x03\x02\x02\x02ಖಗ\x05ѓȪ')
buf.write('\x02ಗಘ\x05еț\x02ಘಙ\x05ћȮ')
buf.write('\x02ಙಚ\x05уȢ\x02ಚȸ\x03\x02\x02\x02ಛ')
buf.write("ಜ\x07'\x02\x02ಜಝ\x05їȬ\x02ಝಞ")
buf.write('\x05ёȩ\x02ಞಟ\x05ѡȱ\x02ಟಠ')
buf.write('\x05ћȮ\x02ಠಡ\x05ѥȳ\x02ಡಢ')
buf.write('\x05ѓȪ\x02ಢಣ\x05нȟ\x02ಣȺ')
buf.write("\x03\x02\x02\x02ತಥ\x07'\x02\x02ಥದ\x05ћȮ")
buf.write('\x02ದಧ\x05ѥȳ\x02ಧನ\x05ѓȪ')
buf.write('\x02ನ\u0ca9\x05нȟ\x02\u0ca9ȼ\x03\x02\x02\x02ಪ')
buf.write('ಫ\x05ѓȪ\x02ಫಬ\x05хȣ\x02ಬ')
buf.write('ಭ\x05ѓȪ\x02ಭಮ\x05нȟ\x02ಮ')
buf.write('ಯ\x05ыȦ\x02ಯರ\x05хȣ\x02ರ')
buf.write('ಱ\x05яȨ\x02ಱಲ\x05нȟ\x02ಲ')
buf.write('ಳ\x05лȞ\x02ಳȾ\x03\x02\x02\x02\u0cb4ವ')
buf.write('\x05ѓȪ\x02ವಶ\x05хȣ\x02ಶಷ')
buf.write('\x05џȰ\x02ಷಸ\x05ёȩ\x02ಸಹ')
buf.write('\x05ћȮ\x02ಹɀ\x03\x02\x02\x02\u0cba\u0cbb\x05ѓ')
buf.write('Ȫ\x02\u0cbb಼\x05ыȦ\x02಼ಽ\x05е')
buf.write('ț\x02ಽಾ\x05яȨ\x02ಾɂ\x03\x02\x02')
buf.write('\x02ಿೀ\x05ѓȪ\x02ೀು\x05ыȦ')
buf.write('\x02ುೂ\x05љȭ\x02ೂೃ\x07a\x02\x02ೃ')
buf.write('ೄ\x05хȣ\x02ೄ\u0cc5\x05яȨ\x02\u0cc5')
buf.write('ೆ\x05ћȮ\x02ೆೇ\x05нȟ\x02ೇ')
buf.write('ೈ\x05сȡ\x02ೈ\u0cc9\x05нȟ\x02\u0cc9')
buf.write('ೊ\x05їȬ\x02ೊɄ\x03\x02\x02\x02ೋೌ')
buf.write('\x05ѓȪ\x02ೌ್\x05ёȩ\x02್\u0cce')
buf.write('\x05љȭ\x02\u0cce\u0ccf\x05хȣ\x02\u0ccf\u0cd0')
buf.write('\x05ћȮ\x02\u0cd0\u0cd1\x05хȣ\x02\u0cd1\u0cd2')
buf.write('\x05џȰ\x02\u0cd2\u0cd3\x05нȟ\x02\u0cd3Ɇ')
buf.write('\x03\x02\x02\x02\u0cd4ೕ\x05ѓȪ\x02ೕೖ\x05ё')
buf.write('ȩ\x02ೖ\u0cd7\x05љȭ\x02\u0cd7\u0cd8\x05х')
buf.write('ȣ\x02\u0cd8\u0cd9\x05ћȮ\x02\u0cd9\u0cda\x05х')
buf.write('ȣ\x02\u0cda\u0cdb\x05џȰ\x02\u0cdb\u0cdc\x05н')
buf.write('ȟ\x02\u0cdcೝ\x05яȨ\x02ೝɈ\x03\x02\x02')
buf.write('\x02ೞ\u0cdf\x05ѓȪ\x02\u0cdfೠ\x05їȬ')
buf.write('\x02ೠೡ\x05еț\x02ೡೢ\x05сȡ')
buf.write('\x02ೢೣ\x05эȧ\x02ೣ\u0ce4\x05еț')
buf.write('\x02\u0ce4Ɋ\x03\x02\x02\x02\u0ce5೦\x05ѓȪ\x02೦')
buf.write('೧\x05їȬ\x02೧೨\x05нȟ\x02೨')
buf.write('೩\x05йȝ\x02೩೪\x05нȟ\x02೪')
buf.write('೫\x05лȞ\x02೫೬\x05хȣ\x02೬')
buf.write('೭\x05яȨ\x02೭೮\x05сȡ\x02೮')
buf.write('Ɍ\x03\x02\x02\x02೯\u0cf0\x05ѓȪ\x02\u0cf0ೱ')
buf.write('\x05їȬ\x02ೱೲ\x05нȟ\x02ೲ\u0cf3')
buf.write('\x05йȝ\x02\u0cf3\u0cf4\x05хȣ\x02\u0cf4\u0cf5')
buf.write('\x05љȭ\x02\u0cf5\u0cf6\x05хȣ\x02\u0cf6\u0cf7')
buf.write('\x05ёȩ\x02\u0cf7\u0cf8\x05яȨ\x02\u0cf8Ɏ')
buf.write('\x03\x02\x02\x02\u0cf9\u0cfa\x05ѓȪ\x02\u0cfa\u0cfb\x05ї')
buf.write('Ȭ\x02\u0cfb\u0cfc\x05нȟ\x02\u0cfc\u0cfd\x05љ')
buf.write('ȭ\x02\u0cfd\u0cfe\x05нȟ\x02\u0cfe\u0cff\x05я')
buf.write('Ȩ\x02\u0cffഀ\x05ћȮ\x02ഀɐ\x03\x02\x02')
buf.write('\x02ഁം\x05ѓȪ\x02ംഃ\x05їȬ')
buf.write('\x02ഃഄ\x05хȣ\x02ഄഅ\x05ёȩ')
buf.write('\x02അആ\x05їȬ\x02ആɒ\x03\x02\x02\x02ഇ')
buf.write('ഈ\x05ѓȪ\x02ഈഉ\x05їȬ\x02ഉ')
buf.write('ഊ\x05ёȩ\x02ഊഋ\x05йȝ\x02ഋ')
buf.write('ഌ\x05нȟ\x02ഌ\u0d0d\x05лȞ\x02\u0d0d')
buf.write('എ\x05ѝȯ\x02എഏ\x05їȬ\x02ഏ')
buf.write('ഐ\x05нȟ\x02ഐɔ\x03\x02\x02\x02\u0d11ഒ')
buf.write('\x05їȬ\x02ഒഓ\x05еț\x02ഓഔ')
buf.write('\x05хȣ\x02ഔക\x05љȭ\x02കഖ')
buf.write('\x05нȟ\x02ഖɖ\x03\x02\x02\x02ഗഘ\x05ї')
buf.write('Ȭ\x02ഘങ\x05еț\x02ങച\x05я')
buf.write('Ȩ\x02ചഛ\x05сȡ\x02ഛജ\x05н')
buf.write('ȟ\x02ജɘ\x03\x02\x02\x02ഝഞ\x05їȬ')
buf.write('\x02ഞട\x05еț\x02ടഠ\x05ѡȱ')
buf.write('\x02ഠɚ\x03\x02\x02\x02ഡഢ\x05їȬ\x02ഢ')
buf.write('ണ\x05нȟ\x02ണത\x05еț\x02ത')
buf.write('ഥ\x05лȞ\x02ഥɜ\x03\x02\x02\x02ദധ')
buf.write('\x05їȬ\x02ധന\x05нȟ\x02നഩ')
buf.write('\x05еț\x02ഩപ\x05ыȦ\x02പɞ')
buf.write('\x03\x02\x02\x02ഫബ\x05їȬ\x02ബഭ\x05н')
buf.write('ȟ\x02ഭമ\x05йȝ\x02മയ\x05ё')
buf.write('ȩ\x02യര\x05їȬ\x02രറ\x05л')
buf.write('Ȟ\x02റɠ\x03\x02\x02\x02ലള\x05їȬ')
buf.write('\x02ളഴ\x05нȟ\x02ഴവ\x05пȠ')
buf.write('\x02വɢ\x03\x02\x02\x02ശഷ\x05їȬ\x02ഷ')
buf.write('സ\x05нȟ\x02സഹ\x05пȠ\x02ഹ')
buf.write('ഺ\x05нȟ\x02ഺ഻\x05їȬ\x02഻')
buf.write('഼\x05нȟ\x02഼ഽ\x05яȨ\x02ഽ')
buf.write('ാ\x05йȝ\x02ാി\x05нȟ\x02ി')
buf.write('ɤ\x03\x02\x02\x02ീു\x05їȬ\x02ുൂ')
buf.write('\x05нȟ\x02ൂൃ\x05пȠ\x02ൃൄ')
buf.write('\x05нȟ\x02ൄ\u0d45\x05їȬ\x02\u0d45െ')
buf.write('\x05нȟ\x02െേ\x05яȨ\x02േൈ')
buf.write('\x05йȝ\x02ൈ\u0d49\x05хȣ\x02\u0d49ൊ')
buf.write('\x05яȨ\x02ൊോ\x05сȡ\x02ോɦ')
buf.write('\x03\x02\x02\x02ൌ്\x05їȬ\x02്ൎ\x05н')
buf.write('ȟ\x02ൎ൏\x05чȤ\x02൏\u0d50\x05н')
buf.write('ȟ\x02\u0d50\u0d51\x05йȝ\x02\u0d51\u0d52\x05ћ')
buf.write('Ȯ\x02\u0d52ɨ\x03\x02\x02\x02\u0d53ൔ\x05їȬ')
buf.write('\x02ൔൕ\x05нȟ\x02ൕൖ\x05ыȦ')
buf.write('\x02ൖൗ\x05хȣ\x02ൗ൘\x05нȟ')
buf.write('\x02൘൙\x05љȭ\x02൙൚\x07a\x02\x02൚')
buf.write('൛\x05ёȩ\x02൛൜\x05яȨ\x02൜')
buf.write('ɪ\x03\x02\x02\x02൝൞\x05їȬ\x02൞ൟ')
buf.write('\x05нȟ\x02ൟൠ\x05яȨ\x02ൠൡ')
buf.write('\x05еț\x02ൡൢ\x05эȧ\x02ൢൣ')
buf.write('\x05нȟ\x02ൣɬ\x03\x02\x02\x02\u0d64\u0d65\x05ї')
buf.write('Ȭ\x02\u0d65൦\x05нȟ\x02൦൧\x05ѓ')
buf.write('Ȫ\x02൧൨\x05ыȦ\x02൨൩\x05е')
buf.write('ț\x02൩൪\x05йȝ\x02൪൫\x05н')
buf.write('ȟ\x02൫ɮ\x03\x02\x02\x02൬൭\x05їȬ')
buf.write('\x02൭൮\x05нȟ\x02൮൯\x05љȭ')
buf.write('\x02൯൰\x05ѓȪ\x02൰൱\x05нȟ')
buf.write('\x02൱൲\x05йȝ\x02൲൳\x05ћȮ')
buf.write('\x02൳ɰ\x03\x02\x02\x02൴൵\x05їȬ\x02൵')
buf.write('൶\x05нȟ\x02൶൷\x05љȭ\x02൷')
buf.write('൸\x05ћȮ\x02൸൹\x05їȬ\x02൹')
buf.write('ൺ\x05хȣ\x02ൺൻ\x05йȝ\x02ൻ')
buf.write('ർ\x05ћȮ\x02ർൽ\x07a\x02\x02ൽൾ')
buf.write('\x05їȬ\x02ൾൿ\x05нȟ\x02ൿ\u0d80')
buf.write('\x05пȠ\x02\u0d80ඁ\x05нȟ\x02ඁං')
buf.write('\x05їȬ\x02ංඃ\x05нȟ\x02ඃ\u0d84')
buf.write('\x05яȨ\x02\u0d84අ\x05йȝ\x02අආ')
buf.write('\x05нȟ\x02ආඇ\x05љȭ\x02ඇɲ')
buf.write('\x03\x02\x02\x02ඈඉ\x05їȬ\x02ඉඊ\x05н')
buf.write('ȟ\x02ඊඋ\x05љȭ\x02උඌ\x05ѝ')
buf.write('ȯ\x02ඌඍ\x05ыȦ\x02ඍඎ\x05ћ')
buf.write('Ȯ\x02ඎɴ\x03\x02\x02\x02ඏඐ\x05їȬ')
buf.write('\x02ඐඑ\x05нȟ\x02එඒ\x05љȭ')
buf.write('\x02ඒඓ\x05ѝȯ\x02ඓඔ\x05ыȦ')
buf.write('\x02ඔඕ\x05ћȮ\x02ඕඖ\x07a\x02\x02ඖ')
buf.write('\u0d97\x05йȝ\x02\u0d97\u0d98\x05еț\x02\u0d98')
buf.write('\u0d99\x05йȝ\x02\u0d99ක\x05уȢ\x02ක')
buf.write('ඛ\x05нȟ\x02ඛɶ\x03\x02\x02\x02ගඝ')
buf.write('\x05їȬ\x02ඝඞ\x05нȟ\x02ඞඟ')
buf.write('\x05ћȮ\x02ඟච\x05ѝȯ\x02චඡ')
buf.write('\x05їȬ\x02ඡජ\x05яȨ\x02ජɸ')
buf.write('\x03\x02\x02\x02ඣඤ\x05їȬ\x02ඤඥ\x05н')
buf.write('ȟ\x02ඥඦ\x05ћȮ\x02ඦට\x05ѝ')
buf.write('ȯ\x02ටඨ\x05їȬ\x02ඨඩ\x05я')
buf.write('Ȩ\x02ඩඪ\x05хȣ\x02ඪණ\x05я')
buf.write('Ȩ\x02ණඬ\x05сȡ\x02ඬɺ\x03\x02\x02')
buf.write('\x02තථ\x05їȬ\x02ථද\x05нȟ')
buf.write('\x02දධ\x05ѝȯ\x02ධන\x05љȭ')
buf.write('\x02න\u0db2\x05нȟ\x02\u0db2ɼ\x03\x02\x02\x02ඳ')
buf.write('ප\x05їȬ\x02පඵ\x05нȟ\x02ඵ')
buf.write('බ\x05џȰ\x02බභ\x05нȟ\x02භ')
buf.write('ම\x05їȬ\x02මඹ\x05љȭ\x02ඹ')
buf.write('ය\x05нȟ\x02යɾ\x03\x02\x02\x02ර\u0dbc')
buf.write('\x05їȬ\x02\u0dbcල\x05нȟ\x02ල\u0dbe')
buf.write('\x05џȰ\x02\u0dbe\u0dbf\x05ёȩ\x02\u0dbfව')
buf.write('\x05щȥ\x02වශ\x05нȟ\x02ශʀ')
buf.write('\x03\x02\x02\x02ෂස\x05їȬ\x02සහ\x05х')
buf.write('ȣ\x02හළ\x05сȡ\x02ළෆ\x05у')
buf.write('Ȣ\x02ෆ\u0dc7\x05ћȮ\x02\u0dc7ʂ\x03\x02\x02')
buf.write('\x02\u0dc8\u0dc9\x05їȬ\x02\u0dc9්\x05ёȩ')
buf.write('\x02්\u0dcb\x05ыȦ\x02\u0dcb\u0dcc\x05ыȦ')
buf.write('\x02\u0dcc\u0dcd\x05зȜ\x02\u0dcd\u0dce\x05еț')
buf.write('\x02\u0dceා\x05йȝ\x02ාැ\x05щȥ')
buf.write('\x02ැʄ\x03\x02\x02\x02ෑි\x05їȬ\x02ි')
buf.write('ී\x05ёȩ\x02ීු\x05ыȦ\x02ු')
buf.write('\u0dd5\x05ыȦ\x02\u0dd5ූ\x05ѝȯ\x02ූ')
buf.write('\u0dd7\x05ѓȪ\x02\u0dd7ʆ\x03\x02\x02\x02ෘෙ')
buf.write('\x05їȬ\x02ෙේ\x05ёȩ\x02ේෛ')
buf.write('\x05ѡȱ\x02ෛʈ\x03\x02\x02\x02ොෝ\x05ї')
buf.write('Ȭ\x02ෝෞ\x05ёȩ\x02ෞෟ\x05ѡ')
buf.write('ȱ\x02ෟ\u0de0\x05хȣ\x02\u0de0\u0de1\x05л')
buf.write('Ȟ\x02\u0de1ʊ\x03\x02\x02\x02\u0de2\u0de3\x05їȬ')
buf.write('\x02\u0de3\u0de4\x05ёȩ\x02\u0de4\u0de5\x05ѡȱ')
buf.write('\x02\u0de5෦\x05љȭ\x02෦ʌ\x03\x02\x02\x02෧')
buf.write('෨\x05їȬ\x02෨෩\x05ѝȯ\x02෩')
buf.write('෪\x05ыȦ\x02෪෫\x05нȟ\x02෫')
buf.write('෬\x05љȭ\x02෬ʎ\x03\x02\x02\x02෭෮')
buf.write('\x05љȭ\x02෮෯\x05еț\x02෯\u0df0')
buf.write('\x05эȧ\x02\u0df0\u0df1\x05ѓȪ\x02\u0df1ෲ')
buf.write('\x05ыȦ\x02ෲෳ\x05нȟ\x02ෳʐ')
buf.write('\x03\x02\x02\x02෴\u0df5\x05љȭ\x02\u0df5\u0df6\x05е')
buf.write('ț\x02\u0df6\u0df7\x05џȰ\x02\u0df7\u0df8\x05н')
buf.write('ȟ\x02\u0df8ʒ\x03\x02\x02\x02\u0df9\u0dfa\x05љȭ')
buf.write('\x02\u0dfa\u0dfb\x05еț\x02\u0dfb\u0dfc\x05џȰ')
buf.write('\x02\u0dfc\u0dfd\x05нȟ\x02\u0dfd\u0dfe\x05ѓȪ')
buf.write('\x02\u0dfe\u0dff\x05ёȩ\x02\u0dff\u0e00\x05хȣ')
buf.write('\x02\u0e00ก\x05яȨ\x02กข\x05ћȮ')
buf.write('\x02ขʔ\x03\x02\x02\x02ฃค\x05љȭ\x02ค')
buf.write('ฅ\x05йȝ\x02ฅฆ\x05уȢ\x02ฆ')
buf.write('ง\x05нȟ\x02งจ\x05эȧ\x02จ')
buf.write('ฉ\x05еț\x02ฉʖ\x03\x02\x02\x02ชซ')
buf.write('\x05љȭ\x02ซฌ\x05йȝ\x02ฌญ')
buf.write('\x05уȢ\x02ญฎ\x05нȟ\x02ฎฏ')
buf.write('\x05эȧ\x02ฏฐ\x05еț\x02ฐฑ')
buf.write('\x05йȝ\x02ฑฒ\x05уȢ\x02ฒณ')
buf.write('\x05нȟ\x02ณด\x05йȝ\x02ดต')
buf.write('\x05щȥ\x02ตʘ\x03\x02\x02\x02ถท\x05љ')
buf.write('ȭ\x02ทธ\x05йȝ\x02ธน\x05я')
buf.write('Ȩ\x02นʚ\x03\x02\x02\x02บป\x05љȭ')
buf.write('\x02ปผ\x05нȟ\x02ผฝ\x05еț')
buf.write('\x02ฝพ\x05їȬ\x02พฟ\x05йȝ')
buf.write('\x02ฟภ\x05уȢ\x02ภʜ\x03\x02\x02\x02ม')
buf.write('ย\x05љȭ\x02ยร\x05нȟ\x02ร')
buf.write('ฤ\x05йȝ\x02ฤล\x05ёȩ\x02ล')
buf.write('ฦ\x05яȨ\x02ฦว\x05лȞ\x02ว')
buf.write('ʞ\x03\x02\x02\x02ศษ\x05љȭ\x02ษส')
buf.write('\x05нȟ\x02สห\x05нȟ\x02หฬ')
buf.write('\x05лȞ\x02ฬʠ\x03\x02\x02\x02อฮ\x05љ')
buf.write('ȭ\x02ฮฯ\x05нȟ\x02ฯะ\x05с')
buf.write('ȡ\x02ะั\x05эȧ\x02ัา\x05н')
buf.write('ȟ\x02าำ\x05яȨ\x02ำิ\x05ћ')
buf.write('Ȯ\x02ิʢ\x03\x02\x02\x02ีึ\x05љȭ')
buf.write('\x02ึื\x05нȟ\x02ืุ\x05ыȦ')
buf.write('\x02ุู\x05нȟ\x02ฺู\x05йȝ')
buf.write('\x02ฺ\u0e3b\x05ћȮ\x02\u0e3bʤ\x03\x02\x02\x02\u0e3c')
buf.write('\u0e3d\x05љȭ\x02\u0e3d\u0e3e\x05нȟ\x02\u0e3e')
buf.write('฿\x05ыȦ\x02฿เ\x05пȠ\x02เ')
buf.write('ʦ\x03\x02\x02\x02แโ\x05љȭ\x02โใ')
buf.write('\x05нȟ\x02ใไ\x05ѕȫ\x02ไๅ')
buf.write('\x05ѝȯ\x02ๅๆ\x05нȟ\x02ๆ็')
buf.write('\x05яȨ\x02็่\x05йȝ\x02่้')
buf.write('\x05нȟ\x02้ʨ\x03\x02\x02\x02๊๋\x05љ')
buf.write('ȭ\x02๋์\x05нȟ\x02์ํ\x05ѕ')
buf.write('ȫ\x02ํ๎\x05ѝȯ\x02๎๏\x05н')
buf.write('ȟ\x02๏๐\x05яȨ\x02๐๑\x05ћ')
buf.write('Ȯ\x02๑๒\x05хȣ\x02๒๓\x05е')
buf.write('ț\x02๓๔\x05ыȦ\x02๔ʪ\x03\x02\x02')
buf.write('\x02๕๖\x05љȭ\x02๖๗\x05нȟ')
buf.write('\x02๗๘\x05їȬ\x02๘๙\x05хȣ')
buf.write('\x02๙๚\x05еț\x02๚๛\x05ыȦ')
buf.write('\x02๛\u0e5c\x05хȣ\x02\u0e5c\u0e5d\x05ѧȴ')
buf.write('\x02\u0e5d\u0e5e\x05еț\x02\u0e5e\u0e5f\x05зȜ')
buf.write('\x02\u0e5f\u0e60\x05ыȦ\x02\u0e60\u0e61\x05нȟ')
buf.write('\x02\u0e61ʬ\x03\x02\x02\x02\u0e62\u0e63\x05љȭ\x02\u0e63')
buf.write('\u0e64\x05нȟ\x02\u0e64\u0e65\x05їȬ\x02\u0e65')
buf.write('\u0e66\x05хȣ\x02\u0e66\u0e67\x05еț\x02\u0e67')
buf.write('\u0e68\x05ыȦ\x02\u0e68\u0e69\x05ыȦ\x02\u0e69')
buf.write('\u0e6a\x05ѥȳ\x02\u0e6a\u0e6b\x07a\x02\x02\u0e6b\u0e6c')
buf.write('\x05їȬ\x02\u0e6c\u0e6d\x05нȟ\x02\u0e6d\u0e6e')
buf.write('\x05ѝȯ\x02\u0e6e\u0e6f\x05љȭ\x02\u0e6f\u0e70')
buf.write('\x05еț\x02\u0e70\u0e71\x05зȜ\x02\u0e71\u0e72')
buf.write('\x05ыȦ\x02\u0e72\u0e73\x05нȟ\x02\u0e73ʮ')
buf.write('\x03\x02\x02\x02\u0e74\u0e75\x05љȭ\x02\u0e75\u0e76\x05н')
buf.write('ȟ\x02\u0e76\u0e77\x05їȬ\x02\u0e77\u0e78\x05џ')
buf.write('Ȱ\x02\u0e78\u0e79\x05нȟ\x02\u0e79\u0e7a\x05ї')
buf.write('Ȭ\x02\u0e7a\u0e7b\x05нȟ\x02\u0e7b\u0e7c\x05ї')
buf.write('Ȭ\x02\u0e7c\u0e7d\x05їȬ\x02\u0e7d\u0e7e\x05ё')
buf.write('ȩ\x02\u0e7e\u0e7f\x05їȬ\x02\u0e7fʰ\x03\x02\x02')
buf.write('\x02\u0e80ກ\x05љȭ\x02ກຂ\x05нȟ')
buf.write('\x02ຂ\u0e83\x05љȭ\x02\u0e83ຄ\x05љȭ')
buf.write('\x02ຄ\u0e85\x05хȣ\x02\u0e85ຆ\x05ёȩ')
buf.write('\x02ຆງ\x05яȨ\x02ງຈ\x05ћȮ')
buf.write('\x02ຈຉ\x05хȣ\x02ຉຊ\x05эȧ')
buf.write('\x02ຊ\u0e8b\x05нȟ\x02\u0e8bຌ\x05ѧȴ')
buf.write('\x02ຌຍ\x05ёȩ\x02ຍຎ\x05яȨ')
buf.write('\x02ຎຏ\x05нȟ\x02ຏʲ\x03\x02\x02\x02ຐ')
buf.write('ຑ\x05љȭ\x02ຑຒ\x05нȟ\x02ຒ')
buf.write('ຓ\x05ћȮ\x02ຓʴ\x03\x02\x02\x02ດຕ')
buf.write('\x05љȭ\x02ຕຖ\x05нȟ\x02ຖທ')
buf.write('\x05ћȮ\x02ທຘ\x05љȭ\x02ຘʶ')
buf.write('\x03\x02\x02\x02ນບ\x05љȭ\x02ບປ\x05н')
buf.write('ȟ\x02ປຜ\x05ћȮ\x02ຜຝ\x05ћ')
buf.write('Ȯ\x02ຝພ\x05хȣ\x02ພຟ\x05я')
buf.write('Ȩ\x02ຟຠ\x05сȡ\x02ຠມ\x05љ')
buf.write('ȭ\x02ມʸ\x03\x02\x02\x02ຢຣ\x05љȭ')
buf.write('\x02ຣ\u0ea4\x05уȢ\x02\u0ea4ລ\x05еț')
buf.write('\x02ລ\u0ea6\x05їȬ\x02\u0ea6ວ\x05нȟ')
buf.write('\x02ວʺ\x03\x02\x02\x02ຨຩ\x05љȭ\x02ຩ')
buf.write('ສ\x05уȢ\x02ສຫ\x05ёȩ\x02ຫ')
buf.write('ຬ\x05ѡȱ\x02ຬʼ\x03\x02\x02\x02ອຮ')
buf.write('\x05љȭ\x02ຮຯ\x05уȢ\x02ຯະ')
buf.write('\x05ѝȯ\x02ະັ\x05ћȮ\x02ັາ')
buf.write('\x05лȞ\x02າຳ\x05ёȩ\x02ຳິ')
buf.write('\x05ѡȱ\x02ິີ\x05яȨ\x02ີʾ')
buf.write('\x03\x02\x02\x02ຶື\x05љȭ\x02ືຸ\x05х')
buf.write('ȣ\x02ຸູ\x05зȜ\x02຺ູ\x05ы')
buf.write('Ȧ\x02຺ົ\x05хȣ\x02ົຼ\x05я')
buf.write('Ȩ\x02ຼຽ\x05сȡ\x02ຽ\u0ebe\x05љ')
buf.write('ȭ\x02\u0ebeˀ\x03\x02\x02\x02\u0ebfເ\x05љȭ')
buf.write('\x02ເແ\x05хȣ\x02ແໂ\x05сȡ')
buf.write('\x02ໂໃ\x05яȨ\x02ໃໄ\x05ћȮ')
buf.write('\x02ໄ\u0ec5\x05ѥȳ\x02\u0ec5ໆ\x05ѓȪ')
buf.write('\x02ໆ\u0ec7\x05нȟ\x02\u0ec7˂\x03\x02\x02\x02່')
buf.write('້\x05љȭ\x02້໊\x05хȣ\x02໊')
buf.write('໋\x05эȧ\x02໋໌\x05ѓȪ\x02໌')
buf.write('ໍ\x05ыȦ\x02ໍ\u0ece\x05нȟ\x02\u0ece')
buf.write('\u0ecf\x07a\x02\x02\u0ecf໐\x05хȣ\x02໐໑')
buf.write('\x05яȨ\x02໑໒\x05ћȮ\x02໒໓')
buf.write('\x05нȟ\x02໓໔\x05сȡ\x02໔໕')
buf.write('\x05нȟ\x02໕໖\x05їȬ\x02໖˄')
buf.write('\x03\x02\x02\x02໗໘\x05љȭ\x02໘໙\x05х')
buf.write('ȣ\x02໙\u0eda\x05яȨ\x02\u0eda\u0edb\x05с')
buf.write('ȡ\x02\u0edbໜ\x05ыȦ\x02ໜໝ\x05н')
buf.write('ȟ\x02ໝˆ\x03\x02\x02\x02ໞໟ\x05љȭ')
buf.write('\x02ໟ\u0ee0\x05хȣ\x02\u0ee0\u0ee1\x05ѧȴ')
buf.write('\x02\u0ee1\u0ee2\x05нȟ\x02\u0ee2ˈ\x03\x02\x02\x02\u0ee3')
buf.write('\u0ee4\x05љȭ\x02\u0ee4\u0ee5\x05щȥ\x02\u0ee5')
buf.write('\u0ee6\x05хȣ\x02\u0ee6\u0ee7\x05ѓȪ\x02\u0ee7')
buf.write('ˊ\x03\x02\x02\x02\u0ee8\u0ee9\x05љȭ\x02\u0ee9\u0eea')
buf.write('\x05эȧ\x02\u0eea\u0eeb\x05еț\x02\u0eeb\u0eec')
buf.write('\x05ыȦ\x02\u0eec\u0eed\x05ыȦ\x02\u0eed\u0eee')
buf.write('\x05хȣ\x02\u0eee\u0eef\x05яȨ\x02\u0eef\u0ef0')
buf.write('\x05ћȮ\x02\u0ef0ˌ\x03\x02\x02\x02\u0ef1\u0ef2\x05љ')
buf.write('ȭ\x02\u0ef2\u0ef3\x05яȨ\x02\u0ef3\u0ef4\x05е')
buf.write('ț\x02\u0ef4\u0ef5\x05ѓȪ\x02\u0ef5\u0ef6\x05љ')
buf.write('ȭ\x02\u0ef6\u0ef7\x05уȢ\x02\u0ef7\u0ef8\x05ё')
buf.write('ȩ\x02\u0ef8\u0ef9\x05ћȮ\x02\u0ef9ˎ\x03\x02\x02')
buf.write('\x02\u0efa\u0efb\x05љȭ\x02\u0efb\u0efc\x05ёȩ')
buf.write('\x02\u0efc\u0efd\x05эȧ\x02\u0efd\u0efe\x05нȟ')
buf.write('\x02\u0efeː\x03\x02\x02\x02\u0effༀ\x05љȭ\x02ༀ')
buf.write('༁\x05ѓȪ\x02༁༂\x05нȟ\x02༂')
buf.write('༃\x05йȝ\x02༃༄\x05хȣ\x02༄')
buf.write('༅\x05пȠ\x02༅༆\x05хȣ\x02༆')
buf.write('༇\x05йȝ\x02༇༈\x05еț\x02༈')
buf.write('༉\x05ћȮ\x02༉༊\x05хȣ\x02༊')
buf.write('་\x05ёȩ\x02་༌\x05яȨ\x02༌')
buf.write('˒\x03\x02\x02\x02།༎\x05љȭ\x02༎༏')
buf.write('\x05ѕȫ\x02༏༐\x05ыȦ\x02༐༑')
buf.write('\x05лȞ\x02༑༒\x05еț\x02༒༓')
buf.write('\x05ћȮ\x02༓༔\x05еț\x02༔˔')
buf.write('\x03\x02\x02\x02༕༖\x05љȭ\x02༖༗\x05ѕ')
buf.write('ȫ\x02༗༘\x05ыȦ\x02༘༙\x05н')
buf.write('ȟ\x02༙༚\x05їȬ\x02༚༛\x05ї')
buf.write('Ȭ\x02༛༜\x05ёȩ\x02༜༝\x05ї')
buf.write('Ȭ\x02༝˖\x03\x02\x02\x02༞༟\x05љȭ')
buf.write('\x02༟༠\x05ћȮ\x02༠༡\x05еț')
buf.write('\x02༡༢\x05яȨ\x02༢༣\x05лȞ')
buf.write('\x02༣༤\x05еț\x02༤༥\x05ыȦ')
buf.write('\x02༥༦\x05ёȩ\x02༦༧\x05яȨ')
buf.write('\x02༧༨\x05нȟ\x02༨˘\x03\x02\x02\x02༩')
buf.write('༪\x05љȭ\x02༪༫\x05ћȮ\x02༫')
buf.write('༬\x05еț\x02༬༭\x05їȬ\x02༭')
buf.write('༮\x05ћȮ\x02༮˚\x03\x02\x02\x02༯༰')
buf.write('\x05љȭ\x02༰༱\x05ћȮ\x02༱༲')
buf.write('\x05еț\x02༲༳\x05їȬ\x02༳༴')
buf.write('\x05ћȮ\x02༴༵\x05ѝȯ\x02༵༶')
buf.write('\x05ѓȪ\x02༶˜\x03\x02\x02\x02༷༸\x05љ')
buf.write('ȭ\x02༸༹\x05ћȮ\x02༹༺\x05е')
buf.write('ț\x02༺༻\x05ћȮ\x02༻༼\x05н')
buf.write('ȟ\x02༼༽\x05эȧ\x02༽༾\x05н')
buf.write('ȟ\x02༾༿\x05яȨ\x02༿ཀ\x05ћ')
buf.write('Ȯ\x02ཀ˞\x03\x02\x02\x02ཁག\x05љȭ')
buf.write('\x02གགྷ\x05ћȮ\x02གྷང\x05еț')
buf.write('\x02ངཅ\x05ћȮ\x02ཅཆ\x05нȟ')
buf.write('\x02ཆཇ\x05эȧ\x02ཇ\u0f48\x05нȟ')
buf.write('\x02\u0f48ཉ\x05яȨ\x02ཉཊ\x05ћȮ')
buf.write('\x02ཊཋ\x07a\x02\x02ཋཌ\x05хȣ\x02ཌ')
buf.write('ཌྷ\x05лȞ\x02ཌྷˠ\x03\x02\x02\x02ཎཏ')
buf.write('\x05љȭ\x02ཏཐ\x05ћȮ\x02ཐད')
buf.write('\x05еț\x02དདྷ\x05ћȮ\x02དྷན')
buf.write('\x05хȣ\x02ནཔ\x05йȝ\x02པˢ')
buf.write('\x03\x02\x02\x02ཕབ\x05љȭ\x02བབྷ\x05ћ')
buf.write('Ȯ\x02བྷམ\x05еț\x02མཙ\x05ћ')
buf.write('Ȯ\x02ཙཚ\x05хȣ\x02ཚཛ\x05љ')
buf.write('ȭ\x02ཛཛྷ\x05ћȮ\x02ཛྷཝ\x05х')
buf.write('ȣ\x02ཝཞ\x05йȝ\x02ཞཟ\x05љ')
buf.write('ȭ\x02ཟˤ\x03\x02\x02\x02འཡ\x05љȭ')
buf.write('\x02ཡར\x05ћȮ\x02རལ\x05їȬ')
buf.write('\x02ལཤ\x05хȣ\x02ཤཥ\x05яȨ')
buf.write('\x02ཥས\x05сȡ\x02ས˦\x03\x02\x02\x02ཧ')
buf.write('ཨ\x05љȭ\x02ཨཀྵ\x05ѝȯ\x02ཀྵ')
buf.write('ཪ\x05зȜ\x02ཪཫ\x05эȧ\x02ཫ')
buf.write('ཬ\x05ѝȯ\x02ཬ\u0f6d\x05ыȦ\x02\u0f6d')
buf.write('\u0f6e\x05ћȮ\x02\u0f6e\u0f6f\x05хȣ\x02\u0f6f')
buf.write('\u0f70\x05љȭ\x02\u0f70ཱ\x05нȟ\x02ཱ')
buf.write('ི\x05ћȮ\x02ི˨\x03\x02\x02\x02ཱིུ')
buf.write('\x05љȭ\x02ཱུུ\x05ѝȯ\x02ཱུྲྀ')
buf.write('\x05зȜ\x02ྲྀཷ\x05ѓȪ\x02ཷླྀ')
buf.write('\x05еț\x02ླྀཹ\x05їȬ\x02ཹེ')
buf.write('\x05ћȮ\x02ེཻ\x05хȣ\x02ཻོ')
buf.write('\x05ћȮ\x02ོཽ\x05хȣ\x02ཽཾ')
buf.write('\x05ёȩ\x02ཾཿ\x05яȨ\x02ཿ˪')
buf.write('\x03\x02\x02\x02ཱྀྀ\x05љȭ\x02ཱྀྂ\x05ѝ')
buf.write('ȯ\x02ྂྃ\x05зȜ\x02྄ྃ\x05љ')
buf.write('ȭ\x02྄྅\x05ћȮ\x02྅྆\x05х')
buf.write('ȣ\x02྆྇\x05ћȮ\x02྇ྈ\x05ѝ')
buf.write('ȯ\x02ྈྉ\x05ћȮ\x02ྉྊ\x05е')
buf.write('ț\x02ྊྋ\x05зȜ\x02ྋྌ\x05ы')
buf.write('Ȧ\x02ྌྍ\x05нȟ\x02ྍˬ\x03\x02\x02')
buf.write('\x02ྎྏ\x05љȭ\x02ྏྐ\x05ѝȯ')
buf.write('\x02ྐྑ\x05зȜ\x02ྑྒ\x05ћȮ')
buf.write('\x02ྒྒྷ\x05ѥȳ\x02ྒྷྔ\x05ѓȪ')
buf.write('\x02ྔྕ\x05нȟ\x02ྕˮ\x03\x02\x02\x02ྖ')
buf.write('ྗ\x05љȭ\x02ྗ\u0f98\x05ѝȯ\x02\u0f98')
buf.write('ྙ\x05йȝ\x02ྙྚ\x05йȝ\x02ྚ')
buf.write('ྛ\x05нȟ\x02ྛྜ\x05љȭ\x02ྜ')
buf.write('ྜྷ\x05љȭ\x02ྜྷ˰\x03\x02\x02\x02ྞྟ')
buf.write('\x05љȭ\x02ྟྠ\x05ѝȯ\x02ྠྡ')
buf.write('\x05љȭ\x02ྡྡྷ\x05ѓȪ\x02ྡྷྣ')
buf.write('\x05нȟ\x02ྣྤ\x05яȨ\x02ྤྥ')
buf.write('\x05лȞ\x02ྥ˲\x03\x02\x02\x02ྦྦྷ\x05ћ')
buf.write('Ȯ\x02ྦྷྨ\x05еț\x02ྨྩ\x05з')
buf.write('Ȝ\x02ྩྪ\x05ыȦ\x02ྪྫ\x05н')
buf.write('ȟ\x02ྫ˴\x03\x02\x02\x02ྫྷྭ\x05ћȮ')
buf.write('\x02ྭྮ\x05уȢ\x02ྮྯ\x05нȟ')
buf.write('\x02ྯ˶\x03\x02\x02\x02ྰྱ\x05ћȮ\x02ྱ')
buf.write('ྲ\x05уȢ\x02ྲླ\x05нȟ\x02ླ')
buf.write('ྴ\x05яȨ\x02ྴ˸\x03\x02\x02\x02ྵྶ')
buf.write('\x05ћȮ\x02ྶྷ\x05хȣ\x02ྷྸ')
buf.write('\x05эȧ\x02ྸྐྵ\x05нȟ\x02ྐྵ˺')
buf.write('\x03\x02\x02\x02ྺྻ\x05ћȮ\x02ྻྼ\x05х')
buf.write('ȣ\x02ྼ\u0fbd\x05эȧ\x02\u0fbd྾\x05н')
buf.write('ȟ\x02྾྿\x05љȭ\x02྿࿀\x05ћ')
buf.write('Ȯ\x02࿀࿁\x05еț\x02࿁࿂\x05э')
buf.write('ȧ\x02࿂࿃\x05ѓȪ\x02࿃˼\x03\x02\x02')
buf.write('\x02࿄࿅\x05ћȮ\x02࿅࿆\x05хȣ')
buf.write('\x02࿆࿇\x05эȧ\x02࿇࿈\x05нȟ')
buf.write('\x02࿈࿉\x05љȭ\x02࿉࿊\x05ћȮ')
buf.write('\x02࿊࿋\x05еț\x02࿋࿌\x05эȧ')
buf.write('\x02࿌\u0fcd\x05ѓȪ\x02\u0fcd࿎\x07a\x02\x02࿎')
buf.write('࿏\x05ыȦ\x02࿏࿐\x05ћȮ\x02࿐')
buf.write('࿑\x05ѧȴ\x02࿑࿒\x07a\x02\x02࿒࿓')
buf.write('\x05ѝȯ\x02࿓࿔\x05яȨ\x02࿔࿕')
buf.write('\x05йȝ\x02࿕࿖\x05ёȩ\x02࿖࿗')
buf.write('\x05яȨ\x02࿗࿘\x05љȭ\x02࿘࿙')
buf.write('\x05ћȮ\x02࿙࿚\x05їȬ\x02࿚\u0fdb')
buf.write('\x05еț\x02\u0fdb\u0fdc\x05хȣ\x02\u0fdc\u0fdd')
buf.write('\x05яȨ\x02\u0fdd\u0fde\x05нȟ\x02\u0fde\u0fdf')
buf.write('\x05лȞ\x02\u0fdf˾\x03\x02\x02\x02\u0fe0\u0fe1\x05ћ')
buf.write('Ȯ\x02\u0fe1\u0fe2\x05хȣ\x02\u0fe2\u0fe3\x05э')
buf.write('ȧ\x02\u0fe3\u0fe4\x05нȟ\x02\u0fe4\u0fe5\x05љ')
buf.write('ȭ\x02\u0fe5\u0fe6\x05ћȮ\x02\u0fe6\u0fe7\x05е')
buf.write('ț\x02\u0fe7\u0fe8\x05эȧ\x02\u0fe8\u0fe9\x05ѓ')
buf.write('Ȫ\x02\u0fe9\u0fea\x07a\x02\x02\u0fea\u0feb\x05ћȮ')
buf.write('\x02\u0feb\u0fec\x05ѧȴ\x02\u0fec\u0fed\x07a\x02\x02\u0fed')
buf.write('\u0fee\x05ѝȯ\x02\u0fee\u0fef\x05яȨ\x02\u0fef')
buf.write('\u0ff0\x05йȝ\x02\u0ff0\u0ff1\x05ёȩ\x02\u0ff1')
buf.write('\u0ff2\x05яȨ\x02\u0ff2\u0ff3\x05љȭ\x02\u0ff3')
buf.write('\u0ff4\x05ћȮ\x02\u0ff4\u0ff5\x05їȬ\x02\u0ff5')
buf.write('\u0ff6\x05еț\x02\u0ff6\u0ff7\x05хȣ\x02\u0ff7')
buf.write('\u0ff8\x05яȨ\x02\u0ff8\u0ff9\x05нȟ\x02\u0ff9')
buf.write('\u0ffa\x05лȞ\x02\u0ffà\x03\x02\x02\x02\u0ffb\u0ffc')
buf.write('\x05ћȮ\x02\u0ffc\u0ffd\x05хȣ\x02\u0ffd\u0ffe')
buf.write('\x05эȧ\x02\u0ffe\u0fff\x05нȟ\x02\u0fffက')
buf.write('\x05љȭ\x02ကခ\x05ћȮ\x02ခဂ')
buf.write('\x05еț\x02ဂဃ\x05эȧ\x02ဃင')
buf.write('\x05ѓȪ\x02ငစ\x07a\x02\x02စဆ\x05ѝ')
buf.write('ȯ\x02ဆဇ\x05яȨ\x02ဇဈ\x05й')
buf.write('ȝ\x02ဈဉ\x05ёȩ\x02ဉည\x05я')
buf.write('Ȩ\x02ညဋ\x05љȭ\x02ဋဌ\x05ћ')
buf.write('Ȯ\x02ဌဍ\x05їȬ\x02ဍဎ\x05е')
buf.write('ț\x02ဎဏ\x05хȣ\x02ဏတ\x05я')
buf.write('Ȩ\x02တထ\x05нȟ\x02ထဒ\x05л')
buf.write('Ȟ\x02ဒ̂\x03\x02\x02\x02ဓန\x05ћȮ')
buf.write('\x02နပ\x05хȣ\x02ပဖ\x05эȧ')
buf.write('\x02ဖဗ\x05нȟ\x02ဗဘ\x05ѧȴ')
buf.write('\x02ဘမ\x05ёȩ\x02မယ\x05яȨ')
buf.write('\x02ယရ\x05нȟ\x02ရလ\x07a\x02\x02လ')
buf.write('ဝ\x05еț\x02ဝသ\x05зȜ\x02သ')
buf.write('ဟ\x05зȜ\x02ဟဠ\x05їȬ\x02ဠ')
buf.write('̄\x03\x02\x02\x02အဢ\x05ћȮ\x02ဢဣ')
buf.write('\x05хȣ\x02ဣဤ\x05эȧ\x02ဤဥ')
buf.write('\x05нȟ\x02ဥဦ\x05ѧȴ\x02ဦဧ')
buf.write('\x05ёȩ\x02ဧဨ\x05яȨ\x02ဨဩ')
buf.write('\x05нȟ\x02ဩဪ\x07a\x02\x02ဪါ\x05у')
buf.write('Ȣ\x02ါာ\x05ёȩ\x02ာိ\x05ѝ')
buf.write('ȯ\x02ိီ\x05їȬ\x02ီ̆\x03\x02\x02')
buf.write('\x02ုူ\x05ћȮ\x02ူေ\x05хȣ')
buf.write('\x02ေဲ\x05эȧ\x02ဲဳ\x05нȟ')
buf.write('\x02ဳဴ\x05ѧȴ\x02ဴဵ\x05ёȩ')
buf.write('\x02ဵံ\x05яȨ\x02ံ့\x05нȟ')
buf.write('\x02့း\x07a\x02\x02း္\x05эȧ\x02္')
buf.write('်\x05хȣ\x02်ျ\x05яȨ\x02ျ')
buf.write('ြ\x05ѝȯ\x02ြွ\x05ћȮ\x02ွ')
buf.write('ှ\x05нȟ\x02ှ̈\x03\x02\x02\x02ဿ၀')
buf.write('\x05ћȮ\x02၀၁\x05хȣ\x02၁၂')
buf.write('\x05эȧ\x02၂၃\x05нȟ\x02၃၄')
buf.write('\x05ѧȴ\x02၄၅\x05ёȩ\x02၅၆')
buf.write('\x05яȨ\x02၆၇\x05нȟ\x02၇၈')
buf.write('\x07a\x02\x02၈၉\x05їȬ\x02၉၊\x05н')
buf.write('ȟ\x02၊။\x05сȡ\x02။၌\x05х')
buf.write('ȣ\x02၌၍\x05ёȩ\x02၍၎\x05я')
buf.write('Ȩ\x02၎̊\x03\x02\x02\x02၏ၐ\x05ћȮ')
buf.write('\x02ၐၑ\x05ёȩ\x02ၑ̌\x03\x02\x02\x02ၒ')
buf.write('ၓ\x05ћȮ\x02ၓၔ\x05їȬ\x02ၔ')
buf.write('ၕ\x05еț\x02ၕၖ\x05хȣ\x02ၖ')
buf.write('ၗ\x05ыȦ\x02ၗၘ\x05хȣ\x02ၘ')
buf.write('ၙ\x05яȨ\x02ၙၚ\x05сȡ\x02ၚ')
buf.write('̎\x03\x02\x02\x02ၛၜ\x05ћȮ\x02ၜၝ')
buf.write('\x05їȬ\x02ၝၞ\x05еț\x02ၞၟ')
buf.write('\x05яȨ\x02ၟၠ\x05љȭ\x02ၠၡ')
buf.write('\x05еț\x02ၡၢ\x05йȝ\x02ၢၣ')
buf.write('\x05ћȮ\x02ၣၤ\x05хȣ\x02ၤၥ')
buf.write('\x05ёȩ\x02ၥၦ\x05яȨ\x02ၦ̐')
buf.write('\x03\x02\x02\x02ၧၨ\x05ћȮ\x02ၨၩ\x05ї')
buf.write('Ȭ\x02ၩၪ\x05еț\x02ၪၫ\x05я')
buf.write('Ȩ\x02ၫၬ\x05љȭ\x02ၬၭ\x05ы')
buf.write('Ȧ\x02ၭၮ\x05еț\x02ၮၯ\x05ћ')
buf.write('Ȯ\x02ၯၰ\x05нȟ\x02ၰ̒\x03\x02\x02')
buf.write('\x02ၱၲ\x05ћȮ\x02ၲၳ\x05їȬ')
buf.write('\x02ၳၴ\x05нȟ\x02ၴၵ\x05еț')
buf.write('\x02ၵၶ\x05ћȮ\x02ၶ̔\x03\x02\x02\x02ၷ')
buf.write('ၸ\x05ћȮ\x02ၸၹ\x05їȬ\x02ၹ')
buf.write('ၺ\x05хȣ\x02ၺၻ\x05сȡ\x02ၻ')
buf.write('ၼ\x05сȡ\x02ၼၽ\x05нȟ\x02ၽ')
buf.write('ၾ\x05їȬ\x02ၾ̖\x03\x02\x02\x02ၿႀ')
buf.write('\x05ћȮ\x02ႀႁ\x05їȬ\x02ႁႂ')
buf.write('\x05хȣ\x02ႂႃ\x05эȧ\x02ႃ̘')
buf.write('\x03\x02\x02\x02ႄႅ\x05ћȮ\x02ႅႆ\x05ї')
buf.write('Ȭ\x02ႆႇ\x05ѝȯ\x02ႇႈ\x05н')
buf.write('ȟ\x02ႈ̚\x03\x02\x02\x02ႉႊ\x05ћȮ')
buf.write('\x02ႊႋ\x05їȬ\x02ႋႌ\x05ѝȯ')
buf.write('\x02ႌႍ\x05яȨ\x02ႍႎ\x05йȝ')
buf.write('\x02ႎႏ\x05еț\x02ႏ႐\x05ћȮ')
buf.write('\x02႐႑\x05нȟ\x02႑̜\x03\x02\x02\x02႒')
buf.write('႓\x05ћȮ\x02႓႔\x05ѥȳ\x02႔')
buf.write('႕\x05ѓȪ\x02႕႖\x05нȟ\x02႖')
buf.write('̞\x03\x02\x02\x02႗႘\x05ѝȯ\x02႘႙')
buf.write('\x05яȨ\x02႙ႚ\x05зȜ\x02ႚႛ')
buf.write('\x05ёȩ\x02ႛႜ\x05ѝȯ\x02ႜႝ')
buf.write('\x05яȨ\x02ႝ႞\x05лȞ\x02႞႟')
buf.write('\x05нȟ\x02႟Ⴀ\x05лȞ\x02Ⴀ̠')
buf.write('\x03\x02\x02\x02ႡႢ\x05ѝȯ\x02ႢႣ\x05я')
buf.write('Ȩ\x02ႣႤ\x05лȞ\x02ႤႥ\x05н')
buf.write('ȟ\x02ႥႦ\x05їȬ\x02Ⴆ̢\x03\x02\x02')
buf.write('\x02ႧႨ\x05ѝȯ\x02ႨႩ\x05яȨ')
buf.write('\x02ႩႪ\x05хȣ\x02ႪႫ\x05ёȩ')
buf.write('\x02ႫႬ\x05яȨ\x02Ⴌ̤\x03\x02\x02\x02Ⴍ')
buf.write('Ⴎ\x05ѝȯ\x02ႮႯ\x05яȨ\x02Ⴏ')
buf.write('Ⴐ\x05хȣ\x02ႰႱ\x05ѕȫ\x02Ⴑ')
buf.write('Ⴒ\x05ѝȯ\x02ႲႳ\x05нȟ\x02Ⴓ')
buf.write('̦\x03\x02\x02\x02ႴႵ\x05ѝȯ\x02ႵႶ')
buf.write('\x05яȨ\x02ႶႷ\x05ыȦ\x02ႷႸ')
buf.write('\x05хȣ\x02ႸႹ\x05эȧ\x02ႹႺ')
buf.write('\x05хȣ\x02ႺႻ\x05ћȮ\x02ႻႼ')
buf.write('\x05нȟ\x02ႼႽ\x05лȞ\x02Ⴝ̨')
buf.write('\x03\x02\x02\x02ႾႿ\x05ѝȯ\x02ႿჀ\x05я')
buf.write('Ȩ\x02ჀჁ\x05ѓȪ\x02ჁჂ\x05х')
buf.write('ȣ\x02ჂჃ\x05џȰ\x02ჃჄ\x05ё')
buf.write('ȩ\x02ჄჅ\x05ћȮ\x02Ⴥ̪\x03\x02\x02')
buf.write('\x02\u10c6Ⴧ\x05ѝȯ\x02Ⴧ\u10c8\x05яȨ')
buf.write('\x02\u10c8\u10c9\x05ћȮ\x02\u10c9\u10ca\x05хȣ')
buf.write('\x02\u10ca\u10cb\x05ыȦ\x02\u10cb̬\x03\x02\x02\x02\u10cc')
buf.write('Ⴭ\x05ѝȯ\x02Ⴭ\u10ce\x05ѓȪ\x02\u10ce')
buf.write('\u10cf\x05лȞ\x02\u10cfა\x05еț\x02ა')
buf.write('ბ\x05ћȮ\x02ბგ\x05нȟ\x02გ')
buf.write('̮\x03\x02\x02\x02დე\x05ѝȯ\x02ევ')
buf.write('\x05ѓȪ\x02ვზ\x05лȞ\x02ზთ')
buf.write('\x05еț\x02თი\x05ћȮ\x02იკ')
buf.write('\x05нȟ\x02კლ\x05лȞ\x02ლ̰')
buf.write('\x03\x02\x02\x02მნ\x05ѝȯ\x02ნო\x05ѓ')
buf.write('Ȫ\x02ოპ\x05љȭ\x02პჟ\x05н')
buf.write('ȟ\x02ჟრ\x05їȬ\x02რს\x05ћ')
buf.write('Ȯ\x02ს̲\x03\x02\x02\x02ტუ\x05ѝȯ')
buf.write('\x02უფ\x05їȬ\x02ფქ\x05ёȩ')
buf.write('\x02ქღ\x05ѡȱ\x02ღყ\x05хȣ')
buf.write('\x02ყშ\x05лȞ\x02შ̴\x03\x02\x02\x02ჩ')
buf.write('ც\x05ѝȯ\x02ცძ\x05љȭ\x02ძ')
buf.write('წ\x05нȟ\x02წ̶\x03\x02\x02\x02ჭხ')
buf.write('\x05ѝȯ\x02ხჯ\x05љȭ\x02ჯჰ')
buf.write('\x05хȣ\x02ჰჱ\x05яȨ\x02ჱჲ')
buf.write('\x05сȡ\x02ჲ̸\x03\x02\x02\x02ჳჴ\x05џ')
buf.write('Ȱ\x02ჴჵ\x05еț\x02ჵჶ\x05ы')
buf.write('Ȧ\x02ჶჷ\x05хȣ\x02ჷჸ\x05л')
buf.write('Ȟ\x02ჸჹ\x05еț\x02ჹჺ\x05ћ')
buf.write('Ȯ\x02ჺ჻\x05нȟ\x02჻̺\x03\x02\x02')
buf.write('\x02ჼჽ\x05џȰ\x02ჽჾ\x05еț')
buf.write('\x02ჾჿ\x05ыȦ\x02ჿᄀ\x05ѝȯ')
buf.write('\x02ᄀᄁ\x05нȟ\x02ᄁ̼\x03\x02\x02\x02ᄂ')
buf.write('ᄃ\x05џȰ\x02ᄃᄄ\x05еț\x02ᄄ')
buf.write('ᄅ\x05ыȦ\x02ᄅᄆ\x05ѝȯ\x02ᄆ')
buf.write('ᄇ\x05нȟ\x02ᄇᄈ\x05љȭ\x02ᄈ')
buf.write('̾\x03\x02\x02\x02ᄉᄊ\x05џȰ\x02ᄊᄋ')
buf.write('\x05еț\x02ᄋᄌ\x05їȬ\x02ᄌᄍ')
buf.write('\x05йȝ\x02ᄍᄎ\x05уȢ\x02ᄎᄏ')
buf.write('\x05еț\x02ᄏᄐ\x05їȬ\x02ᄐ̀')
buf.write('\x03\x02\x02\x02ᄑᄒ\x05џȰ\x02ᄒᄓ\x05е')
buf.write('ț\x02ᄓᄔ\x05їȬ\x02ᄔᄕ\x05й')
buf.write('ȝ\x02ᄕᄖ\x05уȢ\x02ᄖᄗ\x05е')
buf.write('ț\x02ᄗᄘ\x05їȬ\x02ᄘᄙ\x074')
buf.write('\x02\x02ᄙ͂\x03\x02\x02\x02ᄚᄛ\x05џȰ\x02ᄛ')
buf.write('ᄜ\x05еț\x02ᄜᄝ\x05їȬ\x02ᄝ')
buf.write('ᄞ\x05хȣ\x02ᄞᄟ\x05еț\x02ᄟ')
buf.write('ᄠ\x05зȜ\x02ᄠᄡ\x05ыȦ\x02ᄡ')
buf.write('ᄢ\x05нȟ\x02ᄢ̈́\x03\x02\x02\x02ᄣᄤ')
buf.write('\x05џȰ\x02ᄤᄥ\x05еț\x02ᄥᄦ')
buf.write('\x05їȬ\x02ᄦᄧ\x05їȬ\x02ᄧᄨ')
buf.write('\x05еț\x02ᄨᄩ\x05ѥȳ\x02ᄩ͆')
buf.write('\x03\x02\x02\x02ᄪᄫ\x05џȰ\x02ᄫᄬ\x05е')
buf.write('ț\x02ᄬᄭ\x05їȬ\x02ᄭᄮ\x05ѥ')
buf.write('ȳ\x02ᄮᄯ\x05хȣ\x02ᄯᄰ\x05я')
buf.write('Ȩ\x02ᄰᄱ\x05сȡ\x02ᄱ͈\x03\x02\x02')
buf.write('\x02ᄲᄳ\x05џȰ\x02ᄳᄴ\x05нȟ')
buf.write('\x02ᄴᄵ\x05їȬ\x02ᄵᄶ\x05љȭ')
buf.write('\x02ᄶᄷ\x05хȣ\x02ᄷᄸ\x05ёȩ')
buf.write('\x02ᄸᄹ\x05яȨ\x02ᄹ͊\x03\x02\x02\x02ᄺ')
buf.write('ᄻ\x05џȰ\x02ᄻᄼ\x05нȟ\x02ᄼ')
buf.write('ᄽ\x05їȬ\x02ᄽᄾ\x05љȭ\x02ᄾ')
buf.write('ᄿ\x05хȣ\x02ᄿᅀ\x05ёȩ\x02ᅀ')
buf.write('ᅁ\x05яȨ\x02ᅁᅂ\x05љȭ\x02ᅂ')
buf.write('͌\x03\x02\x02\x02ᅃᅄ\x05ѡȱ\x02ᅄᅅ')
buf.write('\x05еț\x02ᅅᅆ\x05хȣ\x02ᅆᅇ')
buf.write('\x05ћȮ\x02ᅇ͎\x03\x02\x02\x02ᅈᅉ\x05ѡ')
buf.write('ȱ\x02ᅉᅊ\x05еț\x02ᅊᅋ\x05ї')
buf.write('Ȭ\x02ᅋᅌ\x05яȨ\x02ᅌᅍ\x05х')
buf.write('ȣ\x02ᅍᅎ\x05яȨ\x02ᅎᅏ\x05с')
buf.write('ȡ\x02ᅏ͐\x03\x02\x02\x02ᅐᅑ\x05ѡȱ')
buf.write('\x02ᅑᅒ\x05нȟ\x02ᅒᅓ\x05ыȦ')
buf.write('\x02ᅓᅔ\x05ыȦ\x02ᅔᅕ\x05пȠ')
buf.write('\x02ᅕᅖ\x05ёȩ\x02ᅖᅗ\x05їȬ')
buf.write('\x02ᅗᅘ\x05эȧ\x02ᅘᅙ\x05нȟ')
buf.write('\x02ᅙᅚ\x05лȞ\x02ᅚ͒\x03\x02\x02\x02ᅛ')
buf.write('ᅜ\x05ѡȱ\x02ᅜᅝ\x05уȢ\x02ᅝ')
buf.write('ᅞ\x05нȟ\x02ᅞᅟ\x05яȨ\x02ᅟ')
buf.write('͔\x03\x02\x02\x02ᅠᅡ\x05ѡȱ\x02ᅡᅢ')
buf.write('\x05уȢ\x02ᅢᅣ\x05нȟ\x02ᅣᅤ')
buf.write('\x05яȨ\x02ᅤᅥ\x05нȟ\x02ᅥᅦ')
buf.write('\x05џȰ\x02ᅦᅧ\x05нȟ\x02ᅧᅨ')
buf.write('\x05їȬ\x02ᅨ͖\x03\x02\x02\x02ᅩᅪ\x05ѡ')
buf.write('ȱ\x02ᅪᅫ\x05уȢ\x02ᅫᅬ\x05н')
buf.write('ȟ\x02ᅬᅭ\x05їȬ\x02ᅭᅮ\x05н')
buf.write('ȟ\x02ᅮ͘\x03\x02\x02\x02ᅯᅰ\x05ѡȱ')
buf.write('\x02ᅰᅱ\x05уȢ\x02ᅱᅲ\x05хȣ')
buf.write('\x02ᅲᅳ\x05ыȦ\x02ᅳᅴ\x05нȟ')
buf.write('\x02ᅴ͚\x03\x02\x02\x02ᅵᅶ\x05ѡȱ\x02ᅶ')
buf.write('ᅷ\x05хȣ\x02ᅷᅸ\x05ћȮ\x02ᅸ')
buf.write('ᅹ\x05уȢ\x02ᅹ͜\x03\x02\x02\x02ᅺᅻ')
buf.write('\x05ѡȱ\x02ᅻᅼ\x05хȣ\x02ᅼᅽ')
buf.write('\x05ћȮ\x02ᅽᅾ\x05уȢ\x02ᅾᅿ')
buf.write('\x05хȣ\x02ᅿᆀ\x05яȨ\x02ᆀ͞')
buf.write('\x03\x02\x02\x02ᆁᆂ\x05ѡȱ\x02ᆂᆃ\x05ё')
buf.write('ȩ\x02ᆃᆄ\x05їȬ\x02ᆄᆅ\x05щ')
buf.write('ȥ\x02ᆅ͠\x03\x02\x02\x02ᆆᆇ\x05ѡȱ')
buf.write('\x02ᆇᆈ\x05їȬ\x02ᆈᆉ\x05хȣ')
buf.write('\x02ᆉᆊ\x05ћȮ\x02ᆊᆋ\x05нȟ')
buf.write('\x02ᆋ͢\x03\x02\x02\x02ᆌᆍ\x05ѣȲ\x02ᆍ')
buf.write('ᆎ\x05эȧ\x02ᆎᆏ\x05ыȦ\x02ᆏ')
buf.write('ͤ\x03\x02\x02\x02ᆐᆑ\x05ѣȲ\x02ᆑᆒ')
buf.write('\x05эȧ\x02ᆒᆓ\x05ыȦ\x02ᆓᆔ')
buf.write('\x05еț\x02ᆔᆕ\x05сȡ\x02ᆕᆖ')
buf.write('\x05сȡ\x02ᆖͦ\x03\x02\x02\x02ᆗᆘ\x05ѣ')
buf.write('Ȳ\x02ᆘᆙ\x05эȧ\x02ᆙᆚ\x05ы')
buf.write('Ȧ\x02ᆚᆛ\x05еț\x02ᆛᆜ\x05ћ')
buf.write('Ȯ\x02ᆜᆝ\x05ћȮ\x02ᆝᆞ\x05ї')
buf.write('Ȭ\x02ᆞᆟ\x05хȣ\x02ᆟᆠ\x05з')
buf.write('Ȝ\x02ᆠᆡ\x05ѝȯ\x02ᆡᆢ\x05ћ')
buf.write('Ȯ\x02ᆢᆣ\x05нȟ\x02ᆣᆤ\x05љ')
buf.write('ȭ\x02ᆤͨ\x03\x02\x02\x02ᆥᆦ\x05ѣȲ')
buf.write('\x02ᆦᆧ\x05эȧ\x02ᆧᆨ\x05ыȦ')
buf.write('\x02ᆨᆩ\x05йȝ\x02ᆩᆪ\x05еț')
buf.write('\x02ᆪᆫ\x05љȭ\x02ᆫᆬ\x05ћȮ')
buf.write('\x02ᆬͪ\x03\x02\x02\x02ᆭᆮ\x05ѣȲ\x02ᆮ')
buf.write('ᆯ\x05эȧ\x02ᆯᆰ\x05ыȦ\x02ᆰ')
buf.write('ᆱ\x05йȝ\x02ᆱᆲ\x05ёȩ\x02ᆲ')
buf.write('ᆳ\x05ыȦ\x02ᆳᆴ\x05еț\x02ᆴ')
buf.write('ᆵ\x05ћȮ\x02ᆵᆶ\x05ћȮ\x02ᆶ')
buf.write('ᆷ\x05џȰ\x02ᆷᆸ\x05еț\x02ᆸ')
buf.write('ᆹ\x05ыȦ\x02ᆹͬ\x03\x02\x02\x02ᆺᆻ')
buf.write('\x05ѣȲ\x02ᆻᆼ\x05эȧ\x02ᆼᆽ')
buf.write('\x05ыȦ\x02ᆽᆾ\x05нȟ\x02ᆾᆿ')
buf.write('\x05ыȦ\x02ᆿᇀ\x05нȟ\x02ᇀᇁ')
buf.write('\x05эȧ\x02ᇁᇂ\x05нȟ\x02ᇂᇃ')
buf.write('\x05яȨ\x02ᇃᇄ\x05ћȮ\x02ᇄͮ')
buf.write('\x03\x02\x02\x02ᇅᇆ\x05ѣȲ\x02ᇆᇇ\x05э')
buf.write('ȧ\x02ᇇᇈ\x05ыȦ\x02ᇈᇉ\x05н')
buf.write('ȟ\x02ᇉᇊ\x05ѣȲ\x02ᇊᇋ\x05х')
buf.write('ȣ\x02ᇋᇌ\x05љȭ\x02ᇌᇍ\x05ћ')
buf.write('Ȯ\x02ᇍᇎ\x05љȭ\x02ᇎͰ\x03\x02\x02')
buf.write('\x02ᇏᇐ\x05ѣȲ\x02ᇐᇑ\x05эȧ')
buf.write('\x02ᇑᇒ\x05ыȦ\x02ᇒᇓ\x05пȠ')
buf.write('\x02ᇓᇔ\x05ёȩ\x02ᇔᇕ\x05їȬ')
buf.write('\x02ᇕᇖ\x05нȟ\x02ᇖᇗ\x05љȭ')
buf.write('\x02ᇗᇘ\x05ћȮ\x02ᇘͲ\x03\x02\x02\x02ᇙ')
buf.write('ᇚ\x05ѣȲ\x02ᇚᇛ\x05эȧ\x02ᇛ')
buf.write('ᇜ\x05ыȦ\x02ᇜᇝ\x05яȨ\x02ᇝ')
buf.write('ᇞ\x05еț\x02ᇞᇟ\x05эȧ\x02ᇟ')
buf.write('ᇠ\x05нȟ\x02ᇠᇡ\x05љȭ\x02ᇡ')
buf.write('ᇢ\x05ѓȪ\x02ᇢᇣ\x05еț\x02ᇣ')
buf.write('ᇤ\x05йȝ\x02ᇤᇥ\x05нȟ\x02ᇥ')
buf.write('ᇦ\x05љȭ\x02ᇦʹ\x03\x02\x02\x02ᇧᇨ')
buf.write('\x05ѣȲ\x02ᇨᇩ\x05эȧ\x02ᇩᇪ')
buf.write('\x05ыȦ\x02ᇪᇫ\x05ѓȪ\x02ᇫᇬ')
buf.write('\x05еț\x02ᇬᇭ\x05їȬ\x02ᇭᇮ')
buf.write('\x05љȭ\x02ᇮᇯ\x05нȟ\x02ᇯͶ')
buf.write('\x03\x02\x02\x02ᇰᇱ\x05ѣȲ\x02ᇱᇲ\x05э')
buf.write('ȧ\x02ᇲᇳ\x05ыȦ\x02ᇳᇴ\x05ѓ')
buf.write('Ȫ\x02ᇴᇵ\x05хȣ\x02ᇵ\u0378\x03\x02\x02')
buf.write('\x02ᇶᇷ\x05ѣȲ\x02ᇷᇸ\x05эȧ')
buf.write('\x02ᇸᇹ\x05ыȦ\x02ᇹᇺ\x05ѕȫ')
buf.write('\x02ᇺᇻ\x05ѝȯ\x02ᇻᇼ\x05нȟ')
buf.write('\x02ᇼᇽ\x05їȬ\x02ᇽᇾ\x05ѥȳ')
buf.write('\x02ᇾͺ\x03\x02\x02\x02ᇿሀ\x05ѣȲ\x02ሀ')
buf.write('ሁ\x05эȧ\x02ሁሂ\x05ыȦ\x02ሂ')
buf.write('ሃ\x05їȬ\x02ሃሄ\x05ёȩ\x02ሄ')
buf.write('ህ\x05ёȩ\x02ህሆ\x05ћȮ\x02ሆ')
buf.write('ͼ\x03\x02\x02\x02ሇለ\x05ѣȲ\x02ለሉ')
buf.write('\x05эȧ\x02ሉሊ\x05ыȦ\x02ሊላ')
buf.write('\x05љȭ\x02ላሌ\x05нȟ\x02ሌል')
buf.write('\x05їȬ\x02ልሎ\x05хȣ\x02ሎሏ')
buf.write('\x05еț\x02ሏሐ\x05ыȦ\x02ሐሑ')
buf.write('\x05хȣ\x02ሑሒ\x05ѧȴ\x02ሒሓ')
buf.write('\x05нȟ\x02ሓ;\x03\x02\x02\x02ሔሕ\x05ѣ')
buf.write('Ȳ\x02ሕሖ\x05эȧ\x02ሖሗ\x05ы')
buf.write('Ȧ\x02ሗመ\x05ћȮ\x02መሙ\x05е')
buf.write('ț\x02ሙሚ\x05зȜ\x02ሚማ\x05ы')
buf.write('Ȧ\x02ማሜ\x05нȟ\x02ሜ\u0380\x03\x02\x02')
buf.write('\x02ምሞ\x05ѥȳ\x02ሞሟ\x05нȟ')
buf.write('\x02ሟሠ\x05еț\x02ሠሡ\x05їȬ')
buf.write('\x02ሡ\u0382\x03\x02\x02\x02ሢሣ\x05ѥȳ\x02ሣ')
buf.write('ሤ\x05нȟ\x02ሤሥ\x05љȭ\x02ሥ')
buf.write('΄\x03\x02\x02\x02ሦሧ\x05ѥȳ\x02ሧረ')
buf.write('\x05эȧ\x02ረሩ\x05хȣ\x02ሩሪ')
buf.write('\x05яȨ\x02ሪራ\x05ћȮ\x02ራሬ')
buf.write('\x05нȟ\x02ሬር\x05їȬ\x02ርሮ')
buf.write('\x05џȰ\x02ሮሯ\x05еț\x02ሯሰ')
buf.write('\x05ыȦ\x02ሰሱ\x07a\x02\x02ሱሲ\x05ѝ')
buf.write('ȯ\x02ሲሳ\x05яȨ\x02ሳሴ\x05й')
buf.write('ȝ\x02ሴስ\x05ёȩ\x02ስሶ\x05я')
buf.write('Ȩ\x02ሶሷ\x05љȭ\x02ሷሸ\x05ћ')
buf.write('Ȯ\x02ሸሹ\x05їȬ\x02ሹሺ\x05е')
buf.write('ț\x02ሺሻ\x05хȣ\x02ሻሼ\x05я')
buf.write('Ȩ\x02ሼሽ\x05нȟ\x02ሽሾ\x05л')
buf.write('Ȟ\x02ሾΆ\x03\x02\x02\x02ሿቀ\x05ѧȴ')
buf.write('\x02ቀቁ\x05ёȩ\x02ቁቂ\x05яȨ')
buf.write('\x02ቂቃ\x05нȟ\x02ቃΈ\x03\x02\x02\x02ቄ')
buf.write('ቅ\x05ѓȪ\x02ቅቆ\x05їȬ\x02ቆ')
buf.write('ቇ\x05нȟ\x02ቇቈ\x05лȞ\x02ቈ')
buf.write('\u1249\x05хȣ\x02\u1249ቊ\x05йȝ\x02ቊ')
buf.write('ቋ\x05ћȮ\x02ቋቌ\x05хȣ\x02ቌ')
buf.write('ቍ\x05ёȩ\x02ቍ\u124e\x05яȨ\x02\u124e')
buf.write('Ί\x03\x02\x02\x02\u124fቐ\x05ѓȪ\x02ቐቑ')
buf.write('\x05їȬ\x02ቑቒ\x05нȟ\x02ቒቓ')
buf.write('\x05лȞ\x02ቓቔ\x05хȣ\x02ቔቕ')
buf.write('\x05йȝ\x02ቕቖ\x05ћȮ\x02ቖ\u1257')
buf.write('\x05хȣ\x02\u1257ቘ\x05ёȩ\x02ቘ\u1259')
buf.write('\x05яȨ\x02\u1259ቚ\x07a\x02\x02ቚቛ\x05з')
buf.write('Ȝ\x02ቛቜ\x05ёȩ\x02ቜቝ\x05ѝ')
buf.write('ȯ\x02ቝ\u125e\x05яȨ\x02\u125e\u125f\x05л')
buf.write('Ȟ\x02\u125fበ\x05љȭ\x02በΌ\x03\x02\x02')
buf.write('\x02ቡቢ\x05ѓȪ\x02ቢባ\x05їȬ')
buf.write('\x02ባቤ\x05нȟ\x02ቤብ\x05лȞ')
buf.write('\x02ብቦ\x05хȣ\x02ቦቧ\x05йȝ')
buf.write('\x02ቧቨ\x05ћȮ\x02ቨቩ\x05хȣ')
buf.write('\x02ቩቪ\x05ёȩ\x02ቪቫ\x05яȨ')
buf.write('\x02ቫቬ\x07a\x02\x02ቬቭ\x05йȝ\x02ቭ')
buf.write('ቮ\x05ёȩ\x02ቮቯ\x05љȭ\x02ቯ')
buf.write('ተ\x05ћȮ\x02ተΎ\x03\x02\x02\x02ቱቲ')
buf.write('\x05ѓȪ\x02ቲታ\x05їȬ\x02ታቴ')
buf.write('\x05нȟ\x02ቴት\x05лȞ\x02ትቶ')
buf.write('\x05хȣ\x02ቶቷ\x05йȝ\x02ቷቸ')
buf.write('\x05ћȮ\x02ቸቹ\x05хȣ\x02ቹቺ')
buf.write('\x05ёȩ\x02ቺቻ\x05яȨ\x02ቻቼ')
buf.write('\x07a\x02\x02ቼች\x05лȞ\x02ችቾ\x05н')
buf.write('ȟ\x02ቾቿ\x05ћȮ\x02ቿኀ\x05е')
buf.write('ț\x02ኀኁ\x05хȣ\x02ኁኂ\x05ы')
buf.write('Ȧ\x02ኂኃ\x05љȭ\x02ኃΐ\x03\x02\x02')
buf.write('\x02ኄኅ\x05ѓȪ\x02ኅኆ\x05їȬ')
buf.write('\x02ኆኇ\x05нȟ\x02ኇኈ\x05лȞ')
buf.write('\x02ኈ\u1289\x05хȣ\x02\u1289ኊ\x05йȝ')
buf.write('\x02ኊኋ\x05ћȮ\x02ኋኌ\x05хȣ')
buf.write('\x02ኌኍ\x05ёȩ\x02ኍ\u128e\x05яȨ')
buf.write('\x02\u128e\u128f\x07a\x02\x02\u128fነ\x05ѓȪ\x02ነ')
buf.write('ኑ\x05їȬ\x02ኑኒ\x05ёȩ\x02ኒ')
buf.write('ና\x05зȜ\x02ናኔ\x05еț\x02ኔ')
buf.write('ን\x05зȜ\x02ንኖ\x05хȣ\x02ኖ')
buf.write('ኗ\x05ыȦ\x02ኗኘ\x05хȣ\x02ኘ')
buf.write('ኙ\x05ћȮ\x02ኙኚ\x05ѥȳ\x02ኚ')
buf.write('Β\x03\x02\x02\x02ኛኜ\x05ѓȪ\x02ኜኝ')
buf.write('\x05їȬ\x02ኝኞ\x05нȟ\x02ኞኟ')
buf.write('\x05лȞ\x02ኟአ\x05хȣ\x02አኡ')
buf.write('\x05йȝ\x02ኡኢ\x05ћȮ\x02ኢኣ')
buf.write('\x05хȣ\x02ኣኤ\x05ёȩ\x02ኤእ')
buf.write('\x05яȨ\x02እኦ\x07a\x02\x02ኦኧ\x05љ')
buf.write('ȭ\x02ኧከ\x05нȟ\x02ከኩ\x05ћ')
buf.write('Ȯ\x02ኩΔ\x03\x02\x02\x02ኪካ\x05йȝ')
buf.write('\x02ካኬ\x05ѝȯ\x02ኬክ\x05эȧ')
buf.write('\x02ክኮ\x05нȟ\x02ኮኯ\x07a\x02\x02ኯ')
buf.write('ኰ\x05лȞ\x02ኰ\u12b1\x05хȣ\x02\u12b1')
buf.write('ኲ\x05љȭ\x02ኲኳ\x05ћȮ\x02ኳ')
buf.write('Ζ\x03\x02\x02\x02ኴኵ\x05лȞ\x02ኵ\u12b6')
buf.write('\x05нȟ\x02\u12b6\u12b7\x05яȨ\x02\u12b7ኸ')
buf.write('\x05љȭ\x02ኸኹ\x05нȟ\x02ኹኺ')
buf.write('\x07a\x02\x02ኺኻ\x05їȬ\x02ኻኼ\x05е')
buf.write('ț\x02ኼኽ\x05яȨ\x02ኽኾ\x05щ')
buf.write('ȥ\x02ኾΘ\x03\x02\x02\x02\u12bfዀ\x05ыȦ')
buf.write('\x02ዀ\u12c1\x05хȣ\x02\u12c1ዂ\x05љȭ')
buf.write('\x02ዂዃ\x05ћȮ\x02ዃዄ\x05еț')
buf.write('\x02ዄዅ\x05сȡ\x02ዅ\u12c6\x05сȡ')
buf.write('\x02\u12c6Κ\x03\x02\x02\x02\u12c7ወ\x05ѓȪ\x02ወ')
buf.write('ዉ\x05нȟ\x02ዉዊ\x05їȬ\x02ዊ')
buf.write('ዋ\x05йȝ\x02ዋዌ\x05нȟ\x02ዌ')
buf.write('ው\x05яȨ\x02ውዎ\x05ћȮ\x02ዎ')
buf.write('ዏ\x07a\x02\x02ዏዐ\x05їȬ\x02ዐዑ')
buf.write('\x05еț\x02ዑዒ\x05яȨ\x02ዒዓ')
buf.write('\x05щȥ\x02ዓΜ\x03\x02\x02\x02ዔዕ\x05ѓ')
buf.write('Ȫ\x02ዕዖ\x05нȟ\x02ዖ\u12d7\x05ї')
buf.write('Ȭ\x02\u12d7ዘ\x05йȝ\x02ዘዙ\x05н')
buf.write('ȟ\x02ዙዚ\x05яȨ\x02ዚዛ\x05ћ')
buf.write('Ȯ\x02ዛዜ\x05хȣ\x02ዜዝ\x05ы')
buf.write('Ȧ\x02ዝዞ\x05нȟ\x02ዞዟ\x07a\x02')
buf.write('\x02ዟዠ\x05йȝ\x02ዠዡ\x05ёȩ')
buf.write('\x02ዡዢ\x05яȨ\x02ዢዣ\x05ћȮ')
buf.write('\x02ዣΞ\x03\x02\x02\x02ዤዥ\x05ѓȪ\x02ዥ')
buf.write('ዦ\x05нȟ\x02ዦዧ\x05їȬ\x02ዧ')
buf.write('የ\x05йȝ\x02የዩ\x05нȟ\x02ዩ')
buf.write('ዪ\x05яȨ\x02ዪያ\x05ћȮ\x02ያ')
buf.write('ዬ\x05хȣ\x02ዬይ\x05ыȦ\x02ይ')
buf.write('ዮ\x05нȟ\x02ዮዯ\x07a\x02\x02ዯደ')
buf.write('\x05лȞ\x02ደዱ\x05хȣ\x02ዱዲ')
buf.write('\x05љȭ\x02ዲዳ\x05йȝ\x02ዳΠ')
buf.write('\x03\x02\x02\x02ዴድ\x05їȬ\x02ድዶ\x05е')
buf.write('ț\x02ዶዷ\x05яȨ\x02ዷዸ\x05щ')
buf.write('ȥ\x02ዸ\u03a2\x03\x02\x02\x02ዹዺ\x05еț')
buf.write('\x02ዺዻ\x05џȰ\x02ዻዼ\x05сȡ')
buf.write('\x02ዼΤ\x03\x02\x02\x02ዽዾ\x05йȝ\x02ዾ')
buf.write('ዿ\x05ёȩ\x02ዿጀ\x05їȬ\x02ጀ')
buf.write('ጁ\x05їȬ\x02ጁΦ\x03\x02\x02\x02ጂጃ')
buf.write('\x05ыȦ\x02ጃጄ\x05еț\x02ጄጅ')
buf.write('\x05сȡ\x02ጅΨ\x03\x02\x02\x02ጆጇ\x05ы')
buf.write('Ȧ\x02ጇገ\x05нȟ\x02ገጉ\x05е')
buf.write('ț\x02ጉጊ\x05лȞ\x02ጊΪ\x03\x02\x02')
buf.write('\x02ጋጌ\x05эȧ\x02ጌግ\x05еț')
buf.write('\x02ግጎ\x05ѣȲ\x02ጎά\x03\x02\x02\x02ጏ')
buf.write('ጐ\x05эȧ\x02ጐ\u1311\x05нȟ\x02\u1311')
buf.write('ጒ\x05лȞ\x02ጒጓ\x05хȣ\x02ጓ')
buf.write('ጔ\x05еț\x02ጔጕ\x05яȨ\x02ጕ')
buf.write('ή\x03\x02\x02\x02\u1316\u1317\x05эȧ\x02\u1317ጘ')
buf.write('\x05хȣ\x02ጘጙ\x05яȨ\x02ጙΰ')
buf.write('\x03\x02\x02\x02ጚጛ\x05яȨ\x02ጛጜ\x05ћ')
buf.write('Ȯ\x02ጜጝ\x05хȣ\x02ጝጞ\x05ы')
buf.write('Ȧ\x02ጞጟ\x05нȟ\x02ጟβ\x03\x02\x02')
buf.write('\x02ጠጡ\x05їȬ\x02ጡጢ\x05еț')
buf.write('\x02ጢጣ\x05ћȮ\x02ጣጤ\x05хȣ')
buf.write('\x02ጤጥ\x05ёȩ\x02ጥጦ\x07a\x02\x02ጦ')
buf.write('ጧ\x05ћȮ\x02ጧጨ\x05ёȩ\x02ጨ')
buf.write('ጩ\x07a\x02\x02ጩጪ\x05їȬ\x02ጪጫ')
buf.write('\x05нȟ\x02ጫጬ\x05ѓȪ\x02ጬጭ')
buf.write('\x05ёȩ\x02ጭጮ\x05їȬ\x02ጮጯ')
buf.write('\x05ћȮ\x02ጯδ\x03\x02\x02\x02ጰጱ\x05ї')
buf.write('Ȭ\x02ጱጲ\x05ёȩ\x02ጲጳ\x05ѡ')
buf.write('ȱ\x02ጳጴ\x07a\x02\x02ጴጵ\x05яȨ')
buf.write('\x02ጵጶ\x05ѝȯ\x02ጶጷ\x05эȧ')
buf.write('\x02ጷጸ\x05зȜ\x02ጸጹ\x05нȟ')
buf.write('\x02ጹጺ\x05їȬ\x02ጺζ\x03\x02\x02\x02ጻ')
buf.write('ጼ\x05љȭ\x02ጼጽ\x05ѝȯ\x02ጽ')
buf.write('ጾ\x05эȧ\x02ጾθ\x03\x02\x02\x02ጿፀ')
buf.write('\x05џȰ\x02ፀፁ\x05еț\x02ፁፂ')
buf.write('\x05їȬ\x02ፂፃ\x05хȣ\x02ፃፄ')
buf.write('\x05еț\x02ፄፅ\x05яȨ\x02ፅፆ')
buf.write('\x05йȝ\x02ፆፇ\x05нȟ\x02ፇκ')
buf.write('\x03\x02\x02\x02ፈፉ\x05їȬ\x02ፉፊ\x05н')
buf.write('ȟ\x02ፊፋ\x05сȡ\x02ፋፌ\x05ї')
buf.write('Ȭ\x02ፌፍ\x07a\x02\x02ፍμ\x03\x02\x02\x02ፎ')
buf.write('ፏ\x05љȭ\x02ፏፐ\x05ћȮ\x02ፐ')
buf.write('ፑ\x05лȞ\x02ፑፒ\x05лȞ\x02ፒ')
buf.write('ፓ\x05нȟ\x02ፓፔ\x05џȰ\x02ፔ')
buf.write('ξ\x03\x02\x02\x02ፕፖ\x05џȰ\x02ፖፗ')
buf.write('\x05еț\x02ፗፘ\x05їȬ\x02ፘፙ')
buf.write('\x07a\x02\x02ፙπ\x03\x02\x02\x02ፚ\u135b\x05йȝ')
buf.write('\x02\u135b\u135c\x05ёȩ\x02\u135c፝\x05џȰ')
buf.write('\x02፝፞\x05еț\x02፞፟\x05їȬ')
buf.write('\x02፟፠\x07a\x02\x02፠ς\x03\x02\x02\x02፡።')
buf.write('\x05яȨ\x02።፩\x07)\x02\x02፣፨\n\x02\x02')
buf.write('\x02፤፥\x07)\x02\x02፥፨\x07)\x02\x02፦፨\x05')
buf.write('Эȗ\x02፧፣\x03\x02\x02\x02፧፤\x03\x02\x02\x02')
buf.write('፧፦\x03\x02\x02\x02፨፫\x03\x02\x02\x02፩፧\x03')
buf.write('\x02\x02\x02፩፪\x03\x02\x02\x02፪፬\x03\x02\x02\x02፫፩')
buf.write('\x03\x02\x02\x02፬፭\x07)\x02\x02፭τ\x03\x02\x02\x02፮')
buf.write('፷\x05зȜ\x02፯፳\x07)\x02\x02፰፲')
buf.write('\x0423\x02፱፰\x03\x02\x02\x02፲፵\x03\x02\x02\x02፳')
buf.write('፱\x03\x02\x02\x02፳፴\x03\x02\x02\x02፴፶\x03\x02\x02\x02')
buf.write('፵፳\x03\x02\x02\x02፶፸\x07)\x02\x02፷፯\x03')
buf.write('\x02\x02\x02፸፹\x03\x02\x02\x02፹፷\x03\x02\x02\x02፹፺')
buf.write('\x03\x02\x02\x02፺φ\x03\x02\x02\x02፻ᎄ\x05ѣȲ')
buf.write('\x02፼ᎀ\x07)\x02\x02\u137d\u137f\t\x03\x02\x02\u137e\u137d')
buf.write('\x03\x02\x02\x02\u137fᎂ\x03\x02\x02\x02ᎀ\u137e\x03\x02\x02\x02ᎀ')
buf.write('ᎁ\x03\x02\x02\x02ᎁᎃ\x03\x02\x02\x02ᎂᎀ\x03\x02\x02\x02')
buf.write('ᎃᎅ\x07)\x02\x02ᎄ፼\x03\x02\x02\x02ᎅᎆ\x03')
buf.write('\x02\x02\x02ᎆᎄ\x03\x02\x02\x02ᎆᎇ\x03\x02\x02\x02ᎇψ')
buf.write('\x03\x02\x02\x02ᎈᎉ\x070\x02\x02ᎉᎊ\x070\x02\x02ᎊ')
buf.write('ϊ\x03\x02\x02\x02ᎋᎌ\x070\x02\x02ᎌό\x03\x02\x02')
buf.write('\x02ᎍᎎ\x05УȒ\x02ᎎώ\x03\x02\x02\x02ᎏ')
buf.write('᎘\x05Хȓ\x02᎐᎒\t\x04\x02\x02᎑᎓')
buf.write('\t\x05\x02\x02᎒᎑\x03\x02\x02\x02᎒᎓\x03\x02\x02\x02᎓')
buf.write('᎖\x03\x02\x02\x02᎔᎗\x05Хȓ\x02᎕᎗')
buf.write('\x05УȒ\x02᎖᎔\x03\x02\x02\x02᎖᎕\x03\x02\x02')
buf.write('\x02᎗᎙\x03\x02\x02\x02᎘᎐\x03\x02\x02\x02᎘᎙')
buf.write('\x03\x02\x02\x02᎙\u139c\x03\x02\x02\x02\u139a\u139d\x05лȞ')
buf.write('\x02\u139b\u139d\x05пȠ\x02\u139c\u139a\x03\x02\x02\x02\u139c')
buf.write('\u139b\x03\x02\x02\x02\u139c\u139d\x03\x02\x02\x02\u139dϐ\x03\x02\x02\x02')
buf.write('\u139eᎥ\x07)\x02\x02\u139fᎤ\n\x02\x02\x02ᎠᎡ\x07')
buf.write(')\x02\x02ᎡᎤ\x07)\x02\x02ᎢᎤ\x05Эȗ\x02Ꭳ')
buf.write('\u139f\x03\x02\x02\x02ᎣᎠ\x03\x02\x02\x02ᎣᎢ\x03\x02\x02\x02')
buf.write('ᎤᎧ\x03\x02\x02\x02ᎥᎣ\x03\x02\x02\x02ᎥᎦ\x03')
buf.write('\x02\x02\x02ᎦᎨ\x03\x02\x02\x02ᎧᎥ\x03\x02\x02\x02ᎨᎩ')
buf.write('\x07)\x02\x02Ꭹϒ\x03\x02\x02\x02ᎪᎯ\x05ѕȫ')
buf.write('\x02ᎫᎰ\x05ϗǬ\x02ᎬᎰ\x05ϙǭ')
buf.write('\x02ᎭᎰ\x05ϛǮ\x02ᎮᎰ\x05ϝǯ')
buf.write('\x02ᎯᎫ\x03\x02\x02\x02ᎯᎬ\x03\x02\x02\x02ᎯᎭ')
buf.write('\x03\x02\x02\x02ᎯᎮ\x03\x02\x02\x02ᎰᎱ\x03\x02\x02\x02Ꮁ')
buf.write('Ꮂ\x08Ǫ\x02\x02Ꮂϔ\x03\x02\x02\x02ᎳᎴ\x07)')
buf.write('\x02\x02Ꮄϖ\x03\x02\x02\x02ᎵᎶ\x05ϕǫ\x02Ꮆ')
buf.write('Ꮊ\x07>\x02\x02ᎷᎹ\x0b\x02\x02\x02ᎸᎷ\x03\x02\x02\x02')
buf.write('ᎹᎼ\x03\x02\x02\x02ᎺᎻ\x03\x02\x02\x02ᎺᎸ\x03')
buf.write('\x02\x02\x02ᎻᎽ\x03\x02\x02\x02ᎼᎺ\x03\x02\x02\x02ᎽᎾ')
buf.write('\x07@\x02\x02ᎾᎿ\x05ϕǫ\x02ᎿϘ\x03\x02\x02')
buf.write('\x02ᏀᏁ\x05ϕǫ\x02ᏁᏅ\x07}\x02\x02Ꮒ')
buf.write('Ꮔ\x0b\x02\x02\x02ᏃᏂ\x03\x02\x02\x02ᏄᏇ\x03\x02\x02')
buf.write('\x02ᏅᏆ\x03\x02\x02\x02ᏅᏃ\x03\x02\x02\x02ᏆᏈ')
buf.write('\x03\x02\x02\x02ᏇᏅ\x03\x02\x02\x02ᏈᏉ\x07\x7f\x02\x02Ꮙ')
buf.write('Ꮚ\x05ϕǫ\x02ᏊϚ\x03\x02\x02\x02ᏋᏌ')
buf.write('\x05ϕǫ\x02ᏌᏐ\x07]\x02\x02ᏍᏏ\x0b\x02\x02')
buf.write('\x02ᏎᏍ\x03\x02\x02\x02ᏏᏒ\x03\x02\x02\x02ᏐᏑ')
buf.write('\x03\x02\x02\x02ᏐᏎ\x03\x02\x02\x02ᏑᏓ\x03\x02\x02\x02Ꮢ')
buf.write('Ꮠ\x03\x02\x02\x02ᏓᏔ\x07_\x02\x02ᏔᏕ\x05ϕ')
buf.write('ǫ\x02ᏕϜ\x03\x02\x02\x02ᏖᏗ\x05ϕǫ')
buf.write('\x02ᏗᏛ\x07*\x02\x02ᏘᏚ\x0b\x02\x02\x02ᏙᏘ')
buf.write('\x03\x02\x02\x02ᏚᏝ\x03\x02\x02\x02ᏛᏜ\x03\x02\x02\x02Ꮫ')
buf.write('Ꮩ\x03\x02\x02\x02ᏜᏞ\x03\x02\x02\x02ᏝᏛ\x03\x02\x02\x02')
buf.write('ᏞᏟ\x07+\x02\x02ᏟᏠ\x05ϕǫ\x02Ꮰ')
buf.write('Ϟ\x03\x02\x02\x02ᏡᏢ\n\x06\x02\x02ᏢϠ\x03\x02\x02\x02')
buf.write('ᏣᏧ\x07$\x02\x02ᏤᏨ\n\x07\x02\x02ᏥᏦ\x07')
buf.write('$\x02\x02ᏦᏨ\x07$\x02\x02ᏧᏤ\x03\x02\x02\x02ᏧᏥ')
buf.write('\x03\x02\x02\x02ᏨᏩ\x03\x02\x02\x02ᏩᏧ\x03\x02\x02\x02Ꮹ')
buf.write('Ꮺ\x03\x02\x02\x02ᏪᏫ\x03\x02\x02\x02ᏫᏬ\x07$\x02\x02')
buf.write("ᏬϢ\x03\x02\x02\x02ᏭᏮ\x07'\x02\x02ᏮϤ\x03")
buf.write('\x02\x02\x02ᏯᏰ\x07(\x02\x02ᏰϦ\x03\x02\x02\x02ᏱᏲ')
buf.write('\x07*\x02\x02ᏲϨ\x03\x02\x02\x02ᏳᏴ\x07+\x02\x02ᏴϪ')
buf.write('\x03\x02\x02\x02Ᏽ\u13f6\x07,\x02\x02\u13f6\u13f7\x07,\x02\x02\u13f7Ϭ')
buf.write('\x03\x02\x02\x02ᏸᏹ\x07,\x02\x02ᏹϮ\x03\x02\x02\x02ᏺ')
buf.write('ᏻ\x07-\x02\x02ᏻϰ\x03\x02\x02\x02ᏼᏽ\x07/\x02\x02ᏽ')
buf.write('ϲ\x03\x02\x02\x02\u13fe\u13ff\x07.\x02\x02\u13ffϴ\x03\x02\x02\x02')
buf.write('᐀ᐁ\x071\x02\x02ᐁ϶\x03\x02\x02\x02ᐂᐃ')
buf.write('\x07B\x02\x02ᐃϸ\x03\x02\x02\x02ᐄᐅ\x07<\x02\x02ᐅᐆ')
buf.write('\x07?\x02\x02ᐆϺ\x03\x02\x02\x02ᐇᐈ\x07<\x02\x02ᐈᐍ')
buf.write('\x05Сȑ\x02ᐉᐌ\x05Сȑ\x02ᐊᐌ')
buf.write('\t\x08\x02\x02ᐋᐉ\x03\x02\x02\x02ᐋᐊ\x03\x02\x02\x02ᐌ')
buf.write('ᐏ\x03\x02\x02\x02ᐍᐋ\x03\x02\x02\x02ᐍᐎ\x03\x02\x02\x02')
buf.write('ᐎᐖ\x03\x02\x02\x02ᐏᐍ\x03\x02\x02\x02ᐐᐑ\x07')
buf.write('<\x02\x02ᐑᐖ\x05ϡDZ\x02ᐒᐓ\x07<\x02\x02ᐓ')
buf.write('ᐖ\x05ύǧ\x02ᐔᐖ\x05Бȉ\x02ᐕ')
buf.write('ᐇ\x03\x02\x02\x02ᐕᐐ\x03\x02\x02\x02ᐕᐒ\x03\x02\x02\x02')
buf.write('ᐕᐔ\x03\x02\x02\x02ᐖϼ\x03\x02\x02\x02ᐗᐘ\x07')
buf.write('<\x02\x02ᐘϾ\x03\x02\x02\x02ᐙᐚ\x07=\x02\x02ᐚЀ')
buf.write('\x03\x02\x02\x02ᐛᐜ\x07>\x02\x02ᐜᐝ\x07?\x02\x02ᐝЂ')
buf.write('\x03\x02\x02\x02ᐞᐟ\x07>\x02\x02ᐟЄ\x03\x02\x02\x02ᐠ')
buf.write('ᐡ\x07@\x02\x02ᐡᐢ\x07?\x02\x02ᐢІ\x03\x02\x02\x02ᐣ')
buf.write('ᐤ\x07#\x02\x02ᐤᐬ\x07?\x02\x02ᐥᐦ\x07>\x02\x02ᐦ')
buf.write('ᐬ\x07@\x02\x02ᐧᐨ\x07`\x02\x02ᐨᐬ\x07?\x02\x02ᐩ')
buf.write('ᐪ\x07\x80\x02\x02ᐪᐬ\x07?\x02\x02ᐫᐣ\x03\x02')
buf.write('\x02\x02ᐫᐥ\x03\x02\x02\x02ᐫᐧ\x03\x02\x02\x02ᐫᐩ')
buf.write('\x03\x02\x02\x02ᐬЈ\x03\x02\x02\x02ᐭᐮ\x07`\x02\x02ᐮ')
buf.write('Њ\x03\x02\x02\x02ᐯᐰ\x07\x80\x02\x02ᐰЌ\x03\x02')
buf.write('\x02\x02ᐱᐲ\x07#\x02\x02ᐲЎ\x03\x02\x02\x02ᐳᐴ')
buf.write('\x07@\x02\x02ᐴА\x03\x02\x02\x02ᐵᐶ\x07A\x02\x02ᐶВ')
buf.write('\x03\x02\x02\x02ᐷᐸ\x07~\x02\x02ᐸᐹ\x07~\x02\x02ᐹД')
buf.write('\x03\x02\x02\x02ᐺᐻ\x07~\x02\x02ᐻЖ\x03\x02\x02\x02ᐼ')
buf.write('ᐽ\x07?\x02\x02ᐽИ\x03\x02\x02\x02ᐾᐿ\x07]\x02\x02ᐿ')
buf.write('К\x03\x02\x02\x02ᑀᑁ\x07_\x02\x02ᑁМ\x03\x02\x02\x02')
buf.write('ᑂᑃ\x07a\x02\x02ᑃО\x03\x02\x02\x02ᑄᑆ\t')
buf.write('\t\x02\x02ᑅᑄ\x03\x02\x02\x02ᑆᑇ\x03\x02\x02\x02ᑇᑅ')
buf.write('\x03\x02\x02\x02ᑇᑈ\x03\x02\x02\x02ᑈᑉ\x03\x02\x02\x02ᑉ')
buf.write('ᑊ\x08Ȑ\x03\x02ᑊР\x03\x02\x02\x02ᑋᑌ\t\n')
buf.write('\x02\x02ᑌТ\x03\x02\x02\x02ᑍᑏ\x042;\x02ᑎᑍ')
buf.write('\x03\x02\x02\x02ᑏᑐ\x03\x02\x02\x02ᑐᑎ\x03\x02\x02\x02ᑐ')
buf.write('ᑑ\x03\x02\x02\x02ᑑФ\x03\x02\x02\x02ᑒᑔ\x05ύ')
buf.write('ǧ\x02ᑓᑒ\x03\x02\x02\x02ᑔᑗ\x03\x02\x02\x02ᑕ')
buf.write('ᑓ\x03\x02\x02\x02ᑕᑖ\x03\x02\x02\x02ᑖᑙ\x03\x02\x02\x02')
buf.write('ᑗᑕ\x03\x02\x02\x02ᑘᑚ\x070\x02\x02ᑙᑘ')
buf.write('\x03\x02\x02\x02ᑙᑚ\x03\x02\x02\x02ᑚᑜ\x03\x02\x02\x02ᑛ')
buf.write('ᑝ\x05ύǧ\x02ᑜᑛ\x03\x02\x02\x02ᑝᑞ')
buf.write('\x03\x02\x02\x02ᑞᑜ\x03\x02\x02\x02ᑞᑟ\x03\x02\x02\x02ᑟ')
buf.write('Ц\x03\x02\x02\x02ᑠᑡ\x07/\x02\x02ᑡᑢ\x07/\x02\x02ᑢ')
buf.write('ᑦ\x03\x02\x02\x02ᑣᑥ\n\x0b\x02\x02ᑤᑣ\x03\x02\x02')
buf.write('\x02ᑥᑨ\x03\x02\x02\x02ᑦᑤ\x03\x02\x02\x02ᑦᑧ')
buf.write('\x03\x02\x02\x02ᑧᑫ\x03\x02\x02\x02ᑨᑦ\x03\x02\x02\x02ᑩ')
buf.write('ᑬ\x05Эȗ\x02ᑪᑬ\x07\x02\x02\x03ᑫᑩ')
buf.write('\x03\x02\x02\x02ᑫᑪ\x03\x02\x02\x02ᑬᑭ\x03\x02\x02\x02ᑭ')
buf.write('ᑮ\x08Ȕ\x04\x02ᑮШ\x03\x02\x02\x02ᑯᑰ\x071')
buf.write('\x02\x02ᑰᑱ\x07,\x02\x02ᑱᑵ\x03\x02\x02\x02ᑲᑴ')
buf.write('\x0b\x02\x02\x02ᑳᑲ\x03\x02\x02\x02ᑴᑷ\x03\x02\x02\x02ᑵ')
buf.write('ᑶ\x03\x02\x02\x02ᑵᑳ\x03\x02\x02\x02ᑶᑸ\x03\x02\x02\x02')
buf.write('ᑷᑵ\x03\x02\x02\x02ᑸᑹ\x07,\x02\x02ᑹᑺ\x07')
buf.write('1\x02\x02ᑺᑻ\x03\x02\x02\x02ᑻᑼ\x08ȕ\x04\x02ᑼ')
buf.write('Ъ\x03\x02\x02\x02ᑽᑾ\x07r\x02\x02ᑾᑿ\x07t\x02\x02ᑿ')
buf.write('ᒀ\x07q\x02\x02ᒀᒁ\x07o\x02\x02ᒁᒂ\x07r\x02\x02ᒂ')
buf.write('ᒃ\x07v\x02\x02ᒃᒄ\x03\x02\x02\x02ᒄᒈ\x05Я')
buf.write('Ș\x02ᒅᒇ\n\x0b\x02\x02ᒆᒅ\x03\x02\x02\x02ᒇ')
buf.write('ᒊ\x03\x02\x02\x02ᒈᒆ\x03\x02\x02\x02ᒈᒉ\x03\x02\x02\x02')
buf.write('ᒉᒍ\x03\x02\x02\x02ᒊᒈ\x03\x02\x02\x02ᒋᒎ\x05')
buf.write('Эȗ\x02ᒌᒎ\x07\x02\x02\x03ᒍᒋ\x03\x02\x02\x02')
buf.write('ᒍᒌ\x03\x02\x02\x02ᒎЬ\x03\x02\x02\x02ᒏᒑ\x07')
buf.write('\x0f\x02\x02ᒐᒏ\x03\x02\x02\x02ᒐᒑ\x03\x02\x02\x02ᒑ')
buf.write('ᒒ\x03\x02\x02\x02ᒒᒓ\x07\x0c\x02\x02ᒓЮ\x03\x02\x02\x02')
buf.write('ᒔᒕ\t\x0c\x02\x02ᒕа\x03\x02\x02\x02ᒖᒛ\x05')
buf.write('Сȑ\x02ᒗᒚ\x05Сȑ\x02ᒘᒚ')
buf.write('\t\r\x02\x02ᒙᒗ\x03\x02\x02\x02ᒙᒘ\x03\x02\x02\x02ᒚ')
buf.write('ᒝ\x03\x02\x02\x02ᒛᒙ\x03\x02\x02\x02ᒛᒜ\x03\x02\x02\x02')
buf.write('ᒜв\x03\x02\x02\x02ᒝᒛ\x03\x02\x02\x02ᒞᒟ\x07')
buf.write('B\x02\x02ᒟᒠ\x07#\x02\x02ᒠᒡ\x03\x02\x02\x02ᒡᒢ')
buf.write('\x08Ț\x04\x02ᒢд\x03\x02\x02\x02ᒣᒤ\t\x0e\x02\x02')
buf.write('ᒤж\x03\x02\x02\x02ᒥᒦ\t\x0f\x02\x02ᒦи')
buf.write('\x03\x02\x02\x02ᒧᒨ\t\x10\x02\x02ᒨк\x03\x02\x02\x02ᒩ')
buf.write('ᒪ\t\x11\x02\x02ᒪм\x03\x02\x02\x02ᒫᒬ\t\x04\x02')
buf.write('\x02ᒬо\x03\x02\x02\x02ᒭᒮ\t\x12\x02\x02ᒮр')
buf.write('\x03\x02\x02\x02ᒯᒰ\t\x13\x02\x02ᒰт\x03\x02\x02\x02ᒱ')
buf.write('ᒲ\t\x14\x02\x02ᒲф\x03\x02\x02\x02ᒳᒴ\t\x15\x02')
buf.write('\x02ᒴц\x03\x02\x02\x02ᒵᒶ\t\x16\x02\x02ᒶш')
buf.write('\x03\x02\x02\x02ᒷᒸ\t\x17\x02\x02ᒸъ\x03\x02\x02\x02ᒹ')
buf.write('ᒺ\t\x18\x02\x02ᒺь\x03\x02\x02\x02ᒻᒼ\t\x19\x02')
buf.write('\x02ᒼю\x03\x02\x02\x02ᒽᒾ\t\x1a\x02\x02ᒾѐ')
buf.write('\x03\x02\x02\x02ᒿᓀ\t\x1b\x02\x02ᓀђ\x03\x02\x02\x02ᓁ')
buf.write('ᓂ\t\x1c\x02\x02ᓂє\x03\x02\x02\x02ᓃᓄ\t\x1d\x02')
buf.write('\x02ᓄі\x03\x02\x02\x02ᓅᓆ\t\x1e\x02\x02ᓆј')
buf.write('\x03\x02\x02\x02ᓇᓈ\t\x1f\x02\x02ᓈњ\x03\x02\x02\x02ᓉ')
buf.write('ᓊ\t \x02\x02ᓊќ\x03\x02\x02\x02ᓋᓌ\t!\x02\x02ᓌ')
buf.write('ў\x03\x02\x02\x02ᓍᓎ\t"\x02\x02ᓎѠ\x03\x02\x02\x02')
buf.write('ᓏᓐ\t#\x02\x02ᓐѢ\x03\x02\x02\x02ᓑᓒ\t')
buf.write('$\x02\x02ᓒѤ\x03\x02\x02\x02ᓓᓔ\t%\x02\x02ᓔѦ')
buf.write("\x03\x02\x02\x02ᓕᓖ\t&\x02\x02ᓖѨ\x03\x02\x02\x02'\x02፧")
buf.write('፩፳፹ᎀᎆ᎒᎖᎘\u139c')
buf.write('ᎣᎥᎯᎺᏅᏐᏛᏧᏩ')
buf.write('ᐋᐍᐕᐫᑇᑐᑕᑙᑞ')
buf.write('ᑦᑫᑵᒈᒍᒐᒙᒛ\x05\tǪ')
buf.write('\x02\x08\x02\x02\x02\x03\x02')
return buf.getvalue()
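
# The string assembled above is the ANTLR-serialized ATN for this lexer; the
# class below deserializes it once, when the module is imported.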
class PlSqlLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
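    # decisionsToDFA holds one lazily populated DFA cache per ATN decision state.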
T__0 = 1
A_LETTER = 2
ADD = 3
AFTER = 4
AGENT = 5
AGGREGATE = 6
ALL = 7
ALTER = 8
ANALYZE = 9
AND = 10
ANY = 11
ARRAY = 12
AS = 13
ASSUME = 14
ASSERT = 15
ASC = 16
ASSOCIATE = 17
AT = 18
ATTRIBUTE = 19
AUDIT = 20
AUTHID = 21
AUTO = 22
AUTOMATIC = 23
AUTONOMOUS_TRANSACTION = 24
BATCH = 25
BEFORE = 26
BEGIN = 27
BETWEEN = 28
BFILE = 29
BINARY_DOUBLE = 30
BINARY_FLOAT = 31
BINARY_INTEGER = 32
BLOB = 33
BLOCK = 34
BODY = 35
BOOLEAN = 36
BOTH = 37
BREADTH = 38
BULK = 39
BY = 40
BYTE = 41
C_LETTER = 42
CACHE = 43
CALL = 44
CANONICAL = 45
CASCADE = 46
CASE = 47
CAST = 48
CHAR = 49
CHAR_CS = 50
CHARACTER = 51
CHECK = 52
CHR = 53
CLOB = 54
CLOSE = 55
CLUSTER = 56
COLLECT = 57
COLUMNS = 58
COMMENT = 59
COMMIT = 60
COMMITTED = 61
COMPATIBILITY = 62
COMPILE = 63
COMPOUND = 64
CONNECT = 65
CONNECT_BY_ROOT = 66
CONSTANT = 67
CONSTRAINT = 68
CONSTRAINTS = 69
CONSTRUCTOR = 70
CONTENT = 71
CONTEXT = 72
CONTINUE = 73
CONVERT = 74
CORRUPT_XID = 75
CORRUPT_XID_ALL = 76
COST = 77
COUNT = 78
CREATE = 79
CROSS = 80
CUBE = 81
CURRENT = 82
CURRENT_USER = 83
CURSOR = 84
CUSTOMDATUM = 85
CYCLE = 86
DATA = 87
DATABASE = 88
DATE = 89
DAY = 90
DB_ROLE_CHANGE = 91
DBTIMEZONE = 92
DDL = 93
DEBUG = 94
DEC = 95
DECIMAL = 96
DECLARE = 97
DECOMPOSE = 98
DECREMENT = 99
DEFAULT = 100
DEFAULTS = 101
DEFERRED = 102
DEFINER = 103
DELETE = 104
DEPTH = 105
DESC = 106
DETERMINISTIC = 107
DIMENSION = 108
DISABLE = 109
DISASSOCIATE = 110
DISTINCT = 111
DOCUMENT = 112
DOUBLE = 113
DROP = 114
DSINTERVAL_UNCONSTRAINED = 115
EACH = 116
ELEMENT = 117
ELSE = 118
ELSIF = 119
EMPTY = 120
ENABLE = 121
ENCODING = 122
END = 123
ENTITYESCAPING = 124
ERR = 125
ERRORS = 126
ESCAPE = 127
EVALNAME = 128
EXCEPT = 129
EXCEPTION = 130
EXCEPTION_INIT = 131
EXCEPTIONS = 132
EXCLUDE = 133
EXCLUSIVE = 134
EXECUTE = 135
EXISTS = 136
EXIT = 137
EXPLAIN = 138
EXTERNAL = 139
EXTRACT = 140
FAILURE = 141
FALSE = 142
FETCH = 143
FINAL = 144
FIRST = 145
FIRST_VALUE = 146
FLOAT = 147
FOLLOWING = 148
FOLLOWS = 149
FOR = 150
FORALL = 151
FORCE = 152
FROM = 153
FULL = 154
FUNCTION = 155
GOTO = 156
GRANT = 157
GROUP = 158
GROUPING = 159
HASH = 160
HAVING = 161
HIDE = 162
HOUR = 163
IF = 164
IGNORE = 165
IMMEDIATE = 166
IN = 167
INCLUDE = 168
INCLUDING = 169
INCREMENT = 170
INDENT = 171
INDEX = 172
INDEXED = 173
INDICATOR = 174
INDICES = 175
INFINITE = 176
INLINE = 177
INNER = 178
INOUT = 179
INSERT = 180
INSTANTIABLE = 181
INSTEAD = 182
INT = 183
INTEGER = 184
INTERSECT = 185
INTERVAL = 186
INTO = 187
INVALIDATE = 188
IS = 189
ISOLATION = 190
ITERATE = 191
JAVA = 192
JOIN = 193
KEEP = 194
LANGUAGE = 195
LAST = 196
LAST_VALUE = 197
LEADING = 198
LEFT = 199
LEVEL = 200
LIBRARY = 201
LIKE = 202
LIKE2 = 203
LIKE4 = 204
LIKEC = 205
LIMIT = 206
LOCAL = 207
LOCK = 208
LOCKED = 209
LOG = 210
LOGOFF = 211
LOGON = 212
LONG = 213
LOOP = 214
MAIN = 215
MAP = 216
MATCHED = 217
MAXVALUE = 218
MEASURES = 219
MEMBER = 220
MERGE = 221
MINUS = 222
MINUTE = 223
MINVALUE = 224
MLSLABEL = 225
MODE = 226
MODEL = 227
MODIFY = 228
MONTH = 229
MULTISET = 230
NAME = 231
NAN = 232
NATURAL = 233
NATURALN = 234
NAV = 235
NCHAR = 236
NCHAR_CS = 237
NCLOB = 238
NESTED = 239
NEW = 240
NO = 241
NOAUDIT = 242
NOCACHE = 243
NOCOPY = 244
NOCYCLE = 245
NOENTITYESCAPING = 246
NOMAXVALUE = 247
NOMINVALUE = 248
NONE = 249
NOORDER = 250
NOSCHEMACHECK = 251
NOT = 252
NOWAIT = 253
NULL = 254
NULLS = 255
NUMBER = 256
NUMERIC = 257
NVARCHAR2 = 258
OBJECT = 259
OF = 260
OFF = 261
OID = 262
OLD = 263
ON = 264
ONLY = 265
OPEN = 266
OPTION = 267
OR = 268
ORADATA = 269
ORDER = 270
ORDINALITY = 271
OSERROR = 272
OUT = 273
OUTER = 274
OVER = 275
OVERRIDING = 276
PACKAGE = 277
PARALLEL_ENABLE = 278
PARAMETERS = 279
PARENT = 280
PARTITION = 281
PASSING = 282
PATH = 283
PERCENT_ROWTYPE = 284
PERCENT_TYPE = 285
PIPELINED = 286
PIVOT = 287
PLAN = 288
PLS_INTEGER = 289
POSITIVE = 290
POSITIVEN = 291
PRAGMA = 292
PRECEDING = 293
PRECISION = 294
PRESENT = 295
PRIOR = 296
PROCEDURE = 297
RAISE = 298
RANGE = 299
RAW = 300
READ = 301
REAL = 302
RECORD = 303
REF = 304
REFERENCE = 305
REFERENCING = 306
REJECT = 307
RELIES_ON = 308
RENAME = 309
REPLACE = 310
RESPECT = 311
RESTRICT_REFERENCES = 312
RESULT = 313
RESULT_CACHE = 314
RETURN = 315
RETURNING = 316
REUSE = 317
REVERSE = 318
REVOKE = 319
RIGHT = 320
ROLLBACK = 321
ROLLUP = 322
ROW = 323
ROWID = 324
ROWS = 325
RULES = 326
SAMPLE = 327
SAVE = 328
SAVEPOINT = 329
SCHEMA = 330
SCHEMACHECK = 331
SCN = 332
SEARCH = 333
SECOND = 334
SEED = 335
SEGMENT = 336
SELECT = 337
SELF = 338
SEQUENCE = 339
SEQUENTIAL = 340
SERIALIZABLE = 341
SERIALLY_REUSABLE = 342
SERVERERROR = 343
SESSIONTIMEZONE = 344
SET = 345
SETS = 346
SETTINGS = 347
SHARE = 348
SHOW = 349
SHUTDOWN = 350
SIBLINGS = 351
SIGNTYPE = 352
SIMPLE_INTEGER = 353
SINGLE = 354
SIZE = 355
SKIP_ = 356
SMALLINT = 357
SNAPSHOT = 358
SOME = 359
SPECIFICATION = 360
SQLDATA = 361
SQLERROR = 362
STANDALONE = 363
START = 364
STARTUP = 365
STATEMENT = 366
STATEMENT_ID = 367
STATIC = 368
STATISTICS = 369
STRING = 370
SUBMULTISET = 371
SUBPARTITION = 372
SUBSTITUTABLE = 373
SUBTYPE = 374
SUCCESS = 375
SUSPEND = 376
TABLE = 377
THE = 378
THEN = 379
TIME = 380
TIMESTAMP = 381
TIMESTAMP_LTZ_UNCONSTRAINED = 382
TIMESTAMP_TZ_UNCONSTRAINED = 383
TIMESTAMP_UNCONSTRAINED = 384
TIMEZONE_ABBR = 385
TIMEZONE_HOUR = 386
TIMEZONE_MINUTE = 387
TIMEZONE_REGION = 388
TO = 389
TRAILING = 390
TRANSACTION = 391
TRANSLATE = 392
TREAT = 393
TRIGGER = 394
TRIM = 395
TRUE = 396
TRUNCATE = 397
TYPE = 398
UNBOUNDED = 399
UNDER = 400
UNION = 401
UNIQUE = 402
UNLIMITED = 403
UNPIVOT = 404
UNTIL = 405
UPDATE = 406
UPDATED = 407
UPSERT = 408
UROWID = 409
USE = 410
USING = 411
VALIDATE = 412
VALUE = 413
VALUES = 414
VARCHAR = 415
VARCHAR2 = 416
VARIABLE = 417
VARRAY = 418
VARYING = 419
VERSION = 420
VERSIONS = 421
WAIT = 422
WARNING = 423
WELLFORMED = 424
WHEN = 425
WHENEVER = 426
WHERE = 427
WHILE = 428
WITH = 429
WITHIN = 430
WORK = 431
WRITE = 432
XML = 433
XMLAGG = 434
XMLATTRIBUTES = 435
XMLCAST = 436
XMLCOLATTVAL = 437
XMLELEMENT = 438
XMLEXISTS = 439
XMLFOREST = 440
XMLNAMESPACES = 441
XMLPARSE = 442
XMLPI = 443
XMLQUERY = 444
XMLROOT = 445
XMLSERIALIZE = 446
XMLTABLE = 447
YEAR = 448
YES = 449
YMINTERVAL_UNCONSTRAINED = 450
ZONE = 451
PREDICTION = 452
PREDICTION_BOUNDS = 453
PREDICTION_COST = 454
PREDICTION_DETAILS = 455
PREDICTION_PROBABILITY = 456
PREDICTION_SET = 457
CUME_DIST = 458
DENSE_RANK = 459
LISTAGG = 460
PERCENT_RANK = 461
PERCENTILE_CONT = 462
PERCENTILE_DISC = 463
RANK = 464
AVG = 465
CORR = 466
LAG = 467
LEAD = 468
MAX = 469
MEDIAN = 470
MIN = 471
NTILE = 472
RATIO_TO_REPORT = 473
ROW_NUMBER = 474
SUM = 475
VARIANCE = 476
REGR_ = 477
STDDEV = 478
VAR_ = 479
COVAR_ = 480
NATIONAL_CHAR_STRING_LIT = 481
BIT_STRING_LIT = 482
HEX_STRING_LIT = 483
DOUBLE_PERIOD = 484
PERIOD = 485
UNSIGNED_INTEGER = 486
APPROXIMATE_NUM_LIT = 487
CHAR_STRING = 488
DELIMITED_ID = 489
PERCENT = 490
AMPERSAND = 491
LEFT_PAREN = 492
RIGHT_PAREN = 493
DOUBLE_ASTERISK = 494
ASTERISK = 495
PLUS_SIGN = 496
MINUS_SIGN = 497
COMMA = 498
SOLIDUS = 499
AT_SIGN = 500
ASSIGN_OP = 501
BINDVAR = 502
COLON = 503
SEMICOLON = 504
LESS_THAN_OR_EQUALS_OP = 505
LESS_THAN_OP = 506
GREATER_THAN_OR_EQUALS_OP = 507
NOT_EQUAL_OP = 508
CARRET_OPERATOR_PART = 509
TILDE_OPERATOR_PART = 510
EXCLAMATION_OPERATOR_PART = 511
GREATER_THAN_OP = 512
CONCATENATION_OP = 513
VERTICAL_BAR = 514
EQUALS_OP = 515
LEFT_BRACKET = 516
RIGHT_BRACKET = 517
INTRODUCER = 518
SPACES = 519
SINGLE_LINE_COMMENT = 520
MULTI_LINE_COMMENT = 521
PROMPT = 522
REGULAR_ID = 523
ZV = 524
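
    # Token type constants generated from PlSql.g4; a CommonToken's .type field
    # carries one of these values (or Token.EOF, which is -1) at runtime.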
channelNames = [u'DEFAULT_TOKEN_CHANNEL', u'HIDDEN']
modeNames = ['DEFAULT_MODE']
literalNames = ['<INVALID>', "'..'", "'.'", "'%'", "'&'", "'('", "')'",
"'**'", "'*'", "'+'", "'-'", "','", "'/'", "'@'", "':='", "':'",
"';'", "'<='", "'<'", "'>='", "'^'", "'~'", "'!'", "'>'", "'||'",
"'|'", "'='", "'['", "']'", "'_'", "'@!'"]
symbolicNames = ['<INVALID>', 'A_LETTER', 'ADD', 'AFTER', 'AGENT',
'AGGREGATE', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS',
'ASSUME', 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT',
'AUTHID', 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH',
'BEFORE', 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE',
'BINARY_FLOAT', 'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY',
'BOOLEAN', 'BOTH', 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER',
'CACHE', 'CALL', 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR',
'CHAR_CS', 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER',
'COLLECT', 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED',
'COMPATIBILITY', 'COMPILE', 'COMPOUND', 'CONNECT',
'CONNECT_BY_ROOT', 'CONSTANT', 'CONSTRAINT', 'CONSTRAINTS',
'CONSTRUCTOR', 'CONTENT', 'CONTEXT', 'CONTINUE', 'CONVERT',
'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST', 'COUNT', 'CREATE',
'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER', 'CURSOR', 'CUSTOMDATUM',
'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY', 'DB_ROLE_CHANGE',
'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL', 'DECLARE',
'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS', 'DEFERRED',
'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC', 'DIMENSION',
'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT', 'DOUBLE', 'DROP',
'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT', 'ELSE', 'ELSIF',
'EMPTY', 'ENABLE', 'ENCODING', 'END', 'ENTITYESCAPING', 'ERR',
'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT', 'EXCEPTION',
'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE', 'EXECUTE',
'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FAILURE',
'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE', 'FLOAT',
'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM', 'FULL',
'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH', 'HAVING',
'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INCLUDE',
'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED', 'INDICATOR',
'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT', 'INSERT',
'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',
'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',
'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',
'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',
'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',
'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',
'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',
'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',
'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',
'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',
'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',
'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',
'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',
'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',
'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',
'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',
'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',
'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',
'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',
'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',
'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',
'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',
'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',
'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',
'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',
'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',
'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',
'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',
'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',
'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',
'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',
'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',
'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',
'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',
'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',
'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',
'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',
'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',
'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',
'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',
'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',
'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',
'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',
'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',
'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',
'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',
'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',
'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',
'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',
'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',
'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',
'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',
'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',
'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',
'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',
'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',
'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'DELIMITED_ID', 'PERCENT',
'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN', 'DOUBLE_ASTERISK',
'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA', 'SOLIDUS',
'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',
'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',
'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',
'GREATER_THAN_OP', 'CONCATENATION_OP', 'VERTICAL_BAR', 'EQUALS_OP',
'LEFT_BRACKET', 'RIGHT_BRACKET', 'INTRODUCER', 'SPACES',
'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'REGULAR_ID',
'ZV']
ruleNames = ['T__0', 'A_LETTER', 'ADD', 'AFTER', 'AGENT', 'AGGREGATE',
'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASSUME',
'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT', 'AUTHID',
'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH', 'BEFORE',
'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE', 'BINARY_FLOAT',
'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY', 'BOOLEAN', 'BOTH',
'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER', 'CACHE', 'CALL',
'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR', 'CHAR_CS',
'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER', 'COLLECT',
'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPATIBILITY',
'COMPILE', 'COMPOUND', 'CONNECT', 'CONNECT_BY_ROOT', 'CONSTANT',
'CONSTRAINT', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTENT', 'CONTEXT',
'CONTINUE', 'CONVERT', 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST',
'COUNT', 'CREATE', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER',
'CURSOR', 'CUSTOMDATUM', 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY',
'DB_ROLE_CHANGE', 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL',
'DECLARE', 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS',
'DEFERRED', 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC',
'DIMENSION', 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT',
'DOUBLE', 'DROP', 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT',
'ELSE', 'ELSIF', 'EMPTY', 'ENABLE', 'ENCODING', 'END',
'ENTITYESCAPING', 'ERR', 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT',
'EXCEPTION', 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE',
'EXECUTE', 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT',
'FAILURE', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE',
'FLOAT', 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM',
'FULL', 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH',
'HAVING', 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN',
'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED',
'INDICATOR', 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT',
'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',
'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',
'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',
'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',
'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',
'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',
'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',
'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',
'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',
'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',
'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',
'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',
'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',
'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',
'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',
'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',
'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',
'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',
'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',
'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',
'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',
'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',
'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',
'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',
'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',
'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',
'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',
'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',
'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',
'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',
'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',
'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',
'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',
'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',
'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',
'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',
'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',
'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',
'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',
'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',
'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',
'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',
'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',
'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',
'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',
'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',
'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',
'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',
'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',
'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',
'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',
'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',
'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',
'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',
'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',
'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',
'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'CHAR_STRING_PERL', 'QUOTE',
'QS_ANGLE', 'QS_BRACE', 'QS_BRACK', 'QS_PAREN', 'QS_OTHER_CH',
'DELIMITED_ID', 'PERCENT', 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN',
'DOUBLE_ASTERISK', 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA',
'SOLIDUS', 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',
'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',
'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',
'GREATER_THAN_OP', 'QUESTION_MARK', 'CONCATENATION_OP',
'VERTICAL_BAR', 'EQUALS_OP', 'LEFT_BRACKET', 'RIGHT_BRACKET',
'INTRODUCER', 'SPACES', 'SIMPLE_LETTER',
'UNSIGNED_INTEGER_FRAGMENT', 'FLOAT_FRAGMENT',
'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'NEWLINE',
'SPACE', 'REGULAR_ID', 'ZV', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z']
grammarFileName = 'PlSql.g4'
def __init__(self, input=None, output: TextIO=sys.stdout):
super().__init__(input, output)
self.checkVersion('4.7.2')
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA,
                                         PredictionContextCache())
self._actions = None
self._predicates = None
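

# A minimal usage sketch, not part of the generated output: it assumes the
# matching antlr4-python3-runtime (4.7.2) is installed, and the SQL snippet is
# an arbitrary example. Each token is printed with its symbolic name.
if __name__ == '__main__':
    from antlr4 import CommonTokenStream, InputStream, Token

    lexer = PlSqlLexer(InputStream('SELECT dummy FROM dual;'))
    stream = CommonTokenStream(lexer)
    stream.fill()                       # run the lexer over the whole input
    for tok in stream.tokens:
        if tok.type != Token.EOF:       # skip the synthetic end-of-file token
            print(tok.text, PlSqlLexer.symbolicNames[tok.type])
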
from antlr4 import *
from io import StringIO
from typing import TextIO  # typing.io is deprecated, so import from typing directly
import sys
def serializedATN():
with StringIO() as buf:
buf.write('\x03悋Ꜫ脳맭䅼㯧瞆奤\x02Ȏ')
buf.write(
'ᓗ\x08\x01\x04\x02\t\x02\x04\x03\t\x03\x04\x04\t\x04\x04\x05\t\x05\x04\x06\t\x06\x04\x07'
)
buf.write(
'\t\x07\x04\x08\t\x08\x04\t\t\t\x04\n\t\n\x04\x0b\t\x0b\x04\x0c\t\x0c\x04\r\t\r'
)
buf.write(
'\x04\x0e\t\x0e\x04\x0f\t\x0f\x04\x10\t\x10\x04\x11\t\x11\x04\x12\t\x12\x04\x13'
)
buf.write(
'\t\x13\x04\x14\t\x14\x04\x15\t\x15\x04\x16\t\x16\x04\x17\t\x17\x04\x18\t\x18'
)
buf.write(
'\x04\x19\t\x19\x04\x1a\t\x1a\x04\x1b\t\x1b\x04\x1c\t\x1c\x04\x1d\t\x1d\x04\x1e'
)
buf.write(
'\t\x1e\x04\x1f\t\x1f\x04 \t \x04!\t!\x04"\t"\x04#\t#\x04$\t$\x04%\t%'
)
buf.write(
"\x04&\t&\x04'\t'\x04(\t(\x04)\t)\x04*\t*\x04+\t+\x04,\t,\x04-\t-\x04."
)
buf.write('\t.\x04/\t/\x040\t0\x041\t1\x042\t2\x043\t3\x044')
buf.write('\t4\x045\t5\x046\t6\x047\t7\x048\t8\x049\t9\x04:\t:')
buf.write(
'\x04;\t;\x04<\t<\x04=\t=\x04>\t>\x04?\t?\x04@\t@\x04A\tA\x04B\tB\x04C\t'
)
buf.write(
'C\x04D\tD\x04E\tE\x04F\tF\x04G\tG\x04H\tH\x04I\tI\x04J\tJ\x04K\tK\x04L\t'
)
buf.write(
'L\x04M\tM\x04N\tN\x04O\tO\x04P\tP\x04Q\tQ\x04R\tR\x04S\tS\x04T\tT\x04U\t'
)
buf.write(
'U\x04V\tV\x04W\tW\x04X\tX\x04Y\tY\x04Z\tZ\x04[\t[\x04\\\t\\\x04]\t]\x04'
)
buf.write(
'^\t^\x04_\t_\x04`\t`\x04a\ta\x04b\tb\x04c\tc\x04d\td\x04e\te\x04f\tf\x04'
)
buf.write(
'g\tg\x04h\th\x04i\ti\x04j\tj\x04k\tk\x04l\tl\x04m\tm\x04n\tn\x04o\to\x04'
)
buf.write(
'p\tp\x04q\tq\x04r\tr\x04s\ts\x04t\tt\x04u\tu\x04v\tv\x04w\tw\x04x\tx\x04'
)
buf.write(
'y\ty\x04z\tz\x04{\t{\x04|\t|\x04}\t}\x04~\t~\x04\x7f\t\x7f\x04\x80'
)
buf.write('\t\x80\x04\x81\t\x81\x04\x82\t\x82\x04\x83\t\x83')
buf.write('\x04\x84\t\x84\x04\x85\t\x85\x04\x86\t\x86\x04\x87')
buf.write('\t\x87\x04\x88\t\x88\x04\x89\t\x89\x04\x8a\t\x8a')
buf.write('\x04\x8b\t\x8b\x04\x8c\t\x8c\x04\x8d\t\x8d\x04\x8e')
buf.write('\t\x8e\x04\x8f\t\x8f\x04\x90\t\x90\x04\x91\t\x91')
buf.write('\x04\x92\t\x92\x04\x93\t\x93\x04\x94\t\x94\x04\x95')
buf.write('\t\x95\x04\x96\t\x96\x04\x97\t\x97\x04\x98\t\x98')
buf.write('\x04\x99\t\x99\x04\x9a\t\x9a\x04\x9b\t\x9b\x04\x9c')
buf.write('\t\x9c\x04\x9d\t\x9d\x04\x9e\t\x9e\x04\x9f\t\x9f')
buf.write('\x04\xa0\t\xa0\x04¡\t¡\x04¢\t¢\x04£')
buf.write('\t£\x04¤\t¤\x04¥\t¥\x04¦\t¦')
buf.write('\x04§\t§\x04¨\t¨\x04©\t©\x04ª')
buf.write('\tª\x04«\t«\x04¬\t¬\x04\xad\t\xad')
buf.write('\x04®\t®\x04¯\t¯\x04°\t°\x04±')
buf.write('\t±\x04²\t²\x04³\t³\x04´\t´')
buf.write('\x04µ\tµ\x04¶\t¶\x04·\t·\x04¸')
buf.write('\t¸\x04¹\t¹\x04º\tº\x04»\t»')
buf.write('\x04¼\t¼\x04½\t½\x04¾\t¾\x04¿')
buf.write('\t¿\x04À\tÀ\x04Á\tÁ\x04Â\tÂ')
buf.write('\x04Ã\tÃ\x04Ä\tÄ\x04Å\tÅ\x04Æ')
buf.write('\tÆ\x04Ç\tÇ\x04È\tÈ\x04É\tÉ')
buf.write('\x04Ê\tÊ\x04Ë\tË\x04Ì\tÌ\x04Í')
buf.write('\tÍ\x04Î\tÎ\x04Ï\tÏ\x04Ð\tÐ')
buf.write('\x04Ñ\tÑ\x04Ò\tÒ\x04Ó\tÓ\x04Ô')
buf.write('\tÔ\x04Õ\tÕ\x04Ö\tÖ\x04×\t×')
buf.write('\x04Ø\tØ\x04Ù\tÙ\x04Ú\tÚ\x04Û')
buf.write('\tÛ\x04Ü\tÜ\x04Ý\tÝ\x04Þ\tÞ')
buf.write('\x04ß\tß\x04à\tà\x04á\tá\x04â')
buf.write('\tâ\x04ã\tã\x04ä\tä\x04å\tå')
buf.write('\x04æ\tæ\x04ç\tç\x04è\tè\x04é')
buf.write('\té\x04ê\tê\x04ë\të\x04ì\tì')
buf.write('\x04í\tí\x04î\tî\x04ï\tï\x04ð')
buf.write('\tð\x04ñ\tñ\x04ò\tò\x04ó\tó')
buf.write('\x04ô\tô\x04õ\tõ\x04ö\tö\x04÷')
buf.write('\t÷\x04ø\tø\x04ù\tù\x04ú\tú')
buf.write('\x04û\tû\x04ü\tü\x04ý\tý\x04þ')
buf.write('\tþ\x04ÿ\tÿ\x04Ā\tĀ\x04ā\tā')
buf.write('\x04Ă\tĂ\x04ă\tă\x04Ą\tĄ\x04ą')
buf.write('\tą\x04Ć\tĆ\x04ć\tć\x04Ĉ\tĈ')
buf.write('\x04ĉ\tĉ\x04Ċ\tĊ\x04ċ\tċ\x04Č')
buf.write('\tČ\x04č\tč\x04Ď\tĎ\x04ď\tď')
buf.write('\x04Đ\tĐ\x04đ\tđ\x04Ē\tĒ\x04ē')
buf.write('\tē\x04Ĕ\tĔ\x04ĕ\tĕ\x04Ė\tĖ')
buf.write('\x04ė\tė\x04Ę\tĘ\x04ę\tę\x04Ě')
buf.write('\tĚ\x04ě\tě\x04Ĝ\tĜ\x04ĝ\tĝ')
buf.write('\x04Ğ\tĞ\x04ğ\tğ\x04Ġ\tĠ\x04ġ')
buf.write('\tġ\x04Ģ\tĢ\x04ģ\tģ\x04Ĥ\tĤ')
buf.write('\x04ĥ\tĥ\x04Ħ\tĦ\x04ħ\tħ\x04Ĩ')
buf.write('\tĨ\x04ĩ\tĩ\x04Ī\tĪ\x04ī\tī')
buf.write('\x04Ĭ\tĬ\x04ĭ\tĭ\x04Į\tĮ\x04į')
buf.write('\tį\x04İ\tİ\x04ı\tı\x04IJ\tIJ')
buf.write('\x04ij\tij\x04Ĵ\tĴ\x04ĵ\tĵ\x04Ķ')
buf.write('\tĶ\x04ķ\tķ\x04ĸ\tĸ\x04Ĺ\tĹ')
buf.write('\x04ĺ\tĺ\x04Ļ\tĻ\x04ļ\tļ\x04Ľ')
buf.write('\tĽ\x04ľ\tľ\x04Ŀ\tĿ\x04ŀ\tŀ')
buf.write('\x04Ł\tŁ\x04ł\tł\x04Ń\tŃ\x04ń')
buf.write('\tń\x04Ņ\tŅ\x04ņ\tņ\x04Ň\tŇ')
buf.write('\x04ň\tň\x04ʼn\tʼn\x04Ŋ\tŊ\x04ŋ')
buf.write('\tŋ\x04Ō\tŌ\x04ō\tō\x04Ŏ\tŎ')
buf.write('\x04ŏ\tŏ\x04Ő\tŐ\x04ő\tő\x04Œ')
buf.write('\tŒ\x04œ\tœ\x04Ŕ\tŔ\x04ŕ\tŕ')
buf.write('\x04Ŗ\tŖ\x04ŗ\tŗ\x04Ř\tŘ\x04ř')
buf.write('\tř\x04Ś\tŚ\x04ś\tś\x04Ŝ\tŜ')
buf.write('\x04ŝ\tŝ\x04Ş\tŞ\x04ş\tş\x04Š')
buf.write('\tŠ\x04š\tš\x04Ţ\tŢ\x04ţ\tţ')
buf.write('\x04Ť\tŤ\x04ť\tť\x04Ŧ\tŦ\x04ŧ')
buf.write('\tŧ\x04Ũ\tŨ\x04ũ\tũ\x04Ū\tŪ')
buf.write('\x04ū\tū\x04Ŭ\tŬ\x04ŭ\tŭ\x04Ů')
buf.write('\tŮ\x04ů\tů\x04Ű\tŰ\x04ű\tű')
buf.write('\x04Ų\tŲ\x04ų\tų\x04Ŵ\tŴ\x04ŵ')
buf.write('\tŵ\x04Ŷ\tŶ\x04ŷ\tŷ\x04Ÿ\tŸ')
buf.write('\x04Ź\tŹ\x04ź\tź\x04Ż\tŻ\x04ż')
buf.write('\tż\x04Ž\tŽ\x04ž\tž\x04ſ\tſ')
buf.write('\x04ƀ\tƀ\x04Ɓ\tƁ\x04Ƃ\tƂ\x04ƃ')
buf.write('\tƃ\x04Ƅ\tƄ\x04ƅ\tƅ\x04Ɔ\tƆ')
buf.write('\x04Ƈ\tƇ\x04ƈ\tƈ\x04Ɖ\tƉ\x04Ɗ')
buf.write('\tƊ\x04Ƌ\tƋ\x04ƌ\tƌ\x04ƍ\tƍ')
buf.write('\x04Ǝ\tƎ\x04Ə\tƏ\x04Ɛ\tƐ\x04Ƒ')
buf.write('\tƑ\x04ƒ\tƒ\x04Ɠ\tƓ\x04Ɣ\tƔ')
buf.write('\x04ƕ\tƕ\x04Ɩ\tƖ\x04Ɨ\tƗ\x04Ƙ')
buf.write('\tƘ\x04ƙ\tƙ\x04ƚ\tƚ\x04ƛ\tƛ')
buf.write('\x04Ɯ\tƜ\x04Ɲ\tƝ\x04ƞ\tƞ\x04Ɵ')
buf.write('\tƟ\x04Ơ\tƠ\x04ơ\tơ\x04Ƣ\tƢ')
buf.write('\x04ƣ\tƣ\x04Ƥ\tƤ\x04ƥ\tƥ\x04Ʀ')
buf.write('\tƦ\x04Ƨ\tƧ\x04ƨ\tƨ\x04Ʃ\tƩ')
buf.write('\x04ƪ\tƪ\x04ƫ\tƫ\x04Ƭ\tƬ\x04ƭ')
buf.write('\tƭ\x04Ʈ\tƮ\x04Ư\tƯ\x04ư\tư')
buf.write('\x04Ʊ\tƱ\x04Ʋ\tƲ\x04Ƴ\tƳ\x04ƴ')
buf.write('\tƴ\x04Ƶ\tƵ\x04ƶ\tƶ\x04Ʒ\tƷ')
buf.write('\x04Ƹ\tƸ\x04ƹ\tƹ\x04ƺ\tƺ\x04ƻ')
buf.write('\tƻ\x04Ƽ\tƼ\x04ƽ\tƽ\x04ƾ\tƾ')
buf.write('\x04ƿ\tƿ\x04ǀ\tǀ\x04ǁ\tǁ\x04ǂ')
buf.write('\tǂ\x04ǃ\tǃ\x04DŽ\tDŽ\x04Dž\tDž')
buf.write('\x04dž\tdž\x04LJ\tLJ\x04Lj\tLj\x04lj')
buf.write('\tlj\x04NJ\tNJ\x04Nj\tNj\x04nj\tnj')
buf.write('\x04Ǎ\tǍ\x04ǎ\tǎ\x04Ǐ\tǏ\x04ǐ')
buf.write('\tǐ\x04Ǒ\tǑ\x04ǒ\tǒ\x04Ǔ\tǓ')
buf.write('\x04ǔ\tǔ\x04Ǖ\tǕ\x04ǖ\tǖ\x04Ǘ')
buf.write('\tǗ\x04ǘ\tǘ\x04Ǚ\tǙ\x04ǚ\tǚ')
buf.write('\x04Ǜ\tǛ\x04ǜ\tǜ\x04ǝ\tǝ\x04Ǟ')
buf.write('\tǞ\x04ǟ\tǟ\x04Ǡ\tǠ\x04ǡ\tǡ')
buf.write('\x04Ǣ\tǢ\x04ǣ\tǣ\x04Ǥ\tǤ\x04ǥ')
buf.write('\tǥ\x04Ǧ\tǦ\x04ǧ\tǧ\x04Ǩ\tǨ')
buf.write('\x04ǩ\tǩ\x04Ǫ\tǪ\x04ǫ\tǫ\x04Ǭ')
buf.write('\tǬ\x04ǭ\tǭ\x04Ǯ\tǮ\x04ǯ\tǯ')
buf.write('\x04ǰ\tǰ\x04DZ\tDZ\x04Dz\tDz\x04dz')
buf.write('\tdz\x04Ǵ\tǴ\x04ǵ\tǵ\x04Ƕ\tǶ')
buf.write('\x04Ƿ\tǷ\x04Ǹ\tǸ\x04ǹ\tǹ\x04Ǻ')
buf.write('\tǺ\x04ǻ\tǻ\x04Ǽ\tǼ\x04ǽ\tǽ')
buf.write('\x04Ǿ\tǾ\x04ǿ\tǿ\x04Ȁ\tȀ\x04ȁ')
buf.write('\tȁ\x04Ȃ\tȂ\x04ȃ\tȃ\x04Ȅ\tȄ')
buf.write('\x04ȅ\tȅ\x04Ȇ\tȆ\x04ȇ\tȇ\x04Ȉ')
buf.write('\tȈ\x04ȉ\tȉ\x04Ȋ\tȊ\x04ȋ\tȋ')
buf.write('\x04Ȍ\tȌ\x04ȍ\tȍ\x04Ȏ\tȎ\x04ȏ')
buf.write('\tȏ\x04Ȑ\tȐ\x04ȑ\tȑ\x04Ȓ\tȒ')
buf.write('\x04ȓ\tȓ\x04Ȕ\tȔ\x04ȕ\tȕ\x04Ȗ')
buf.write('\tȖ\x04ȗ\tȗ\x04Ș\tȘ\x04ș\tș')
buf.write('\x04Ț\tȚ\x04ț\tț\x04Ȝ\tȜ\x04ȝ')
buf.write('\tȝ\x04Ȟ\tȞ\x04ȟ\tȟ\x04Ƞ\tȠ')
buf.write('\x04ȡ\tȡ\x04Ȣ\tȢ\x04ȣ\tȣ\x04Ȥ')
buf.write('\tȤ\x04ȥ\tȥ\x04Ȧ\tȦ\x04ȧ\tȧ')
buf.write('\x04Ȩ\tȨ\x04ȩ\tȩ\x04Ȫ\tȪ\x04ȫ')
buf.write('\tȫ\x04Ȭ\tȬ\x04ȭ\tȭ\x04Ȯ\tȮ')
buf.write('\x04ȯ\tȯ\x04Ȱ\tȰ\x04ȱ\tȱ\x04Ȳ')
buf.write('\tȲ\x04ȳ\tȳ\x04ȴ\tȴ\x03\x02\x03\x02\x03\x02\x03')
buf.write(
'\x03\x03\x03\x03\x04\x03\x04\x03\x04\x03\x04\x03\x05\x03\x05\x03\x05\x03\x05\x03\x05\x03\x05\x03\x06\x03\x06'
)
buf.write(
'\x03\x06\x03\x06\x03\x06\x03\x06\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03'
)
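        # NOTE: the escaped payload of the next two writes was lost upstream
        # (their escape sequences were expanded away, leaving one bare newline
        # each), so this copy of the serialized ATN is incomplete.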
        buf.write('\n')
        buf.write('\n')
buf.write(
'\x0c\x03\r\x03\r\x03\r\x03\r\x03\r\x03\r\x03\x0e\x03\x0e\x03\x0e\x03\x0f\x03\x0f\x03'
)
buf.write(
'\x0f\x03\x0f\x03\x0f\x03\x0f\x03\x0f\x03\x10\x03\x10\x03\x10\x03\x10\x03\x10\x03\x10'
)
buf.write(
'\x03\x10\x03\x11\x03\x11\x03\x11\x03\x11\x03\x12\x03\x12\x03\x12\x03\x12\x03\x12\x03\x12'
)
buf.write(
'\x03\x12\x03\x12\x03\x12\x03\x12\x03\x13\x03\x13\x03\x13\x03\x14\x03\x14\x03\x14\x03\x14'
)
buf.write(
'\x03\x14\x03\x14\x03\x14\x03\x14\x03\x14\x03\x14\x03\x15\x03\x15\x03\x15\x03\x15\x03\x15'
)
buf.write(
'\x03\x15\x03\x16\x03\x16\x03\x16\x03\x16\x03\x16\x03\x16\x03\x16\x03\x17\x03\x17\x03\x17'
)
buf.write(
'\x03\x17\x03\x17\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18\x03\x18'
)
buf.write(
'\x03\x18\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19'
)
buf.write(
'\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19\x03\x19'
)
buf.write(
'\x03\x19\x03\x19\x03\x1a\x03\x1a\x03\x1a\x03\x1a\x03\x1a\x03\x1a\x03\x1b\x03\x1b\x03\x1b'
)
buf.write(
'\x03\x1b\x03\x1b\x03\x1b\x03\x1b\x03\x1c\x03\x1c\x03\x1c\x03\x1c\x03\x1c\x03\x1c\x03\x1d'
)
buf.write(
'\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1d\x03\x1e\x03\x1e\x03\x1e\x03\x1e'
)
buf.write(
'\x03\x1e\x03\x1e\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f'
)
buf.write(
'\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03\x1f\x03 \x03 \x03 \x03 \x03 \x03 \x03 \x03 \x03 \x03'
)
buf.write(
' \x03 \x03 \x03 \x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03!\x03'
)
buf.write(
'!\x03"\x03"\x03"\x03"\x03"\x03#\x03#\x03#\x03#\x03#\x03#\x03$\x03$\x03$\x03$\x03'
)
buf.write(
"$\x03%\x03%\x03%\x03%\x03%\x03%\x03%\x03%\x03&\x03&\x03&\x03&\x03&\x03'\x03'\x03'\x03"
)
buf.write(
"'\x03'\x03'\x03'\x03'\x03(\x03(\x03(\x03(\x03(\x03)\x03)\x03)\x03*\x03*\x03*\x03"
)
buf.write(
'*\x03*\x03+\x03+\x03,\x03,\x03,\x03,\x03,\x03,\x03-\x03-\x03-\x03-\x03-\x03.\x03.\x03.\x03'
)
buf.write(
'.\x03.\x03.\x03.\x03.\x03.\x03.\x03/\x03/\x03/\x03/\x03/\x03/\x03/\x03/\x030\x030'
)
buf.write('\x030\x030\x030\x031\x031\x031\x031\x031\x032\x032\x032')
buf.write('\x032\x032\x033\x033\x033\x033\x033\x033\x033\x033\x034')
buf.write('\x034\x034\x034\x034\x034\x034\x034\x034\x034\x035\x035')
buf.write('\x035\x035\x035\x035\x036\x036\x036\x036\x037\x037\x037')
buf.write(
'\x037\x037\x038\x038\x038\x038\x038\x038\x039\x039\x039\x039\x039\x039\x039\x039\x03'
)
buf.write(
':\x03:\x03:\x03:\x03:\x03:\x03:\x03:\x03;\x03;\x03;\x03;\x03;\x03;\x03;\x03;\x03<\x03<\x03'
)
buf.write(
'<\x03<\x03<\x03<\x03<\x03<\x03=\x03=\x03=\x03=\x03=\x03=\x03=\x03>\x03>\x03>\x03>\x03>\x03'
)
buf.write(
'>\x03>\x03>\x03>\x03>\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03?\x03'
)
buf.write(
'?\x03@\x03@\x03@\x03@\x03@\x03@\x03@\x03@\x03A\x03A\x03A\x03A\x03A\x03A\x03A\x03A\x03A\x03'
)
buf.write(
'B\x03B\x03B\x03B\x03B\x03B\x03B\x03B\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03C\x03'
)
buf.write(
'C\x03C\x03C\x03C\x03C\x03C\x03D\x03D\x03D\x03D\x03D\x03D\x03D\x03D\x03D\x03E\x03E\x03E\x03'
)
buf.write(
'E\x03E\x03E\x03E\x03E\x03E\x03E\x03E\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03F\x03'
)
buf.write(
'F\x03F\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03G\x03H\x03H\x03H\x03H\x03'
)
buf.write(
'H\x03H\x03H\x03H\x03I\x03I\x03I\x03I\x03I\x03I\x03I\x03I\x03J\x03J\x03J\x03J\x03J\x03J\x03'
)
buf.write(
'J\x03J\x03J\x03K\x03K\x03K\x03K\x03K\x03K\x03K\x03K\x03L\x03L\x03L\x03L\x03L\x03L\x03L\x03'
)
buf.write(
'L\x03L\x03L\x03L\x03L\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03M\x03'
)
buf.write(
'M\x03M\x03M\x03N\x03N\x03N\x03N\x03N\x03O\x03O\x03O\x03O\x03O\x03O\x03P\x03P\x03P\x03P\x03'
)
buf.write(
'P\x03P\x03P\x03Q\x03Q\x03Q\x03Q\x03Q\x03Q\x03R\x03R\x03R\x03R\x03R\x03S\x03S\x03S\x03S\x03'
)
buf.write(
'S\x03S\x03S\x03S\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03T\x03U\x03'
)
buf.write(
'U\x03U\x03U\x03U\x03U\x03U\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03V\x03'
)
buf.write(
'W\x03W\x03W\x03W\x03W\x03W\x03X\x03X\x03X\x03X\x03X\x03Y\x03Y\x03Y\x03Y\x03Y\x03Y\x03Y\x03'
)
buf.write(
'Y\x03Y\x03Z\x03Z\x03Z\x03Z\x03Z\x03[\x03[\x03[\x03[\x03\\\x03\\\x03\\\x03\\\x03\\\x03'
)
buf.write(
'\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03\\\x03]\x03]\x03]\x03]\x03]'
)
buf.write(
'\x03]\x03]\x03]\x03]\x03]\x03]\x03^\x03^\x03^\x03^\x03_\x03_\x03_\x03_\x03_\x03_\x03`\x03'
)
buf.write(
'`\x03`\x03`\x03a\x03a\x03a\x03a\x03a\x03a\x03a\x03a\x03b\x03b\x03b\x03b\x03b\x03b\x03b\x03'
)
buf.write(
'b\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03c\x03d\x03d\x03d\x03d\x03d\x03d\x03d\x03'
)
buf.write(
'd\x03d\x03d\x03e\x03e\x03e\x03e\x03e\x03e\x03e\x03e\x03f\x03f\x03f\x03f\x03f\x03f\x03f\x03'
)
buf.write(
'f\x03f\x03g\x03g\x03g\x03g\x03g\x03g\x03g\x03g\x03g\x03h\x03h\x03h\x03h\x03h\x03h\x03h\x03'
)
buf.write(
'h\x03i\x03i\x03i\x03i\x03i\x03i\x03i\x03j\x03j\x03j\x03j\x03j\x03j\x03k\x03k\x03k\x03k\x03'
)
buf.write(
'k\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03l\x03m\x03m\x03m\x03'
)
buf.write(
'm\x03m\x03m\x03m\x03m\x03m\x03m\x03n\x03n\x03n\x03n\x03n\x03n\x03n\x03n\x03o\x03o\x03o\x03'
)
buf.write(
'o\x03o\x03o\x03o\x03o\x03o\x03o\x03o\x03o\x03o\x03p\x03p\x03p\x03p\x03p\x03p\x03p\x03p\x03'
)
buf.write(
'p\x03q\x03q\x03q\x03q\x03q\x03q\x03q\x03q\x03q\x03r\x03r\x03r\x03r\x03r\x03r\x03r\x03s\x03'
)
buf.write(
's\x03s\x03s\x03s\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03'
)
buf.write(
't\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03t\x03u\x03u\x03u\x03u\x03u\x03v\x03v\x03'
)
buf.write(
'v\x03v\x03v\x03v\x03v\x03v\x03w\x03w\x03w\x03w\x03w\x03x\x03x\x03x\x03x\x03x\x03x\x03y\x03'
)
buf.write(
'y\x03y\x03y\x03y\x03y\x03z\x03z\x03z\x03z\x03z\x03z\x03z\x03{\x03{\x03{\x03{\x03{\x03{\x03'
)
buf.write(
'{\x03{\x03{\x03|\x03|\x03|\x03|\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03}\x03'
)
buf.write(
'}\x03}\x03}\x03}\x03~\x03~\x03~\x03~\x03\x7f\x03\x7f\x03\x7f\x03\x7f\x03\x7f\x03'
)
buf.write(
'\x7f\x03\x7f\x03\x80\x03\x80\x03\x80\x03\x80\x03\x80\x03\x80')
buf.write('\x03\x80\x03\x81\x03\x81\x03\x81\x03\x81\x03\x81\x03\x81')
buf.write('\x03\x81\x03\x81\x03\x81\x03\x82\x03\x82\x03\x82\x03\x82')
buf.write('\x03\x82\x03\x82\x03\x82\x03\x83\x03\x83\x03\x83\x03\x83')
buf.write('\x03\x83\x03\x83\x03\x83\x03\x83\x03\x83\x03\x83\x03\x84')
buf.write('\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84')
buf.write('\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84\x03\x84')
buf.write('\x03\x85\x03\x85\x03\x85\x03\x85\x03\x85\x03\x85\x03\x85')
buf.write('\x03\x85\x03\x85\x03\x85\x03\x85\x03\x86\x03\x86\x03\x86')
buf.write('\x03\x86\x03\x86\x03\x86\x03\x86\x03\x86\x03\x87\x03\x87')
buf.write('\x03\x87\x03\x87\x03\x87\x03\x87\x03\x87\x03\x87\x03\x87')
buf.write('\x03\x87\x03\x88\x03\x88\x03\x88\x03\x88\x03\x88\x03\x88')
buf.write('\x03\x88\x03\x88\x03\x89\x03\x89\x03\x89\x03\x89\x03\x89')
buf.write('\x03\x89\x03\x89\x03\x8a\x03\x8a\x03\x8a\x03\x8a\x03\x8a')
buf.write('\x03\x8b\x03\x8b\x03\x8b\x03\x8b\x03\x8b\x03\x8b\x03\x8b')
buf.write('\x03\x8b\x03\x8c\x03\x8c\x03\x8c\x03\x8c\x03\x8c\x03\x8c')
buf.write('\x03\x8c\x03\x8c\x03\x8c\x03\x8d\x03\x8d\x03\x8d\x03\x8d')
buf.write('\x03\x8d\x03\x8d\x03\x8d\x03\x8d\x03\x8e\x03\x8e\x03\x8e')
buf.write('\x03\x8e\x03\x8e\x03\x8e\x03\x8e\x03\x8e\x03\x8f\x03\x8f')
buf.write('\x03\x8f\x03\x8f\x03\x8f\x03\x8f\x03\x90\x03\x90\x03\x90')
buf.write('\x03\x90\x03\x90\x03\x90\x03\x91\x03\x91\x03\x91\x03\x91')
buf.write('\x03\x91\x03\x91\x03\x92\x03\x92\x03\x92\x03\x92\x03\x92')
buf.write('\x03\x92\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93')
buf.write('\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93\x03\x93\x03\x94')
buf.write('\x03\x94\x03\x94\x03\x94\x03\x94\x03\x94\x03\x95\x03\x95')
buf.write('\x03\x95\x03\x95\x03\x95\x03\x95\x03\x95\x03\x95\x03\x95')
buf.write('\x03\x95\x03\x96\x03\x96\x03\x96\x03\x96\x03\x96\x03\x96')
buf.write('\x03\x96\x03\x96\x03\x97\x03\x97\x03\x97\x03\x97\x03\x98')
buf.write('\x03\x98\x03\x98\x03\x98\x03\x98\x03\x98\x03\x98\x03\x99')
buf.write('\x03\x99\x03\x99\x03\x99\x03\x99\x03\x99\x03\x9a\x03\x9a')
buf.write('\x03\x9a\x03\x9a\x03\x9a\x03\x9b\x03\x9b\x03\x9b\x03\x9b')
buf.write('\x03\x9b\x03\x9c\x03\x9c\x03\x9c\x03\x9c\x03\x9c\x03\x9c')
buf.write('\x03\x9c\x03\x9c\x03\x9c\x03\x9d\x03\x9d\x03\x9d\x03\x9d')
buf.write('\x03\x9d\x03\x9e\x03\x9e\x03\x9e\x03\x9e\x03\x9e\x03\x9e')
buf.write('\x03\x9f\x03\x9f\x03\x9f\x03\x9f\x03\x9f\x03\x9f\x03\xa0')
buf.write('\x03\xa0\x03\xa0\x03\xa0\x03\xa0\x03\xa0\x03\xa0\x03\xa0')
buf.write('\x03\xa0\x03¡\x03¡\x03¡\x03¡\x03¡\x03¢')
buf.write('\x03¢\x03¢\x03¢\x03¢\x03¢\x03¢\x03£')
buf.write('\x03£\x03£\x03£\x03£\x03¤\x03¤\x03¤')
buf.write('\x03¤\x03¤\x03¥\x03¥\x03¥\x03¦\x03¦')
buf.write('\x03¦\x03¦\x03¦\x03¦\x03¦\x03§\x03§')
buf.write('\x03§\x03§\x03§\x03§\x03§\x03§\x03§')
buf.write('\x03§\x03¨\x03¨\x03¨\x03©\x03©\x03©')
buf.write('\x03©\x03©\x03©\x03©\x03©\x03ª\x03ª')
buf.write('\x03ª\x03ª\x03ª\x03ª\x03ª\x03ª\x03ª')
buf.write('\x03ª\x03«\x03«\x03«\x03«\x03«\x03«')
buf.write('\x03«\x03«\x03«\x03«\x03¬\x03¬\x03¬')
buf.write('\x03¬\x03¬\x03¬\x03¬\x03\xad\x03\xad\x03\xad')
buf.write('\x03\xad\x03\xad\x03\xad\x03®\x03®\x03®\x03®')
buf.write('\x03®\x03®\x03®\x03®\x03¯\x03¯\x03¯')
buf.write('\x03¯\x03¯\x03¯\x03¯\x03¯\x03¯\x03¯')
buf.write('\x03°\x03°\x03°\x03°\x03°\x03°\x03°')
buf.write('\x03°\x03±\x03±\x03±\x03±\x03±\x03±')
buf.write('\x03±\x03±\x03±\x03²\x03²\x03²\x03²')
buf.write('\x03²\x03²\x03²\x03³\x03³\x03³\x03³')
buf.write('\x03³\x03³\x03´\x03´\x03´\x03´\x03´')
buf.write('\x03´\x03µ\x03µ\x03µ\x03µ\x03µ\x03µ')
buf.write('\x03µ\x03¶\x03¶\x03¶\x03¶\x03¶\x03¶')
buf.write('\x03¶\x03¶\x03¶\x03¶\x03¶\x03¶\x03¶')
buf.write('\x03·\x03·\x03·\x03·\x03·\x03·\x03·')
buf.write('\x03·\x03¸\x03¸\x03¸\x03¸\x03¹\x03¹')
buf.write('\x03¹\x03¹\x03¹\x03¹\x03¹\x03¹\x03º')
buf.write('\x03º\x03º\x03º\x03º\x03º\x03º\x03º')
buf.write('\x03º\x03º\x03»\x03»\x03»\x03»\x03»')
buf.write('\x03»\x03»\x03»\x03»\x03¼\x03¼\x03¼')
buf.write('\x03¼\x03¼\x03½\x03½\x03½\x03½\x03½')
buf.write('\x03½\x03½\x03½\x03½\x03½\x03½\x03¾')
buf.write('\x03¾\x03¾\x03¿\x03¿\x03¿\x03¿\x03¿')
buf.write('\x03¿\x03¿\x03¿\x03¿\x03¿\x03À\x03À')
buf.write('\x03À\x03À\x03À\x03À\x03À\x03À\x03Á')
buf.write('\x03Á\x03Á\x03Á\x03Á\x03Â\x03Â\x03Â')
buf.write('\x03Â\x03Â\x03Ã\x03Ã\x03Ã\x03Ã\x03Ã')
buf.write('\x03Ä\x03Ä\x03Ä\x03Ä\x03Ä\x03Ä\x03Ä')
buf.write('\x03Ä\x03Ä\x03Å\x03Å\x03Å\x03Å\x03Å')
buf.write('\x03Æ\x03Æ\x03Æ\x03Æ\x03Æ\x03Æ\x03Æ')
buf.write('\x03Æ\x03Æ\x03Æ\x03Æ\x03Ç\x03Ç\x03Ç')
buf.write('\x03Ç\x03Ç\x03Ç\x03Ç\x03Ç\x03È\x03È')
buf.write('\x03È\x03È\x03È\x03É\x03É\x03É\x03É')
buf.write('\x03É\x03É\x03Ê\x03Ê\x03Ê\x03Ê\x03Ê')
buf.write('\x03Ê\x03Ê\x03Ê\x03Ë\x03Ë\x03Ë\x03Ë')
buf.write('\x03Ë\x03Ì\x03Ì\x03Ì\x03Ì\x03Ì\x03Ì')
buf.write('\x03Í\x03Í\x03Í\x03Í\x03Í\x03Í\x03Î')
buf.write('\x03Î\x03Î\x03Î\x03Î\x03Î\x03Ï\x03Ï')
buf.write('\x03Ï\x03Ï\x03Ï\x03Ï\x03Ð\x03Ð\x03Ð')
buf.write('\x03Ð\x03Ð\x03Ð\x03Ñ\x03Ñ\x03Ñ\x03Ñ')
buf.write('\x03Ñ\x03Ò\x03Ò\x03Ò\x03Ò\x03Ò\x03Ò')
buf.write('\x03Ò\x03Ó\x03Ó\x03Ó\x03Ó\x03Ô\x03Ô')
buf.write('\x03Ô\x03Ô\x03Ô\x03Ô\x03Ô\x03Õ\x03Õ')
buf.write('\x03Õ\x03Õ\x03Õ\x03Õ\x03Ö\x03Ö\x03Ö')
buf.write('\x03Ö\x03Ö\x03×\x03×\x03×\x03×\x03×')
buf.write('\x03Ø\x03Ø\x03Ø\x03Ø\x03Ø\x03Ù\x03Ù')
buf.write('\x03Ù\x03Ù\x03Ú\x03Ú\x03Ú\x03Ú\x03Ú')
buf.write('\x03Ú\x03Ú\x03Ú\x03Û\x03Û\x03Û\x03Û')
buf.write('\x03Û\x03Û\x03Û\x03Û\x03Û\x03Ü\x03Ü')
buf.write('\x03Ü\x03Ü\x03Ü\x03Ü\x03Ü\x03Ü\x03Ü')
buf.write('\x03Ý\x03Ý\x03Ý\x03Ý\x03Ý\x03Ý\x03Ý')
buf.write('\x03Þ\x03Þ\x03Þ\x03Þ\x03Þ\x03Þ\x03ß')
buf.write('\x03ß\x03ß\x03ß\x03ß\x03ß\x03à\x03à')
buf.write('\x03à\x03à\x03à\x03à\x03à\x03á\x03á')
buf.write('\x03á\x03á\x03á\x03á\x03á\x03á\x03á')
buf.write('\x03â\x03â\x03â\x03â\x03â\x03â\x03â')
buf.write('\x03â\x03â\x03ã\x03ã\x03ã\x03ã\x03ã')
buf.write('\x03ä\x03ä\x03ä\x03ä\x03ä\x03ä\x03å')
buf.write('\x03å\x03å\x03å\x03å\x03å\x03å\x03æ')
buf.write('\x03æ\x03æ\x03æ\x03æ\x03æ\x03ç\x03ç')
buf.write('\x03ç\x03ç\x03ç\x03ç\x03ç\x03ç\x03ç')
buf.write('\x03è\x03è\x03è\x03è\x03è\x03é\x03é')
buf.write('\x03é\x03é\x03ê\x03ê\x03ê\x03ê\x03ê')
buf.write('\x03ê\x03ê\x03ê\x03ë\x03ë\x03ë\x03ë')
buf.write('\x03ë\x03ë\x03ë\x03ë\x03ë\x03ì\x03ì')
buf.write('\x03ì\x03ì\x03í\x03í\x03í\x03í\x03í')
buf.write('\x03í\x03î\x03î\x03î\x03î\x03î\x03î')
buf.write('\x03î\x03î\x03î\x03ï\x03ï\x03ï\x03ï')
buf.write('\x03ï\x03ï\x03ð\x03ð\x03ð\x03ð\x03ð')
buf.write('\x03ð\x03ð\x03ñ\x03ñ\x03ñ\x03ñ\x03ò')
buf.write('\x03ò\x03ò\x03ó\x03ó\x03ó\x03ó\x03ó')
buf.write('\x03ó\x03ó\x03ó\x03ô\x03ô\x03ô\x03ô')
buf.write('\x03ô\x03ô\x03ô\x03ô\x03õ\x03õ\x03õ')
buf.write('\x03õ\x03õ\x03õ\x03õ\x03ö\x03ö\x03ö')
buf.write('\x03ö\x03ö\x03ö\x03ö\x03ö\x03÷\x03÷')
buf.write('\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷')
buf.write('\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷\x03÷')
buf.write('\x03÷\x03ø\x03ø\x03ø\x03ø\x03ø\x03ø')
buf.write('\x03ø\x03ø\x03ø\x03ø\x03ø\x03ù\x03ù')
buf.write('\x03ù\x03ù\x03ù\x03ù\x03ù\x03ù\x03ù')
buf.write('\x03ù\x03ù\x03ú\x03ú\x03ú\x03ú\x03ú')
buf.write('\x03û\x03û\x03û\x03û\x03û\x03û\x03û')
buf.write('\x03û\x03ü\x03ü\x03ü\x03ü\x03ü\x03ü')
buf.write('\x03ü\x03ü\x03ü\x03ü\x03ü\x03ü\x03ü')
buf.write('\x03ü\x03ý\x03ý\x03ý\x03ý\x03þ\x03þ')
buf.write('\x03þ\x03þ\x03þ\x03þ\x03þ\x03ÿ\x03ÿ')
buf.write('\x03ÿ\x03ÿ\x03ÿ\x03Ā\x03Ā\x03Ā\x03Ā')
buf.write('\x03Ā\x03Ā\x03ā\x03ā\x03ā\x03ā\x03ā')
buf.write('\x03ā\x03ā\x03Ă\x03Ă\x03Ă\x03Ă\x03Ă')
buf.write('\x03Ă\x03Ă\x03Ă\x03ă\x03ă\x03ă\x03ă')
buf.write('\x03ă\x03ă\x03ă\x03ă\x03ă\x03ă\x03Ą')
buf.write('\x03Ą\x03Ą\x03Ą\x03Ą\x03Ą\x03Ą\x03ą')
buf.write('\x03ą\x03ą\x03Ć\x03Ć\x03Ć\x03Ć\x03ć')
buf.write('\x03ć\x03ć\x03ć\x03Ĉ\x03Ĉ\x03Ĉ\x03Ĉ')
buf.write('\x03ĉ\x03ĉ\x03ĉ\x03Ċ\x03Ċ\x03Ċ\x03Ċ')
buf.write('\x03Ċ\x03ċ\x03ċ\x03ċ\x03ċ\x03ċ\x03Č')
buf.write('\x03Č\x03Č\x03Č\x03Č\x03Č\x03Č\x03č')
buf.write('\x03č\x03č\x03Ď\x03Ď\x03Ď\x03Ď\x03Ď')
buf.write('\x03Ď\x03Ď\x03Ď\x03ď\x03ď\x03ď\x03ď')
buf.write('\x03ď\x03ď\x03Đ\x03Đ\x03Đ\x03Đ\x03Đ')
buf.write('\x03Đ\x03Đ\x03Đ\x03Đ\x03Đ\x03Đ\x03đ')
buf.write('\x03đ\x03đ\x03đ\x03đ\x03đ\x03đ\x03đ')
buf.write('\x03Ē\x03Ē\x03Ē\x03Ē\x03ē\x03ē\x03ē')
buf.write('\x03ē\x03ē\x03ē\x03Ĕ\x03Ĕ\x03Ĕ\x03Ĕ')
buf.write('\x03Ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ')
buf.write('\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03ĕ\x03Ė\x03Ė')
buf.write('\x03Ė\x03Ė\x03Ė\x03Ė\x03Ė\x03Ė\x03ė')
buf.write('\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė')
buf.write('\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė\x03ė')
buf.write('\x03ė\x03Ę\x03Ę\x03Ę\x03Ę\x03Ę\x03Ę')
buf.write('\x03Ę\x03Ę\x03Ę\x03Ę\x03Ę\x03ę\x03ę')
buf.write('\x03ę\x03ę\x03ę\x03ę\x03ę\x03Ě\x03Ě')
buf.write('\x03Ě\x03Ě\x03Ě\x03Ě\x03Ě\x03Ě\x03Ě')
buf.write('\x03Ě\x03ě\x03ě\x03ě\x03ě\x03ě\x03ě')
buf.write('\x03ě\x03ě\x03Ĝ\x03Ĝ\x03Ĝ\x03Ĝ\x03Ĝ')
buf.write('\x03ĝ\x03ĝ\x03ĝ\x03ĝ\x03ĝ\x03ĝ\x03ĝ')
buf.write('\x03ĝ\x03ĝ\x03Ğ\x03Ğ\x03Ğ\x03Ğ\x03Ğ')
buf.write('\x03Ğ\x03ğ\x03ğ\x03ğ\x03ğ\x03ğ\x03ğ')
buf.write('\x03ğ\x03ğ\x03ğ\x03ğ\x03Ġ\x03Ġ\x03Ġ')
buf.write('\x03Ġ\x03Ġ\x03Ġ\x03ġ\x03ġ\x03ġ\x03ġ')
buf.write('\x03ġ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ')
buf.write('\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03Ģ\x03ģ')
buf.write('\x03ģ\x03ģ\x03ģ\x03ģ\x03ģ\x03ģ\x03ģ')
buf.write('\x03ģ\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ')
buf.write('\x03Ĥ\x03Ĥ\x03Ĥ\x03Ĥ\x03ĥ\x03ĥ\x03ĥ')
buf.write('\x03ĥ\x03ĥ\x03ĥ\x03ĥ\x03Ħ\x03Ħ\x03Ħ')
buf.write('\x03Ħ\x03Ħ\x03Ħ\x03Ħ\x03Ħ\x03Ħ\x03Ħ')
buf.write('\x03ħ\x03ħ\x03ħ\x03ħ\x03ħ\x03ħ\x03ħ')
buf.write('\x03ħ\x03ħ\x03ħ\x03Ĩ\x03Ĩ\x03Ĩ\x03Ĩ')
buf.write('\x03Ĩ\x03Ĩ\x03Ĩ\x03Ĩ\x03ĩ\x03ĩ\x03ĩ')
buf.write('\x03ĩ\x03ĩ\x03ĩ\x03Ī\x03Ī\x03Ī\x03Ī')
buf.write('\x03Ī\x03Ī\x03Ī\x03Ī\x03Ī\x03Ī\x03ī')
buf.write('\x03ī\x03ī\x03ī\x03ī\x03ī\x03Ĭ\x03Ĭ')
buf.write('\x03Ĭ\x03Ĭ\x03Ĭ\x03Ĭ\x03ĭ\x03ĭ\x03ĭ')
buf.write('\x03ĭ\x03Į\x03Į\x03Į\x03Į\x03Į\x03į')
buf.write('\x03į\x03į\x03į\x03į\x03İ\x03İ\x03İ')
buf.write('\x03İ\x03İ\x03İ\x03İ\x03ı\x03ı\x03ı')
buf.write('\x03ı\x03IJ\x03IJ\x03IJ\x03IJ\x03IJ\x03IJ')
buf.write('\x03IJ\x03IJ\x03IJ\x03IJ\x03ij\x03ij\x03ij')
buf.write('\x03ij\x03ij\x03ij\x03ij\x03ij\x03ij\x03ij')
buf.write('\x03ij\x03ij\x03Ĵ\x03Ĵ\x03Ĵ\x03Ĵ\x03Ĵ')
buf.write('\x03Ĵ\x03Ĵ\x03ĵ\x03ĵ\x03ĵ\x03ĵ\x03ĵ')
buf.write('\x03ĵ\x03ĵ\x03ĵ\x03ĵ\x03ĵ\x03Ķ\x03Ķ')
buf.write('\x03Ķ\x03Ķ\x03Ķ\x03Ķ\x03Ķ\x03ķ\x03ķ')
buf.write('\x03ķ\x03ķ\x03ķ\x03ķ\x03ķ\x03ķ\x03ĸ')
buf.write('\x03ĸ\x03ĸ\x03ĸ\x03ĸ\x03ĸ\x03ĸ\x03ĸ')
buf.write('\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ')
buf.write('\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ')
buf.write('\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03Ĺ\x03ĺ')
buf.write('\x03ĺ\x03ĺ\x03ĺ\x03ĺ\x03ĺ\x03ĺ\x03Ļ')
buf.write('\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ')
buf.write('\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03Ļ\x03ļ\x03ļ')
buf.write('\x03ļ\x03ļ\x03ļ\x03ļ\x03ļ\x03Ľ\x03Ľ')
buf.write('\x03Ľ\x03Ľ\x03Ľ\x03Ľ\x03Ľ\x03Ľ\x03Ľ')
buf.write('\x03Ľ\x03ľ\x03ľ\x03ľ\x03ľ\x03ľ\x03ľ')
buf.write('\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ\x03Ŀ')
buf.write('\x03Ŀ\x03ŀ\x03ŀ\x03ŀ\x03ŀ\x03ŀ\x03ŀ')
buf.write('\x03ŀ\x03Ł\x03Ł\x03Ł\x03Ł\x03Ł\x03Ł')
buf.write('\x03ł\x03ł\x03ł\x03ł\x03ł\x03ł\x03ł')
buf.write('\x03ł\x03ł\x03Ń\x03Ń\x03Ń\x03Ń\x03Ń')
buf.write('\x03Ń\x03Ń\x03ń\x03ń\x03ń\x03ń\x03Ņ')
buf.write('\x03Ņ\x03Ņ\x03Ņ\x03Ņ\x03Ņ\x03ņ\x03ņ')
buf.write('\x03ņ\x03ņ\x03ņ\x03Ň\x03Ň\x03Ň\x03Ň')
buf.write('\x03Ň\x03Ň\x03ň\x03ň\x03ň\x03ň\x03ň')
buf.write('\x03ň\x03ň\x03ʼn\x03ʼn\x03ʼn\x03ʼn\x03ʼn')
buf.write('\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ\x03Ŋ')
buf.write('\x03Ŋ\x03Ŋ\x03Ŋ\x03ŋ\x03ŋ\x03ŋ\x03ŋ')
buf.write('\x03ŋ\x03ŋ\x03ŋ\x03Ō\x03Ō\x03Ō\x03Ō')
buf.write('\x03Ō\x03Ō\x03Ō\x03Ō\x03Ō\x03Ō\x03Ō')
buf.write('\x03Ō\x03ō\x03ō\x03ō\x03ō\x03Ŏ\x03Ŏ')
buf.write('\x03Ŏ\x03Ŏ\x03Ŏ\x03Ŏ\x03Ŏ\x03ŏ\x03ŏ')
buf.write('\x03ŏ\x03ŏ\x03ŏ\x03ŏ\x03ŏ\x03Ő\x03Ő')
buf.write('\x03Ő\x03Ő\x03Ő\x03ő\x03ő\x03ő\x03ő')
buf.write('\x03ő\x03ő\x03ő\x03ő\x03Œ\x03Œ\x03Œ')
buf.write('\x03Œ\x03Œ\x03Œ\x03Œ\x03œ\x03œ\x03œ')
buf.write('\x03œ\x03œ\x03Ŕ\x03Ŕ\x03Ŕ\x03Ŕ\x03Ŕ')
buf.write('\x03Ŕ\x03Ŕ\x03Ŕ\x03Ŕ\x03ŕ\x03ŕ\x03ŕ')
buf.write('\x03ŕ\x03ŕ\x03ŕ\x03ŕ\x03ŕ\x03ŕ\x03ŕ')
buf.write('\x03ŕ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ')
buf.write('\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ\x03Ŗ')
buf.write('\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ')
buf.write('\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03ŗ')
buf.write('\x03ŗ\x03ŗ\x03ŗ\x03ŗ\x03Ř\x03Ř\x03Ř')
buf.write('\x03Ř\x03Ř\x03Ř\x03Ř\x03Ř\x03Ř\x03Ř')
buf.write('\x03Ř\x03Ř\x03ř\x03ř\x03ř\x03ř\x03ř')
buf.write('\x03ř\x03ř\x03ř\x03ř\x03ř\x03ř\x03ř')
buf.write('\x03ř\x03ř\x03ř\x03ř\x03Ś\x03Ś\x03Ś')
buf.write('\x03Ś\x03ś\x03ś\x03ś\x03ś\x03ś\x03Ŝ')
buf.write('\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ\x03Ŝ')
buf.write('\x03Ŝ\x03ŝ\x03ŝ\x03ŝ\x03ŝ\x03ŝ\x03ŝ')
buf.write('\x03Ş\x03Ş\x03Ş\x03Ş\x03Ş\x03ş\x03ş')
buf.write('\x03ş\x03ş\x03ş\x03ş\x03ş\x03ş\x03ş')
buf.write('\x03Š\x03Š\x03Š\x03Š\x03Š\x03Š\x03Š')
buf.write('\x03Š\x03Š\x03š\x03š\x03š\x03š\x03š')
buf.write('\x03š\x03š\x03š\x03š\x03Ţ\x03Ţ\x03Ţ')
buf.write('\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ')
buf.write('\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03Ţ\x03ţ\x03ţ')
buf.write('\x03ţ\x03ţ\x03ţ\x03ţ\x03ţ\x03Ť\x03Ť')
buf.write('\x03Ť\x03Ť\x03Ť\x03ť\x03ť\x03ť\x03ť')
buf.write('\x03ť\x03Ŧ\x03Ŧ\x03Ŧ\x03Ŧ\x03Ŧ\x03Ŧ')
buf.write('\x03Ŧ\x03Ŧ\x03Ŧ\x03ŧ\x03ŧ\x03ŧ\x03ŧ')
buf.write('\x03ŧ\x03ŧ\x03ŧ\x03ŧ\x03ŧ\x03Ũ\x03Ũ')
buf.write('\x03Ũ\x03Ũ\x03Ũ\x03ũ\x03ũ\x03ũ\x03ũ')
buf.write('\x03ũ\x03ũ\x03ũ\x03ũ\x03ũ\x03ũ\x03ũ')
buf.write('\x03ũ\x03ũ\x03ũ\x03Ū\x03Ū\x03Ū\x03Ū')
buf.write('\x03Ū\x03Ū\x03Ū\x03Ū\x03ū\x03ū\x03ū')
buf.write('\x03ū\x03ū\x03ū\x03ū\x03ū\x03ū\x03Ŭ')
buf.write('\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ\x03Ŭ')
buf.write('\x03Ŭ\x03Ŭ\x03Ŭ\x03ŭ\x03ŭ\x03ŭ\x03ŭ')
buf.write('\x03ŭ\x03ŭ\x03Ů\x03Ů\x03Ů\x03Ů\x03Ů')
buf.write('\x03Ů\x03Ů\x03Ů\x03ů\x03ů\x03ů\x03ů')
buf.write('\x03ů\x03ů\x03ů\x03ů\x03ů\x03ů\x03Ű')
buf.write('\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű')
buf.write('\x03Ű\x03Ű\x03Ű\x03Ű\x03Ű\x03ű\x03ű')
buf.write('\x03ű\x03ű\x03ű\x03ű\x03ű\x03Ų\x03Ų')
buf.write('\x03Ų\x03Ų\x03Ų\x03Ų\x03Ų\x03Ų\x03Ų')
buf.write('\x03Ų\x03Ų\x03ų\x03ų\x03ų\x03ų\x03ų')
buf.write('\x03ų\x03ų\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ')
buf.write('\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ\x03Ŵ')
buf.write('\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ')
buf.write('\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03ŵ\x03Ŷ')
buf.write('\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ')
buf.write('\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03Ŷ\x03ŷ')
buf.write('\x03ŷ\x03ŷ\x03ŷ\x03ŷ\x03ŷ\x03ŷ\x03ŷ')
buf.write('\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ\x03Ÿ')
buf.write('\x03Ÿ\x03Ź\x03Ź\x03Ź\x03Ź\x03Ź\x03Ź')
buf.write('\x03Ź\x03Ź\x03ź\x03ź\x03ź\x03ź\x03ź')
buf.write('\x03ź\x03Ż\x03Ż\x03Ż\x03Ż\x03ż\x03ż')
buf.write('\x03ż\x03ż\x03ż\x03Ž\x03Ž\x03Ž\x03Ž')
buf.write('\x03Ž\x03ž\x03ž\x03ž\x03ž\x03ž\x03ž')
buf.write('\x03ž\x03ž\x03ž\x03ž\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ\x03ſ')
buf.write('\x03ſ\x03ſ\x03ſ\x03ſ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ\x03ƀ')
buf.write('\x03ƀ\x03ƀ\x03ƀ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ')
buf.write('\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ')
buf.write('\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ')
buf.write('\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ɓ\x03Ƃ')
buf.write('\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ')
buf.write('\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03Ƃ\x03ƃ')
buf.write('\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ')
buf.write('\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03ƃ\x03Ƅ')
buf.write('\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ')
buf.write('\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ\x03Ƅ')
buf.write('\x03Ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ')
buf.write('\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ\x03ƅ')
buf.write('\x03ƅ\x03ƅ\x03ƅ\x03Ɔ\x03Ɔ\x03Ɔ\x03Ƈ')
buf.write('\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ\x03Ƈ')
buf.write('\x03Ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ')
buf.write('\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03ƈ\x03Ɖ')
buf.write('\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ\x03Ɖ')
buf.write('\x03Ɖ\x03Ɖ\x03Ɗ\x03Ɗ\x03Ɗ\x03Ɗ\x03Ɗ')
buf.write('\x03Ɗ\x03Ƌ\x03Ƌ\x03Ƌ\x03Ƌ\x03Ƌ\x03Ƌ')
buf.write('\x03Ƌ\x03Ƌ\x03ƌ\x03ƌ\x03ƌ\x03ƌ\x03ƌ')
buf.write('\x03ƍ\x03ƍ\x03ƍ\x03ƍ\x03ƍ\x03Ǝ\x03Ǝ')
buf.write('\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ\x03Ǝ')
buf.write('\x03Ə\x03Ə\x03Ə\x03Ə\x03Ə\x03Ɛ\x03Ɛ')
buf.write('\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ\x03Ɛ')
buf.write('\x03Ɛ\x03Ƒ\x03Ƒ\x03Ƒ\x03Ƒ\x03Ƒ\x03Ƒ')
buf.write('\x03ƒ\x03ƒ\x03ƒ\x03ƒ\x03ƒ\x03ƒ\x03Ɠ')
buf.write('\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɠ\x03Ɣ')
buf.write('\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ\x03Ɣ')
buf.write('\x03Ɣ\x03Ɣ\x03ƕ\x03ƕ\x03ƕ\x03ƕ\x03ƕ')
buf.write('\x03ƕ\x03ƕ\x03ƕ\x03Ɩ\x03Ɩ\x03Ɩ\x03Ɩ')
buf.write('\x03Ɩ\x03Ɩ\x03Ɨ\x03Ɨ\x03Ɨ\x03Ɨ\x03Ɨ')
buf.write('\x03Ɨ\x03Ɨ\x03Ƙ\x03Ƙ\x03Ƙ\x03Ƙ\x03Ƙ')
buf.write('\x03Ƙ\x03Ƙ\x03Ƙ\x03ƙ\x03ƙ\x03ƙ\x03ƙ')
buf.write('\x03ƙ\x03ƙ\x03ƙ\x03ƚ\x03ƚ\x03ƚ\x03ƚ')
buf.write('\x03ƚ\x03ƚ\x03ƚ\x03ƛ\x03ƛ\x03ƛ\x03ƛ')
buf.write('\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɯ\x03Ɲ')
buf.write('\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ\x03Ɲ')
buf.write('\x03Ɲ\x03ƞ\x03ƞ\x03ƞ\x03ƞ\x03ƞ\x03ƞ')
buf.write('\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ\x03Ɵ')
buf.write('\x03Ơ\x03Ơ\x03Ơ\x03Ơ\x03Ơ\x03Ơ\x03Ơ')
buf.write('\x03Ơ\x03ơ\x03ơ\x03ơ\x03ơ\x03ơ\x03ơ')
buf.write('\x03ơ\x03ơ\x03ơ\x03Ƣ\x03Ƣ\x03Ƣ\x03Ƣ')
buf.write('\x03Ƣ\x03Ƣ\x03Ƣ\x03Ƣ\x03Ƣ\x03ƣ\x03ƣ')
buf.write('\x03ƣ\x03ƣ\x03ƣ\x03ƣ\x03ƣ\x03Ƥ\x03Ƥ')
buf.write('\x03Ƥ\x03Ƥ\x03Ƥ\x03Ƥ\x03Ƥ\x03Ƥ\x03ƥ')
buf.write('\x03ƥ\x03ƥ\x03ƥ\x03ƥ\x03ƥ\x03ƥ\x03ƥ')
buf.write('\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ\x03Ʀ')
buf.write('\x03Ʀ\x03Ʀ\x03Ƨ\x03Ƨ\x03Ƨ\x03Ƨ\x03Ƨ')
buf.write('\x03ƨ\x03ƨ\x03ƨ\x03ƨ\x03ƨ\x03ƨ\x03ƨ')
buf.write('\x03ƨ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ')
buf.write('\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03Ʃ\x03ƪ\x03ƪ')
buf.write('\x03ƪ\x03ƪ\x03ƪ\x03ƫ\x03ƫ\x03ƫ\x03ƫ')
buf.write('\x03ƫ\x03ƫ\x03ƫ\x03ƫ\x03ƫ\x03Ƭ\x03Ƭ')
buf.write('\x03Ƭ\x03Ƭ\x03Ƭ\x03Ƭ\x03ƭ\x03ƭ\x03ƭ')
buf.write('\x03ƭ\x03ƭ\x03ƭ\x03Ʈ\x03Ʈ\x03Ʈ\x03Ʈ')
buf.write('\x03Ʈ\x03Ư\x03Ư\x03Ư\x03Ư\x03Ư\x03Ư')
buf.write('\x03Ư\x03ư\x03ư\x03ư\x03ư\x03ư\x03Ʊ')
buf.write('\x03Ʊ\x03Ʊ\x03Ʊ\x03Ʊ\x03Ʊ\x03Ʋ\x03Ʋ')
buf.write('\x03Ʋ\x03Ʋ\x03Ƴ\x03Ƴ\x03Ƴ\x03Ƴ\x03Ƴ')
buf.write('\x03Ƴ\x03Ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ')
buf.write('\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ\x03ƴ')
buf.write('\x03ƴ\x03ƴ\x03Ƶ\x03Ƶ\x03Ƶ\x03Ƶ\x03Ƶ')
buf.write('\x03Ƶ\x03Ƶ\x03Ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ')
buf.write('\x03ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ\x03ƶ')
buf.write('\x03ƶ\x03ƶ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ')
buf.write('\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ʒ\x03Ƹ')
buf.write('\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ\x03Ƹ')
buf.write('\x03Ƹ\x03Ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƹ')
buf.write('\x03ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƹ\x03ƺ\x03ƺ')
buf.write('\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ')
buf.write('\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƺ\x03ƻ\x03ƻ')
buf.write('\x03ƻ\x03ƻ\x03ƻ\x03ƻ\x03ƻ\x03ƻ\x03ƻ')
buf.write('\x03Ƽ\x03Ƽ\x03Ƽ\x03Ƽ\x03Ƽ\x03Ƽ\x03ƽ')
buf.write('\x03ƽ\x03ƽ\x03ƽ\x03ƽ\x03ƽ\x03ƽ\x03ƽ')
buf.write('\x03ƽ\x03ƾ\x03ƾ\x03ƾ\x03ƾ\x03ƾ\x03ƾ')
buf.write('\x03ƾ\x03ƾ\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ')
buf.write('\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ\x03ƿ')
buf.write('\x03ƿ\x03ǀ\x03ǀ\x03ǀ\x03ǀ\x03ǀ\x03ǀ')
buf.write('\x03ǀ\x03ǀ\x03ǀ\x03ǁ\x03ǁ\x03ǁ\x03ǁ')
buf.write('\x03ǁ\x03ǂ\x03ǂ\x03ǂ\x03ǂ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ\x03ǃ')
buf.write('\x03ǃ\x03ǃ\x03DŽ\x03DŽ\x03DŽ\x03DŽ\x03DŽ')
buf.write('\x03Dž\x03Dž\x03Dž\x03Dž\x03Dž\x03Dž\x03Dž')
buf.write('\x03Dž\x03Dž\x03Dž\x03Dž\x03dž\x03dž\x03dž')
buf.write('\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž')
buf.write('\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž\x03dž')
buf.write('\x03dž\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ')
buf.write('\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ\x03LJ')
buf.write('\x03LJ\x03LJ\x03LJ\x03Lj\x03Lj\x03Lj\x03Lj')
buf.write('\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj')
buf.write('\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj\x03Lj')
buf.write('\x03Lj\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj')
buf.write('\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj')
buf.write('\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj\x03lj')
buf.write('\x03lj\x03lj\x03lj\x03NJ\x03NJ\x03NJ\x03NJ')
buf.write('\x03NJ\x03NJ\x03NJ\x03NJ\x03NJ\x03NJ\x03NJ')
buf.write('\x03NJ\x03NJ\x03NJ\x03NJ\x03Nj\x03Nj\x03Nj')
buf.write('\x03Nj\x03Nj\x03Nj\x03Nj\x03Nj\x03Nj\x03Nj')
buf.write('\x03nj\x03nj\x03nj\x03nj\x03nj\x03nj\x03nj')
buf.write('\x03nj\x03nj\x03nj\x03nj\x03Ǎ\x03Ǎ\x03Ǎ')
buf.write('\x03Ǎ\x03Ǎ\x03Ǎ\x03Ǎ\x03Ǎ\x03ǎ\x03ǎ')
buf.write('\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03ǎ')
buf.write('\x03ǎ\x03ǎ\x03ǎ\x03ǎ\x03Ǐ\x03Ǐ\x03Ǐ')
buf.write('\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ')
buf.write('\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03Ǐ\x03ǐ')
buf.write('\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ')
buf.write('\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ\x03ǐ')
buf.write('\x03ǐ\x03Ǒ\x03Ǒ\x03Ǒ\x03Ǒ\x03Ǒ\x03ǒ')
buf.write('\x03ǒ\x03ǒ\x03ǒ\x03Ǔ\x03Ǔ\x03Ǔ\x03Ǔ')
buf.write('\x03Ǔ\x03ǔ\x03ǔ\x03ǔ\x03ǔ\x03Ǖ\x03Ǖ')
buf.write('\x03Ǖ\x03Ǖ\x03Ǖ\x03ǖ\x03ǖ\x03ǖ\x03ǖ')
buf.write('\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ\x03Ǘ')
buf.write('\x03ǘ\x03ǘ\x03ǘ\x03ǘ\x03Ǚ\x03Ǚ\x03Ǚ')
buf.write('\x03Ǚ\x03Ǚ\x03Ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ')
buf.write('\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ')
buf.write('\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03ǚ\x03Ǜ\x03Ǜ')
buf.write('\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ\x03Ǜ')
buf.write('\x03Ǜ\x03Ǜ\x03ǜ\x03ǜ\x03ǜ\x03ǜ\x03ǝ')
buf.write('\x03ǝ\x03ǝ\x03ǝ\x03ǝ\x03ǝ\x03ǝ\x03ǝ')
buf.write('\x03ǝ\x03Ǟ\x03Ǟ\x03Ǟ\x03Ǟ\x03Ǟ\x03Ǟ')
buf.write('\x03ǟ\x03ǟ\x03ǟ\x03ǟ\x03ǟ\x03ǟ\x03ǟ')
buf.write('\x03Ǡ\x03Ǡ\x03Ǡ\x03Ǡ\x03Ǡ\x03ǡ\x03ǡ')
buf.write('\x03ǡ\x03ǡ\x03ǡ\x03ǡ\x03ǡ\x03Ǣ\x03Ǣ')
buf.write('\x03Ǣ\x03Ǣ\x03Ǣ\x03Ǣ\x07Ǣ፨\nǢ')
buf.write('\x0cǢ\x0eǢ፫\x0bǢ\x03Ǣ\x03Ǣ\x03ǣ')
buf.write('\x03ǣ\x03ǣ\x07ǣ፲\nǣ\x0cǣ\x0eǣ')
buf.write('፵\x0bǣ\x03ǣ\x06ǣ፸\nǣ\rǣ')
buf.write('\x0eǣ፹\x03Ǥ\x03Ǥ\x03Ǥ\x07Ǥ\u137f')
buf.write('\nǤ\x0cǤ\x0eǤᎂ\x0bǤ\x03Ǥ\x06Ǥ')
buf.write('ᎅ\nǤ\rǤ\x0eǤᎆ\x03ǥ\x03ǥ')
buf.write('\x03ǥ\x03Ǧ\x03Ǧ\x03ǧ\x03ǧ\x03Ǩ\x03Ǩ')
buf.write('\x03Ǩ\x05Ǩ᎓\nǨ\x03Ǩ\x03Ǩ\x05Ǩ')
buf.write('᎗\nǨ\x05Ǩ᎙\nǨ\x03Ǩ\x03Ǩ\x05')
buf.write('Ǩ\u139d\nǨ\x03ǩ\x03ǩ\x03ǩ\x03ǩ\x03')
buf.write('ǩ\x07ǩᎤ\nǩ\x0cǩ\x0eǩᎧ\x0b')
buf.write('ǩ\x03ǩ\x03ǩ\x03Ǫ\x03Ǫ\x03Ǫ\x03Ǫ')
buf.write('\x03Ǫ\x05ǪᎰ\nǪ\x03Ǫ\x03Ǫ\x03ǫ')
buf.write('\x03ǫ\x03Ǭ\x03Ǭ\x03Ǭ\x07ǬᎹ\nǬ')
buf.write('\x0cǬ\x0eǬᎼ\x0bǬ\x03Ǭ\x03Ǭ\x03Ǭ')
buf.write('\x03ǭ\x03ǭ\x03ǭ\x07ǭᏄ\nǭ\x0cǭ')
buf.write('\x0eǭᏇ\x0bǭ\x03ǭ\x03ǭ\x03ǭ\x03Ǯ')
buf.write('\x03Ǯ\x03Ǯ\x07ǮᏏ\nǮ\x0cǮ\x0eǮ')
buf.write('Ꮢ\x0bǮ\x03Ǯ\x03Ǯ\x03Ǯ\x03ǯ\x03ǯ')
buf.write('\x03ǯ\x07ǯᏚ\nǯ\x0cǯ\x0eǯᏝ')
buf.write('\x0bǯ\x03ǯ\x03ǯ\x03ǯ\x03ǰ\x03ǰ\x03DZ')
buf.write('\x03DZ\x03DZ\x03DZ\x06DZᏨ\nDZ\rDZ')
buf.write('\x0eDZᏩ\x03DZ\x03DZ\x03Dz\x03Dz\x03dz')
buf.write('\x03dz\x03Ǵ\x03Ǵ\x03ǵ\x03ǵ\x03Ƕ\x03Ƕ')
buf.write('\x03Ƕ\x03Ƿ\x03Ƿ\x03Ǹ\x03Ǹ\x03ǹ\x03ǹ')
buf.write('\x03Ǻ\x03Ǻ\x03ǻ\x03ǻ\x03Ǽ\x03Ǽ\x03ǽ')
buf.write('\x03ǽ\x03ǽ\x03Ǿ\x03Ǿ\x03Ǿ\x03Ǿ\x07Ǿ')
buf.write('ᐌ\nǾ\x0cǾ\x0eǾᐏ\x0bǾ\x03Ǿ')
buf.write('\x03Ǿ\x03Ǿ\x03Ǿ\x03Ǿ\x05Ǿᐖ\nǾ')
buf.write('\x03ǿ\x03ǿ\x03Ȁ\x03Ȁ\x03ȁ\x03ȁ\x03ȁ')
buf.write('\x03Ȃ\x03Ȃ\x03ȃ\x03ȃ\x03ȃ\x03Ȅ\x03Ȅ')
buf.write('\x03Ȅ\x03Ȅ\x03Ȅ\x03Ȅ\x03Ȅ\x03Ȅ\x05Ȅ')
buf.write('ᐬ\nȄ\x03ȅ\x03ȅ\x03Ȇ\x03Ȇ\x03ȇ')
buf.write('\x03ȇ\x03Ȉ\x03Ȉ\x03ȉ\x03ȉ\x03Ȋ\x03Ȋ')
buf.write('\x03Ȋ\x03ȋ\x03ȋ\x03Ȍ\x03Ȍ\x03ȍ\x03ȍ')
buf.write('\x03Ȏ\x03Ȏ\x03ȏ\x03ȏ\x03Ȑ\x06Ȑᑆ')
buf.write('\nȐ\rȐ\x0eȐᑇ\x03Ȑ\x03Ȑ\x03ȑ')
buf.write('\x03ȑ\x03Ȓ\x06Ȓᑏ\nȒ\rȒ\x0eȒ')
buf.write('ᑐ\x03ȓ\x07ȓᑔ\nȓ\x0cȓ\x0eȓ')
buf.write('ᑗ\x0bȓ\x03ȓ\x05ȓᑚ\nȓ\x03ȓ')
buf.write('\x06ȓᑝ\nȓ\rȓ\x0eȓᑞ\x03Ȕ')
buf.write('\x03Ȕ\x03Ȕ\x03Ȕ\x07Ȕᑥ\nȔ\x0cȔ')
buf.write('\x0eȔᑨ\x0bȔ\x03Ȕ\x03Ȕ\x05Ȕᑬ')
buf.write('\nȔ\x03Ȕ\x03Ȕ\x03ȕ\x03ȕ\x03ȕ\x03ȕ')
buf.write('\x07ȕᑴ\nȕ\x0cȕ\x0eȕᑷ\x0bȕ')
buf.write('\x03ȕ\x03ȕ\x03ȕ\x03ȕ\x03ȕ\x03Ȗ\x03Ȗ')
buf.write('\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ\x03Ȗ')
buf.write('\x07Ȗᒇ\nȖ\x0cȖ\x0eȖᒊ\x0bȖ')
buf.write('\x03Ȗ\x03Ȗ\x05Ȗᒎ\nȖ\x03ȗ\x05ȗ')
buf.write('ᒑ\nȗ\x03ȗ\x03ȗ\x03Ș\x03Ș\x03ș')
buf.write('\x03ș\x03ș\x07șᒚ\nș\x0cș\x0eș')
buf.write('ᒝ\x0bș\x03Ț\x03Ț\x03Ț\x03Ț\x03Ț')
buf.write('\x03ț\x03ț\x03Ȝ\x03Ȝ\x03ȝ\x03ȝ\x03Ȟ')
buf.write('\x03Ȟ\x03ȟ\x03ȟ\x03Ƞ\x03Ƞ\x03ȡ\x03ȡ')
buf.write('\x03Ȣ\x03Ȣ\x03ȣ\x03ȣ\x03Ȥ\x03Ȥ\x03ȥ')
buf.write('\x03ȥ\x03Ȧ\x03Ȧ\x03ȧ\x03ȧ\x03Ȩ\x03Ȩ')
buf.write('\x03ȩ\x03ȩ\x03Ȫ\x03Ȫ\x03ȫ\x03ȫ\x03Ȭ')
buf.write('\x03Ȭ\x03ȭ\x03ȭ\x03Ȯ\x03Ȯ\x03ȯ\x03ȯ')
buf.write('\x03Ȱ\x03Ȱ\x03ȱ\x03ȱ\x03Ȳ\x03Ȳ\x03ȳ')
buf.write('\x03ȳ\x03ȴ\x03ȴ\x07ᎺᏅᏐᏛᑵ')
buf.write(
'\x02ȵ\x03\x03\x05\x04\x07\x05\t\x06\x0b\x07\r\x08\x0f\t\x11\n\x13\x0b\x15\x0c'
)
buf.write(
"\x17\r\x19\x0e\x1b\x0f\x1d\x10\x1f\x11!\x12#\x13%\x14'\x15)\x16+\x17"
)
buf.write('-\x18/\x191\x1a3\x1b5\x1c7\x1d9\x1e;\x1f= ?!A"C#E$G%')
buf.write("I&K'M(O)Q*S+U,W-Y.[/]0_1a2c3e4g5i6k7")
buf.write('m8o9q:s;u<w=y>{?}@\x7fA\x81B\x83C\x85D\x87E\x89')
buf.write('F\x8bG\x8dH\x8fI\x91J\x93K\x95L\x97M\x99')
buf.write('N\x9bO\x9dP\x9fQ¡R£S¥T§U©')
buf.write('V«W\xadX¯Y±Z³[µ\\·]¹')
buf.write('^»_½`¿aÁbÃcÅdÇeÉ')
buf.write('fËgÍhÏiÑjÓkÕl×mÙ')
buf.write('nÛoÝpßqárãsåtçué')
buf.write('vëwíxïyñzó{õ|÷}ù')
buf.write('~û\x7fý\x80ÿ\x81ā\x82ă')
buf.write('\x83ą\x84ć\x85ĉ\x86ċ\x87')
buf.write('č\x88ď\x89đ\x8aē\x8bĕ')
buf.write('\x8cė\x8dę\x8eě\x8fĝ\x90')
buf.write('ğ\x91ġ\x92ģ\x93ĥ\x94ħ')
buf.write('\x95ĩ\x96ī\x97ĭ\x98į\x99')
buf.write('ı\x9aij\x9bĵ\x9cķ\x9dĹ')
buf.write('\x9eĻ\x9fĽ\xa0Ŀ¡Ł¢')
buf.write('Ń£Ņ¤Ň¥ʼn¦ŋ')
buf.write('§ō¨ŏ©őªœ«')
buf.write('ŕ¬ŗ\xadř®ś¯ŝ')
buf.write('°ş±š²ţ³ť´')
buf.write('ŧµũ¶ū·ŭ¸ů')
buf.write('¹űºų»ŵ¼ŷ½')
buf.write('Ź¾Ż¿ŽÀſÁƁ')
buf.write('ÂƃÃƅÄƇÅƉÆ')
buf.write('ƋÇƍÈƏÉƑÊƓ')
buf.write('ËƕÌƗÍƙÎƛÏ')
buf.write('ƝÐƟÑơÒƣÓƥ')
buf.write('ÔƧÕƩÖƫ×ƭØ')
buf.write('ƯÙƱÚƳÛƵÜƷ')
buf.write('ÝƹÞƻßƽàƿá')
buf.write('ǁâǃãDžäLJålj')
buf.write('æNjçǍèǏéǑê')
buf.write('ǓëǕìǗíǙîǛ')
buf.write('ïǝðǟñǡòǣó')
buf.write('ǥôǧõǩöǫ÷ǭ')
buf.write('øǯùDZúdzûǵü')
buf.write('ǷýǹþǻÿǽĀǿ')
buf.write('āȁĂȃăȅĄȇą')
buf.write('ȉĆȋćȍĈȏĉȑ')
buf.write('ĊȓċȕČȗčșĎ')
buf.write('țďȝĐȟđȡĒȣ')
buf.write('ēȥĔȧĕȩĖȫė')
buf.write('ȭĘȯęȱĚȳěȵ')
buf.write('ĜȷĝȹĞȻğȽĠ')
buf.write('ȿġɁĢɃģɅĤɇ')
buf.write('ĥɉĦɋħɍĨɏĩ')
buf.write('ɑĪɓīɕĬɗĭə')
buf.write('ĮɛįɝİɟıɡIJ')
buf.write('ɣijɥĴɧĵɩĶɫ')
buf.write('ķɭĸɯĹɱĺɳĻ')
buf.write('ɵļɷĽɹľɻĿɽ')
buf.write('ŀɿŁʁłʃŃʅń')
buf.write('ʇŅʉņʋŇʍňʏ')
buf.write('ʼnʑŊʓŋʕŌʗō')
buf.write('ʙŎʛŏʝŐʟőʡ')
buf.write('ŒʣœʥŔʧŕʩŖ')
buf.write('ʫŗʭŘʯřʱŚʳ')
buf.write('śʵŜʷŝʹŞʻş')
buf.write('ʽŠʿšˁŢ˃ţ˅')
buf.write('ŤˇťˉŦˋŧˍŨ')
buf.write('ˏũˑŪ˓ū˕Ŭ˗')
buf.write('ŭ˙ٲů˝Ű˟ű')
buf.write('ˡŲˣų˥Ŵ˧ŵ˩')
buf.write('Ŷ˫ŷ˭Ÿ˯Ź˱ź')
buf.write('˳Ż˵ż˷Ž˹ž˻')
buf.write('ſ˽ƀ˿Ɓ́Ƃ̃ƃ')
buf.write('̅Ƅ̇ƅ̉Ɔ̋Ƈ̍')
buf.write('ƈ̏Ɖ̑Ɗ̓Ƌ̕ƌ')
buf.write('̗ƍ̙Ǝ̛Ə̝Ɛ̟')
buf.write('Ƒ̡ƒ̣Ɠ̥Ɣ̧ƕ')
buf.write('̩Ɩ̫Ɨ̭Ƙ̯ƙ̱')
buf.write('ƚ̳ƛ̵Ɯ̷Ɲ̹ƞ')
buf.write('̻Ɵ̽Ơ̿ớƢ̓')
buf.write('ƣͅƤ͇ƥ͉Ʀ͋Ƨ')
buf.write('͍ƨ͏Ʃ͑ƪ͓ƫ͕')
buf.write('Ƭ͗ƭ͙Ʈ͛Ư͝ư')
buf.write('͟Ʊ͡ƲͣƳͥƴͧ')
buf.write('ƵͩƶͫƷͭƸͯƹ')
buf.write('ͱƺͳƻ͵Ƽͷƽ\u0379')
buf.write('ƾͻƿͽǀͿǁ\u0381ǂ')
buf.write('\u0383ǃ΅DŽ·DžΉdž\u038b')
buf.write('LJ\u038dLjΏljΑNJΓNj')
buf.write('ΕnjΗǍΙǎΛǏΝ')
buf.write('ǐΟǑΡǒΣǓΥǔ')
buf.write('ΧǕΩǖΫǗέǘί')
buf.write('ǙαǚγǛεǜηǝ')
buf.write('ιǞλǟνǠοǡρ')
buf.write('ǢσǣυǤχǥωǦ')
buf.write('ϋǧύǨϏǩϑǪϓ')
buf.write('\x02ϕ\x02ϗ\x02ϙ\x02ϛ\x02ϝ\x02ϟ\x02ϡ')
buf.write('ǫϣǬϥǭϧǮϩǯ')
buf.write('ϫǰϭDZϯDzϱdzϳ')
buf.write('ǴϵǵϷǶϹǷϻǸ')
buf.write('ϽǹϿǺЁǻЃǼЅ')
buf.write('ǽЇǾЉǿЋȀЍȁ')
buf.write('ЏȂБ\x02ГȃЕȄЗȅ')
buf.write('ЙȆЛȇНȈПȉС')
buf.write('\x02У\x02Х\x02ЧȊЩȋЫȌ')
buf.write('Э\x02Я\x02бȍгȎе\x02з')
buf.write('\x02й\x02л\x02н\x02п\x02с\x02у\x02х')
buf.write('\x02ч\x02щ\x02ы\x02э\x02я\x02ё\x02ѓ')
buf.write('\x02ѕ\x02ї\x02љ\x02ћ\x02ѝ\x02џ\x02ѡ')
buf.write(
"\x02ѣ\x02ѥ\x02ѧ\x02\x03\x02'\x05\x02\x0c\x0c\x0f\x0f))\x05\x022")
buf.write(
';CHch\x04\x02GGgg\x04\x02--//\t\x02\x0b\x0c\x0f\x0f""**>>]]}}\x05\x02\x0c'
)
buf.write(
'\x0c\x0f\x0f$$\x04\x022;aa\x05\x02\x0b\x0c\x0f\x0f""\x04\x02C\\c|\x04\x02\x0c'
)
buf.write(
'\x0c\x0f\x0f\x04\x02\x0b\x0b""\x05\x02%&2;aa\x04\x02CCcc\x04\x02DDdd\x04\x02'
)
buf.write(
'EEee\x04\x02FFff\x04\x02HHhh\x04\x02IIii\x04\x02JJjj\x04\x02KKkk\x04\x02LLll\x04'
)
buf.write(
'\x02MMmm\x04\x02NNnn\x04\x02OOoo\x04\x02PPpp\x04\x02QQqq\x04\x02RRrr\x04\x02SSs'
)
buf.write(
's\x04\x02TTtt\x04\x02UUuu\x04\x02VVvv\x04\x02WWww\x04\x02XXxx\x04\x02YYyy\x04\x02'
)
buf.write(
'ZZzz\x04\x02[[{{\x04\x02\\\\||\x02ᓝ\x02\x03\x03\x02\x02\x02\x02\x05\x03\x02\x02\x02'
)
buf.write(
'\x02\x07\x03\x02\x02\x02\x02\t\x03\x02\x02\x02\x02\x0b\x03\x02\x02\x02\x02\r\x03\x02\x02\x02\x02\x0f'
)
buf.write(
'\x03\x02\x02\x02\x02\x11\x03\x02\x02\x02\x02\x13\x03\x02\x02\x02\x02\x15\x03\x02\x02\x02\x02\x17\x03'
)
buf.write(
'\x02\x02\x02\x02\x19\x03\x02\x02\x02\x02\x1b\x03\x02\x02\x02\x02\x1d\x03\x02\x02\x02\x02\x1f\x03\x02'
)
buf.write(
"\x02\x02\x02!\x03\x02\x02\x02\x02#\x03\x02\x02\x02\x02%\x03\x02\x02\x02\x02'\x03\x02\x02\x02\x02)\x03"
)
buf.write(
'\x02\x02\x02\x02+\x03\x02\x02\x02\x02-\x03\x02\x02\x02\x02/\x03\x02\x02\x02\x021\x03\x02\x02\x02\x02'
)
buf.write(
'3\x03\x02\x02\x02\x025\x03\x02\x02\x02\x027\x03\x02\x02\x02\x029\x03\x02\x02\x02\x02;\x03'
)
buf.write(
'\x02\x02\x02\x02=\x03\x02\x02\x02\x02?\x03\x02\x02\x02\x02A\x03\x02\x02\x02\x02C\x03\x02\x02\x02\x02E'
)
buf.write(
'\x03\x02\x02\x02\x02G\x03\x02\x02\x02\x02I\x03\x02\x02\x02\x02K\x03\x02\x02\x02\x02M\x03\x02\x02\x02\x02'
)
buf.write(
'O\x03\x02\x02\x02\x02Q\x03\x02\x02\x02\x02S\x03\x02\x02\x02\x02U\x03\x02\x02\x02\x02W\x03\x02\x02\x02'
)
buf.write(
'\x02Y\x03\x02\x02\x02\x02[\x03\x02\x02\x02\x02]\x03\x02\x02\x02\x02_\x03\x02\x02\x02\x02a\x03\x02\x02'
)
buf.write(
'\x02\x02c\x03\x02\x02\x02\x02e\x03\x02\x02\x02\x02g\x03\x02\x02\x02\x02i\x03\x02\x02\x02\x02k\x03\x02'
)
buf.write(
'\x02\x02\x02m\x03\x02\x02\x02\x02o\x03\x02\x02\x02\x02q\x03\x02\x02\x02\x02s\x03\x02\x02\x02\x02u\x03'
)
buf.write(
'\x02\x02\x02\x02w\x03\x02\x02\x02\x02y\x03\x02\x02\x02\x02{\x03\x02\x02\x02\x02}\x03\x02\x02\x02\x02\x7f'
)
buf.write(
'\x03\x02\x02\x02\x02\x81\x03\x02\x02\x02\x02\x83\x03\x02\x02\x02\x02\x85\x03\x02\x02'
)
buf.write(
'\x02\x02\x87\x03\x02\x02\x02\x02\x89\x03\x02\x02\x02\x02\x8b\x03\x02\x02\x02\x02\x8d'
)
buf.write(
'\x03\x02\x02\x02\x02\x8f\x03\x02\x02\x02\x02\x91\x03\x02\x02\x02\x02\x93\x03\x02\x02'
)
buf.write(
'\x02\x02\x95\x03\x02\x02\x02\x02\x97\x03\x02\x02\x02\x02\x99\x03\x02\x02\x02\x02\x9b'
)
buf.write(
'\x03\x02\x02\x02\x02\x9d\x03\x02\x02\x02\x02\x9f\x03\x02\x02\x02\x02¡\x03\x02\x02'
)
buf.write(
'\x02\x02£\x03\x02\x02\x02\x02¥\x03\x02\x02\x02\x02§\x03\x02\x02\x02\x02©'
)
buf.write(
'\x03\x02\x02\x02\x02«\x03\x02\x02\x02\x02\xad\x03\x02\x02\x02\x02¯\x03\x02\x02'
)
buf.write(
'\x02\x02±\x03\x02\x02\x02\x02³\x03\x02\x02\x02\x02µ\x03\x02\x02\x02\x02·'
)
buf.write(
'\x03\x02\x02\x02\x02¹\x03\x02\x02\x02\x02»\x03\x02\x02\x02\x02½\x03\x02\x02'
)
buf.write(
'\x02\x02¿\x03\x02\x02\x02\x02Á\x03\x02\x02\x02\x02Ã\x03\x02\x02\x02\x02Å'
)
buf.write(
'\x03\x02\x02\x02\x02Ç\x03\x02\x02\x02\x02É\x03\x02\x02\x02\x02Ë\x03\x02\x02'
)
buf.write(
'\x02\x02Í\x03\x02\x02\x02\x02Ï\x03\x02\x02\x02\x02Ñ\x03\x02\x02\x02\x02Ó'
)
buf.write(
'\x03\x02\x02\x02\x02Õ\x03\x02\x02\x02\x02×\x03\x02\x02\x02\x02Ù\x03\x02\x02'
)
buf.write(
'\x02\x02Û\x03\x02\x02\x02\x02Ý\x03\x02\x02\x02\x02ß\x03\x02\x02\x02\x02á'
)
buf.write(
'\x03\x02\x02\x02\x02ã\x03\x02\x02\x02\x02å\x03\x02\x02\x02\x02ç\x03\x02\x02'
)
buf.write(
'\x02\x02é\x03\x02\x02\x02\x02ë\x03\x02\x02\x02\x02í\x03\x02\x02\x02\x02ï'
)
buf.write(
'\x03\x02\x02\x02\x02ñ\x03\x02\x02\x02\x02ó\x03\x02\x02\x02\x02õ\x03\x02\x02'
)
buf.write(
'\x02\x02÷\x03\x02\x02\x02\x02ù\x03\x02\x02\x02\x02û\x03\x02\x02\x02\x02ý'
)
buf.write(
'\x03\x02\x02\x02\x02ÿ\x03\x02\x02\x02\x02ā\x03\x02\x02\x02\x02ă\x03\x02\x02'
)
buf.write(
'\x02\x02ą\x03\x02\x02\x02\x02ć\x03\x02\x02\x02\x02ĉ\x03\x02\x02\x02\x02ċ'
)
buf.write(
'\x03\x02\x02\x02\x02č\x03\x02\x02\x02\x02ď\x03\x02\x02\x02\x02đ\x03\x02\x02'
)
buf.write(
'\x02\x02ē\x03\x02\x02\x02\x02ĕ\x03\x02\x02\x02\x02ė\x03\x02\x02\x02\x02ę'
)
buf.write(
'\x03\x02\x02\x02\x02ě\x03\x02\x02\x02\x02ĝ\x03\x02\x02\x02\x02ğ\x03\x02\x02'
)
buf.write(
'\x02\x02ġ\x03\x02\x02\x02\x02ģ\x03\x02\x02\x02\x02ĥ\x03\x02\x02\x02\x02ħ'
)
buf.write(
'\x03\x02\x02\x02\x02ĩ\x03\x02\x02\x02\x02ī\x03\x02\x02\x02\x02ĭ\x03\x02\x02'
)
buf.write(
'\x02\x02į\x03\x02\x02\x02\x02ı\x03\x02\x02\x02\x02ij\x03\x02\x02\x02\x02ĵ'
)
buf.write(
'\x03\x02\x02\x02\x02ķ\x03\x02\x02\x02\x02Ĺ\x03\x02\x02\x02\x02Ļ\x03\x02\x02'
)
buf.write(
'\x02\x02Ľ\x03\x02\x02\x02\x02Ŀ\x03\x02\x02\x02\x02Ł\x03\x02\x02\x02\x02Ń'
)
buf.write(
'\x03\x02\x02\x02\x02Ņ\x03\x02\x02\x02\x02Ň\x03\x02\x02\x02\x02ʼn\x03\x02\x02'
)
buf.write(
'\x02\x02ŋ\x03\x02\x02\x02\x02ō\x03\x02\x02\x02\x02ŏ\x03\x02\x02\x02\x02ő'
)
buf.write(
'\x03\x02\x02\x02\x02œ\x03\x02\x02\x02\x02ŕ\x03\x02\x02\x02\x02ŗ\x03\x02\x02'
)
buf.write(
'\x02\x02ř\x03\x02\x02\x02\x02ś\x03\x02\x02\x02\x02ŝ\x03\x02\x02\x02\x02ş'
)
buf.write(
'\x03\x02\x02\x02\x02š\x03\x02\x02\x02\x02ţ\x03\x02\x02\x02\x02ť\x03\x02\x02'
)
buf.write(
'\x02\x02ŧ\x03\x02\x02\x02\x02ũ\x03\x02\x02\x02\x02ū\x03\x02\x02\x02\x02ŭ'
)
buf.write(
'\x03\x02\x02\x02\x02ů\x03\x02\x02\x02\x02ű\x03\x02\x02\x02\x02ų\x03\x02\x02'
)
buf.write(
'\x02\x02ŵ\x03\x02\x02\x02\x02ŷ\x03\x02\x02\x02\x02Ź\x03\x02\x02\x02\x02Ż'
)
buf.write(
'\x03\x02\x02\x02\x02Ž\x03\x02\x02\x02\x02ſ\x03\x02\x02\x02\x02Ɓ\x03\x02\x02'
)
buf.write(
'\x02\x02ƃ\x03\x02\x02\x02\x02ƅ\x03\x02\x02\x02\x02Ƈ\x03\x02\x02\x02\x02Ɖ'
)
buf.write(
'\x03\x02\x02\x02\x02Ƌ\x03\x02\x02\x02\x02ƍ\x03\x02\x02\x02\x02Ə\x03\x02\x02'
)
buf.write(
'\x02\x02Ƒ\x03\x02\x02\x02\x02Ɠ\x03\x02\x02\x02\x02ƕ\x03\x02\x02\x02\x02Ɨ'
)
buf.write(
'\x03\x02\x02\x02\x02ƙ\x03\x02\x02\x02\x02ƛ\x03\x02\x02\x02\x02Ɲ\x03\x02\x02'
)
buf.write(
'\x02\x02Ɵ\x03\x02\x02\x02\x02ơ\x03\x02\x02\x02\x02ƣ\x03\x02\x02\x02\x02ƥ'
)
buf.write(
'\x03\x02\x02\x02\x02Ƨ\x03\x02\x02\x02\x02Ʃ\x03\x02\x02\x02\x02ƫ\x03\x02\x02'
)
buf.write(
'\x02\x02ƭ\x03\x02\x02\x02\x02Ư\x03\x02\x02\x02\x02Ʊ\x03\x02\x02\x02\x02Ƴ'
)
buf.write(
'\x03\x02\x02\x02\x02Ƶ\x03\x02\x02\x02\x02Ʒ\x03\x02\x02\x02\x02ƹ\x03\x02\x02'
)
buf.write(
'\x02\x02ƻ\x03\x02\x02\x02\x02ƽ\x03\x02\x02\x02\x02ƿ\x03\x02\x02\x02\x02ǁ'
)
buf.write(
'\x03\x02\x02\x02\x02ǃ\x03\x02\x02\x02\x02Dž\x03\x02\x02\x02\x02LJ\x03\x02\x02'
)
buf.write(
'\x02\x02lj\x03\x02\x02\x02\x02Nj\x03\x02\x02\x02\x02Ǎ\x03\x02\x02\x02\x02Ǐ'
)
buf.write(
'\x03\x02\x02\x02\x02Ǒ\x03\x02\x02\x02\x02Ǔ\x03\x02\x02\x02\x02Ǖ\x03\x02\x02'
)
buf.write(
'\x02\x02Ǘ\x03\x02\x02\x02\x02Ǚ\x03\x02\x02\x02\x02Ǜ\x03\x02\x02\x02\x02ǝ'
)
buf.write(
'\x03\x02\x02\x02\x02ǟ\x03\x02\x02\x02\x02ǡ\x03\x02\x02\x02\x02ǣ\x03\x02\x02'
)
buf.write(
'\x02\x02ǥ\x03\x02\x02\x02\x02ǧ\x03\x02\x02\x02\x02ǩ\x03\x02\x02\x02\x02ǫ'
)
buf.write(
'\x03\x02\x02\x02\x02ǭ\x03\x02\x02\x02\x02ǯ\x03\x02\x02\x02\x02DZ\x03\x02\x02'
)
buf.write(
'\x02\x02dz\x03\x02\x02\x02\x02ǵ\x03\x02\x02\x02\x02Ƿ\x03\x02\x02\x02\x02ǹ'
)
buf.write(
'\x03\x02\x02\x02\x02ǻ\x03\x02\x02\x02\x02ǽ\x03\x02\x02\x02\x02ǿ\x03\x02\x02'
)
buf.write(
'\x02\x02ȁ\x03\x02\x02\x02\x02ȃ\x03\x02\x02\x02\x02ȅ\x03\x02\x02\x02\x02ȇ'
)
buf.write(
'\x03\x02\x02\x02\x02ȉ\x03\x02\x02\x02\x02ȋ\x03\x02\x02\x02\x02ȍ\x03\x02\x02'
)
buf.write(
'\x02\x02ȏ\x03\x02\x02\x02\x02ȑ\x03\x02\x02\x02\x02ȓ\x03\x02\x02\x02\x02ȕ'
)
buf.write(
'\x03\x02\x02\x02\x02ȗ\x03\x02\x02\x02\x02ș\x03\x02\x02\x02\x02ț\x03\x02\x02'
)
buf.write(
'\x02\x02ȝ\x03\x02\x02\x02\x02ȟ\x03\x02\x02\x02\x02ȡ\x03\x02\x02\x02\x02ȣ'
)
buf.write(
'\x03\x02\x02\x02\x02ȥ\x03\x02\x02\x02\x02ȧ\x03\x02\x02\x02\x02ȩ\x03\x02\x02'
)
buf.write(
'\x02\x02ȫ\x03\x02\x02\x02\x02ȭ\x03\x02\x02\x02\x02ȯ\x03\x02\x02\x02\x02ȱ'
)
buf.write(
'\x03\x02\x02\x02\x02ȳ\x03\x02\x02\x02\x02ȵ\x03\x02\x02\x02\x02ȷ\x03\x02\x02'
)
buf.write(
'\x02\x02ȹ\x03\x02\x02\x02\x02Ȼ\x03\x02\x02\x02\x02Ƚ\x03\x02\x02\x02\x02ȿ'
)
buf.write(
'\x03\x02\x02\x02\x02Ɂ\x03\x02\x02\x02\x02Ƀ\x03\x02\x02\x02\x02Ʌ\x03\x02\x02'
)
buf.write(
'\x02\x02ɇ\x03\x02\x02\x02\x02ɉ\x03\x02\x02\x02\x02ɋ\x03\x02\x02\x02\x02ɍ'
)
buf.write(
'\x03\x02\x02\x02\x02ɏ\x03\x02\x02\x02\x02ɑ\x03\x02\x02\x02\x02ɓ\x03\x02\x02'
)
buf.write(
'\x02\x02ɕ\x03\x02\x02\x02\x02ɗ\x03\x02\x02\x02\x02ə\x03\x02\x02\x02\x02ɛ'
)
buf.write(
'\x03\x02\x02\x02\x02ɝ\x03\x02\x02\x02\x02ɟ\x03\x02\x02\x02\x02ɡ\x03\x02\x02'
)
buf.write(
'\x02\x02ɣ\x03\x02\x02\x02\x02ɥ\x03\x02\x02\x02\x02ɧ\x03\x02\x02\x02\x02ɩ'
)
buf.write(
'\x03\x02\x02\x02\x02ɫ\x03\x02\x02\x02\x02ɭ\x03\x02\x02\x02\x02ɯ\x03\x02\x02'
)
buf.write(
'\x02\x02ɱ\x03\x02\x02\x02\x02ɳ\x03\x02\x02\x02\x02ɵ\x03\x02\x02\x02\x02ɷ'
)
buf.write(
'\x03\x02\x02\x02\x02ɹ\x03\x02\x02\x02\x02ɻ\x03\x02\x02\x02\x02ɽ\x03\x02\x02'
)
buf.write(
'\x02\x02ɿ\x03\x02\x02\x02\x02ʁ\x03\x02\x02\x02\x02ʃ\x03\x02\x02\x02\x02ʅ'
)
buf.write(
'\x03\x02\x02\x02\x02ʇ\x03\x02\x02\x02\x02ʉ\x03\x02\x02\x02\x02ʋ\x03\x02\x02'
)
buf.write(
'\x02\x02ʍ\x03\x02\x02\x02\x02ʏ\x03\x02\x02\x02\x02ʑ\x03\x02\x02\x02\x02ʓ'
)
buf.write(
'\x03\x02\x02\x02\x02ʕ\x03\x02\x02\x02\x02ʗ\x03\x02\x02\x02\x02ʙ\x03\x02\x02'
)
buf.write(
'\x02\x02ʛ\x03\x02\x02\x02\x02ʝ\x03\x02\x02\x02\x02ʟ\x03\x02\x02\x02\x02ʡ'
)
buf.write(
'\x03\x02\x02\x02\x02ʣ\x03\x02\x02\x02\x02ʥ\x03\x02\x02\x02\x02ʧ\x03\x02\x02'
)
buf.write(
'\x02\x02ʩ\x03\x02\x02\x02\x02ʫ\x03\x02\x02\x02\x02ʭ\x03\x02\x02\x02\x02ʯ'
)
buf.write(
'\x03\x02\x02\x02\x02ʱ\x03\x02\x02\x02\x02ʳ\x03\x02\x02\x02\x02ʵ\x03\x02\x02'
)
buf.write(
'\x02\x02ʷ\x03\x02\x02\x02\x02ʹ\x03\x02\x02\x02\x02ʻ\x03\x02\x02\x02\x02ʽ'
)
buf.write(
'\x03\x02\x02\x02\x02ʿ\x03\x02\x02\x02\x02ˁ\x03\x02\x02\x02\x02˃\x03\x02\x02'
)
buf.write(
'\x02\x02˅\x03\x02\x02\x02\x02ˇ\x03\x02\x02\x02\x02ˉ\x03\x02\x02\x02\x02ˋ'
)
buf.write(
'\x03\x02\x02\x02\x02ˍ\x03\x02\x02\x02\x02ˏ\x03\x02\x02\x02\x02ˑ\x03\x02\x02'
)
buf.write(
'\x02\x02˓\x03\x02\x02\x02\x02˕\x03\x02\x02\x02\x02˗\x03\x02\x02\x02\x02˙'
)
buf.write(
'\x03\x02\x02\x02\x02˛\x03\x02\x02\x02\x02˝\x03\x02\x02\x02\x02˟\x03\x02\x02'
)
buf.write(
'\x02\x02ˡ\x03\x02\x02\x02\x02ˣ\x03\x02\x02\x02\x02˥\x03\x02\x02\x02\x02˧'
)
buf.write(
'\x03\x02\x02\x02\x02˩\x03\x02\x02\x02\x02˫\x03\x02\x02\x02\x02˭\x03\x02\x02'
)
buf.write(
'\x02\x02˯\x03\x02\x02\x02\x02˱\x03\x02\x02\x02\x02˳\x03\x02\x02\x02\x02˵'
)
buf.write(
'\x03\x02\x02\x02\x02˷\x03\x02\x02\x02\x02˹\x03\x02\x02\x02\x02˻\x03\x02\x02'
)
buf.write(
'\x02\x02˽\x03\x02\x02\x02\x02˿\x03\x02\x02\x02\x02́\x03\x02\x02\x02\x02̃'
)
buf.write(
'\x03\x02\x02\x02\x02̅\x03\x02\x02\x02\x02̇\x03\x02\x02\x02\x02̉\x03\x02\x02'
)
buf.write(
'\x02\x02̋\x03\x02\x02\x02\x02̍\x03\x02\x02\x02\x02̏\x03\x02\x02\x02\x02̑'
)
buf.write(
'\x03\x02\x02\x02\x02̓\x03\x02\x02\x02\x02̕\x03\x02\x02\x02\x02̗\x03\x02\x02'
)
buf.write(
'\x02\x02̙\x03\x02\x02\x02\x02̛\x03\x02\x02\x02\x02̝\x03\x02\x02\x02\x02̟'
)
buf.write(
'\x03\x02\x02\x02\x02̡\x03\x02\x02\x02\x02̣\x03\x02\x02\x02\x02̥\x03\x02\x02'
)
buf.write(
'\x02\x02̧\x03\x02\x02\x02\x02̩\x03\x02\x02\x02\x02̫\x03\x02\x02\x02\x02̭'
)
buf.write(
'\x03\x02\x02\x02\x02̯\x03\x02\x02\x02\x02̱\x03\x02\x02\x02\x02̳\x03\x02\x02'
)
buf.write(
'\x02\x02̵\x03\x02\x02\x02\x02̷\x03\x02\x02\x02\x02̹\x03\x02\x02\x02\x02̻'
)
buf.write(
'\x03\x02\x02\x02\x02̽\x03\x02\x02\x02\x02̿\x03\x02\x02\x02\x02́\x03\x02\x02'
)
buf.write(
'\x02\x02̓\x03\x02\x02\x02\x02ͅ\x03\x02\x02\x02\x02͇\x03\x02\x02\x02\x02͉'
)
buf.write(
'\x03\x02\x02\x02\x02͋\x03\x02\x02\x02\x02͍\x03\x02\x02\x02\x02͏\x03\x02\x02'
)
buf.write(
'\x02\x02͑\x03\x02\x02\x02\x02͓\x03\x02\x02\x02\x02͕\x03\x02\x02\x02\x02͗'
)
buf.write(
'\x03\x02\x02\x02\x02͙\x03\x02\x02\x02\x02͛\x03\x02\x02\x02\x02͝\x03\x02\x02'
)
buf.write(
'\x02\x02͟\x03\x02\x02\x02\x02͡\x03\x02\x02\x02\x02ͣ\x03\x02\x02\x02\x02ͥ'
)
buf.write(
'\x03\x02\x02\x02\x02ͧ\x03\x02\x02\x02\x02ͩ\x03\x02\x02\x02\x02ͫ\x03\x02\x02'
)
buf.write(
'\x02\x02ͭ\x03\x02\x02\x02\x02ͯ\x03\x02\x02\x02\x02ͱ\x03\x02\x02\x02\x02ͳ'
)
buf.write(
'\x03\x02\x02\x02\x02͵\x03\x02\x02\x02\x02ͷ\x03\x02\x02\x02\x02\u0379\x03\x02\x02'
)
buf.write(
'\x02\x02ͻ\x03\x02\x02\x02\x02ͽ\x03\x02\x02\x02\x02Ϳ\x03\x02\x02\x02\x02\u0381'
)
buf.write(
'\x03\x02\x02\x02\x02\u0383\x03\x02\x02\x02\x02΅\x03\x02\x02\x02\x02·\x03\x02\x02'
)
buf.write(
'\x02\x02Ή\x03\x02\x02\x02\x02\u038b\x03\x02\x02\x02\x02\u038d\x03\x02\x02\x02\x02Ώ'
)
buf.write(
'\x03\x02\x02\x02\x02Α\x03\x02\x02\x02\x02Γ\x03\x02\x02\x02\x02Ε\x03\x02\x02'
)
buf.write(
'\x02\x02Η\x03\x02\x02\x02\x02Ι\x03\x02\x02\x02\x02Λ\x03\x02\x02\x02\x02Ν'
)
buf.write(
'\x03\x02\x02\x02\x02Ο\x03\x02\x02\x02\x02Ρ\x03\x02\x02\x02\x02Σ\x03\x02\x02'
)
buf.write(
'\x02\x02Υ\x03\x02\x02\x02\x02Χ\x03\x02\x02\x02\x02Ω\x03\x02\x02\x02\x02Ϋ'
)
buf.write(
'\x03\x02\x02\x02\x02έ\x03\x02\x02\x02\x02ί\x03\x02\x02\x02\x02α\x03\x02\x02'
)
buf.write(
'\x02\x02γ\x03\x02\x02\x02\x02ε\x03\x02\x02\x02\x02η\x03\x02\x02\x02\x02ι'
)
buf.write(
'\x03\x02\x02\x02\x02λ\x03\x02\x02\x02\x02ν\x03\x02\x02\x02\x02ο\x03\x02\x02'
)
buf.write(
'\x02\x02ρ\x03\x02\x02\x02\x02σ\x03\x02\x02\x02\x02υ\x03\x02\x02\x02\x02χ'
)
buf.write(
'\x03\x02\x02\x02\x02ω\x03\x02\x02\x02\x02ϋ\x03\x02\x02\x02\x02ύ\x03\x02\x02'
)
buf.write(
'\x02\x02Ϗ\x03\x02\x02\x02\x02ϑ\x03\x02\x02\x02\x02ϓ\x03\x02\x02\x02\x02ϡ'
)
buf.write(
'\x03\x02\x02\x02\x02ϣ\x03\x02\x02\x02\x02ϥ\x03\x02\x02\x02\x02ϧ\x03\x02\x02'
)
buf.write(
'\x02\x02ϩ\x03\x02\x02\x02\x02ϫ\x03\x02\x02\x02\x02ϭ\x03\x02\x02\x02\x02ϯ'
)
buf.write(
'\x03\x02\x02\x02\x02ϱ\x03\x02\x02\x02\x02ϳ\x03\x02\x02\x02\x02ϵ\x03\x02\x02'
)
buf.write(
'\x02\x02Ϸ\x03\x02\x02\x02\x02Ϲ\x03\x02\x02\x02\x02ϻ\x03\x02\x02\x02\x02Ͻ'
)
buf.write(
'\x03\x02\x02\x02\x02Ͽ\x03\x02\x02\x02\x02Ё\x03\x02\x02\x02\x02Ѓ\x03\x02\x02'
)
buf.write(
'\x02\x02Ѕ\x03\x02\x02\x02\x02Ї\x03\x02\x02\x02\x02Љ\x03\x02\x02\x02\x02Ћ'
)
buf.write(
'\x03\x02\x02\x02\x02Ѝ\x03\x02\x02\x02\x02Џ\x03\x02\x02\x02\x02Г\x03\x02\x02'
)
buf.write(
'\x02\x02Е\x03\x02\x02\x02\x02З\x03\x02\x02\x02\x02Й\x03\x02\x02\x02\x02Л'
)
buf.write(
'\x03\x02\x02\x02\x02Н\x03\x02\x02\x02\x02П\x03\x02\x02\x02\x02Ч\x03\x02\x02'
)
buf.write(
'\x02\x02Щ\x03\x02\x02\x02\x02Ы\x03\x02\x02\x02\x02б\x03\x02\x02\x02\x02г'
)
buf.write(
'\x03\x02\x02\x02\x03ѩ\x03\x02\x02\x02\x05Ѭ\x03\x02\x02\x02\x07Ѯ\x03\x02\x02'
)
buf.write(
'\x02\tѲ\x03\x02\x02\x02\x0bѸ\x03\x02\x02\x02\rѾ\x03\x02\x02\x02\x0f'
)
buf.write(
'҈\x03\x02\x02\x02\x11Ҍ\x03\x02\x02\x02\x13Ғ\x03\x02\x02\x02\x15Қ')
buf.write(
'\x03\x02\x02\x02\x17Ҟ\x03\x02\x02\x02\x19Ң\x03\x02\x02\x02\x1bҨ\x03'
)
buf.write(
'\x02\x02\x02\x1dҫ\x03\x02\x02\x02\x1fҲ\x03\x02\x02\x02!ҹ\x03\x02\x02'
)
buf.write(
"\x02#ҽ\x03\x02\x02\x02%Ӈ\x03\x02\x02\x02'ӊ\x03\x02\x02\x02)Ӕ")
buf.write(
'\x03\x02\x02\x02+Ӛ\x03\x02\x02\x02-ӡ\x03\x02\x02\x02/Ӧ\x03\x02\x02\x02'
)
buf.write('1Ӱ\x03\x02\x02\x023ԇ\x03\x02\x02\x025ԍ\x03\x02\x02\x027')
buf.write('Ԕ\x03\x02\x02\x029Ԛ\x03\x02\x02\x02;Ԣ\x03\x02\x02\x02=Ԩ\x03'
)
buf.write(
'\x02\x02\x02?Զ\x03\x02\x02\x02AՃ\x03\x02\x02\x02CՒ\x03\x02\x02\x02E\u0557'
)
buf.write(
'\x03\x02\x02\x02G՝\x03\x02\x02\x02Iբ\x03\x02\x02\x02Kժ\x03\x02\x02\x02'
)
buf.write(
'Mկ\x03\x02\x02\x02Oշ\x03\x02\x02\x02Qռ\x03\x02\x02\x02Sտ\x03')
buf.write(
'\x02\x02\x02Uք\x03\x02\x02\x02Wֆ\x03\x02\x02\x02Y\u058c\x03\x02\x02\x02[֑'
)
buf.write(
'\x03\x02\x02\x02]֛\x03\x02\x02\x02_֣\x03\x02\x02\x02a֨\x03\x02\x02\x02'
)
buf.write(
'c֭\x03\x02\x02\x02eֲ\x03\x02\x02\x02gֺ\x03\x02\x02\x02iׄ\x03')
buf.write(
'\x02\x02\x02k\u05ca\x03\x02\x02\x02m\u05ce\x03\x02\x02\x02oד\x03\x02\x02\x02qי'
)
buf.write(
'\x03\x02\x02\x02sס\x03\x02\x02\x02uש\x03\x02\x02\x02wױ\x03\x02\x02\x02'
)
buf.write(
'y\u05f9\x03\x02\x02\x02{\u0600\x03\x02\x02\x02}؊\x03\x02\x02\x02\x7fؘ'
)
buf.write(
'\x03\x02\x02\x02\x81ؠ\x03\x02\x02\x02\x83ة\x03\x02\x02\x02\x85')
buf.write('ر\x03\x02\x02\x02\x87ف\x03\x02\x02\x02\x89ي\x03\x02\x02\x02'
)
buf.write('\x8bٕ\x03\x02\x02\x02\x8d١\x03\x02\x02\x02\x8f٭\x03')
buf.write('\x02\x02\x02\x91ٵ\x03\x02\x02\x02\x93ٽ\x03\x02\x02\x02\x95چ'
)
buf.write(
'\x03\x02\x02\x02\x97ڎ\x03\x02\x02\x02\x99ښ\x03\x02\x02\x02\x9b')
buf.write('ڪ\x03\x02\x02\x02\x9dگ\x03\x02\x02\x02\x9fڵ\x03\x02\x02\x02'
)
buf.write('¡ڼ\x03\x02\x02\x02£ۂ\x03\x02\x02\x02¥ۇ\x03')
buf.write('\x02\x02\x02§ۏ\x03\x02\x02\x02©ۜ\x03\x02\x02\x02«ۣ')
buf.write('\x03\x02\x02\x02\xadۯ\x03\x02\x02\x02¯۵\x03\x02\x02\x02±')
buf.write('ۺ\x03\x02\x02\x02³܃\x03\x02\x02\x02µ܈\x03\x02\x02\x02')
buf.write('·܌\x03\x02\x02\x02¹ܛ\x03\x02\x02\x02»ܦ\x03')
buf.write('\x02\x02\x02½ܪ\x03\x02\x02\x02¿ܰ\x03\x02\x02\x02Áܴ')
buf.write('\x03\x02\x02\x02Ãܼ\x03\x02\x02\x02Å݄\x03\x02\x02\x02Ç')
buf.write('ݎ\x03\x02\x02\x02Éݘ\x03\x02\x02\x02Ëݠ\x03\x02\x02\x02')
buf.write('Íݩ\x03\x02\x02\x02Ïݲ\x03\x02\x02\x02Ñݺ\x03')
buf.write('\x02\x02\x02Óށ\x03\x02\x02\x02Õއ\x03\x02\x02\x02×ތ')
buf.write('\x03\x02\x02\x02Ùޚ\x03\x02\x02\x02Ûޤ\x03\x02\x02\x02Ý')
buf.write('ެ\x03\x02\x02\x02ß\u07b9\x03\x02\x02\x02á߂\x03\x02\x02\x02')
buf.write('ãߋ\x03\x02\x02\x02åߒ\x03\x02\x02\x02çߗ\x03')
buf.write('\x02\x02\x02é߰\x03\x02\x02\x02ëߵ\x03\x02\x02\x02í߽')
buf.write('\x03\x02\x02\x02ïࠂ\x03\x02\x02\x02ñࠈ\x03\x02\x02\x02ó')
buf.write('ࠎ\x03\x02\x02\x02õࠕ\x03\x02\x02\x02÷ࠞ\x03\x02\x02\x02')
buf.write('ùࠢ\x03\x02\x02\x02û࠱\x03\x02\x02\x02ý࠵\x03')
buf.write('\x02\x02\x02ÿ࠼\x03\x02\x02\x02āࡃ\x03\x02\x02\x02ăࡌ')
buf.write('\x03\x02\x02\x02ąࡓ\x03\x02\x02\x02ć\u085d\x03\x02\x02\x02ĉ')
buf.write('\u086c\x03\x02\x02\x02ċࡷ\x03\x02\x02\x02čࡿ\x03\x02\x02\x02')
buf.write('ďࢉ\x03\x02\x02\x02đ\u0891\x03\x02\x02\x02ē࢘\x03')
buf.write('\x02\x02\x02ĕ࢝\x03\x02\x02\x02ėࢥ\x03\x02\x02\x02ęࢮ')
buf.write('\x03\x02\x02\x02ěࢶ\x03\x02\x02\x02ĝࢾ\x03\x02\x02\x02ğ')
buf.write('ࣄ\x03\x02\x02\x02ġ࣊\x03\x02\x02\x02ģ࣐\x03\x02\x02\x02')
buf.write('ĥࣖ\x03\x02\x02\x02ħ\u08e2\x03\x02\x02\x02ĩࣨ\x03')
buf.write('\x02\x02\x02īࣲ\x03\x02\x02\x02ĭࣺ\x03\x02\x02\x02įࣾ')
buf.write('\x03\x02\x02\x02ıअ\x03\x02\x02\x02ijऋ\x03\x02\x02\x02ĵ')
buf.write('ऐ\x03\x02\x02\x02ķक\x03\x02\x02\x02Ĺञ\x03\x02\x02\x02')
buf.write('Ļण\x03\x02\x02\x02Ľऩ\x03\x02\x02\x02Ŀय\x03')
buf.write('\x02\x02\x02Łस\x03\x02\x02\x02Ńऽ\x03\x02\x02\x02Ņॄ')
buf.write('\x03\x02\x02\x02Ňॉ\x03\x02\x02\x02ʼnॎ\x03\x02\x02\x02ŋ')
buf.write('॑\x03\x02\x02\x02ōक़\x03\x02\x02\x02ŏॢ\x03\x02\x02\x02')
buf.write('ő॥\x03\x02\x02\x02œ७\x03\x02\x02\x02ŕॷ\x03')
buf.write('\x02\x02\x02ŗঁ\x03\x02\x02\x02řঈ\x03\x02\x02\x02ś\u098e')
buf.write('\x03\x02\x02\x02ŝখ\x03\x02\x02\x02şঠ\x03\x02\x02\x02š')
buf.write('ন\x03\x02\x02\x02ţ\u09b1\x03\x02\x02\x02ťস\x03\x02\x02\x02')
buf.write('ŧা\x03\x02\x02\x02ũৄ\x03\x02\x02\x02ūো\x03')
buf.write(
'\x02\x02\x02ŭ\u09d8\x03\x02\x02\x02ůৠ\x03\x02\x02\x02ű\u09e4')
buf.write('\x03\x02\x02\x02ų৬\x03\x02\x02\x02ŵ৶\x03\x02\x02\x02ŷ')
buf.write(
'\u09ff\x03\x02\x02\x02Ź\u0a04\x03\x02\x02\x02Żਏ\x03\x02\x02\x02')
buf.write('Ž\u0a12\x03\x02\x02\x02ſਜ\x03\x02\x02\x02Ɓਤ\x03')
buf.write('\x02\x02\x02ƃ\u0a29\x03\x02\x02\x02ƅਮ\x03\x02\x02\x02Ƈਲ਼')
buf.write('\x03\x02\x02\x02Ɖ਼\x03\x02\x02\x02Ƌੁ\x03\x02\x02\x02ƍ')
buf.write('ੌ\x03\x02\x02\x02Ə\u0a54\x03\x02\x02\x02Ƒਖ਼\x03\x02\x02\x02')
buf.write('Ɠ\u0a5f\x03\x02\x02\x02ƕ੧\x03\x02\x02\x02Ɨ੬\x03')
buf.write(
'\x02\x02\x02ƙੲ\x03\x02\x02\x02ƛ\u0a78\x03\x02\x02\x02Ɲ\u0a7e')
buf.write('\x03\x02\x02\x02Ɵ\u0a84\x03\x02\x02\x02ơઊ\x03\x02\x02\x02ƣ')
buf.write('એ\x03\x02\x02\x02ƥખ\x03\x02\x02\x02Ƨચ\x03\x02\x02\x02')
buf.write('Ʃડ\x03\x02\x02\x02ƫધ\x03\x02\x02\x02ƭબ\x03')
buf.write(
'\x02\x02\x02Ư\u0ab1\x03\x02\x02\x02Ʊશ\x03\x02\x02\x02Ƴ\u0aba')
buf.write('\x03\x02\x02\x02Ƶૂ\x03\x02\x02\x02Ʒો\x03\x02\x02\x02ƹ')
buf.write(
'\u0ad4\x03\x02\x02\x02ƻ\u0adb\x03\x02\x02\x02ƽૡ\x03\x02\x02\x02')
buf.write('ƿ૧\x03\x02\x02\x02ǁ૮\x03\x02\x02\x02ǃ\u0af7\x03')
buf.write('\x02\x02\x02Dž\u0b00\x03\x02\x02\x02LJଅ\x03\x02\x02\x02ljଋ')
buf.write('\x03\x02\x02\x02Nj\u0b12\x03\x02\x02\x02Ǎଘ\x03\x02\x02\x02Ǐ')
buf.write('ଡ\x03\x02\x02\x02Ǒଦ\x03\x02\x02\x02Ǔପ\x03\x02\x02\x02')
buf.write('Ǖଲ\x03\x02\x02\x02Ǘ\u0b3b\x03\x02\x02\x02Ǚି\x03')
buf.write(
'\x02\x02\x02Ǜ\u0b45\x03\x02\x02\x02ǝ\u0b4e\x03\x02\x02\x02ǟ\u0b54'
)
buf.write('\x03\x02\x02\x02ǡ\u0b5b\x03\x02\x02\x02ǣୟ\x03\x02\x02\x02ǥ')
buf.write('ୢ\x03\x02\x02\x02ǧ୪\x03\x02\x02\x02ǩ୲\x03\x02\x02\x02')
buf.write('ǫ\u0b79\x03\x02\x02\x02ǭ\u0b81\x03\x02\x02\x02ǯஒ\x03')
buf.write(
'\x02\x02\x02DZ\u0b9d\x03\x02\x02\x02dzந\x03\x02\x02\x02ǵ\u0bad')
buf.write('\x03\x02\x02\x02Ƿவ\x03\x02\x02\x02ǹ\u0bc3\x03\x02\x02\x02ǻ')
buf.write(
'ே\x03\x02\x02\x02ǽ\u0bce\x03\x02\x02\x02ǿ\u0bd3\x03\x02\x02\x02')
buf.write('ȁ\u0bd9\x03\x02\x02\x02ȃ\u0be0\x03\x02\x02\x02ȅ௨\x03')
buf.write('\x02\x02\x02ȇ௲\x03\x02\x02\x02ȉ௹\x03\x02\x02\x02ȋ\u0bfc')
buf.write('\x03\x02\x02\x02ȍఀ\x03\x02\x02\x02ȏఄ\x03\x02\x02\x02ȑ')
buf.write('ఈ\x03\x02\x02\x02ȓఋ\x03\x02\x02\x02ȕఐ\x03\x02\x02\x02')
buf.write('ȗక\x03\x02\x02\x02șజ\x03\x02\x02\x02țట\x03')
buf.write('\x02\x02\x02ȝధ\x03\x02\x02\x02ȟభ\x03\x02\x02\x02ȡస')
buf.write('\x03\x02\x02\x02ȣీ\x03\x02\x02\x02ȥౄ\x03\x02\x02\x02ȧ')
buf.write('ొ\x03\x02\x02\x02ȩ\u0c4f\x03\x02\x02\x02ȫౚ\x03\x02\x02\x02')
buf.write('ȭౢ\x03\x02\x02\x02ȯ\u0c72\x03\x02\x02\x02ȱ౽\x03')
buf.write('\x02\x02\x02ȳ಄\x03\x02\x02\x02ȵಎ\x03\x02\x02\x02ȷಖ')
buf.write('\x03\x02\x02\x02ȹಛ\x03\x02\x02\x02Ȼತ\x03\x02\x02\x02Ƚ')
buf.write(
'ಪ\x03\x02\x02\x02ȿ\u0cb4\x03\x02\x02\x02Ɂ\u0cba\x03\x02\x02\x02')
buf.write('Ƀಿ\x03\x02\x02\x02Ʌೋ\x03\x02\x02\x02ɇ\u0cd4\x03')
buf.write('\x02\x02\x02ɉೞ\x03\x02\x02\x02ɋ\u0ce5\x03\x02\x02\x02ɍ೯')
buf.write('\x03\x02\x02\x02ɏ\u0cf9\x03\x02\x02\x02ɑഁ\x03\x02\x02\x02ɓ')
buf.write('ഇ\x03\x02\x02\x02ɕ\u0d11\x03\x02\x02\x02ɗഗ\x03\x02\x02\x02')
buf.write('əഝ\x03\x02\x02\x02ɛഡ\x03\x02\x02\x02ɝദ\x03')
buf.write('\x02\x02\x02ɟഫ\x03\x02\x02\x02ɡല\x03\x02\x02\x02ɣശ')
buf.write('\x03\x02\x02\x02ɥീ\x03\x02\x02\x02ɧൌ\x03\x02\x02\x02ɩ')
buf.write(
'\u0d53\x03\x02\x02\x02ɫ൝\x03\x02\x02\x02ɭ\u0d64\x03\x02\x02\x02')
buf.write('ɯ൬\x03\x02\x02\x02ɱ൴\x03\x02\x02\x02ɳඈ\x03')
buf.write('\x02\x02\x02ɵඏ\x03\x02\x02\x02ɷග\x03\x02\x02\x02ɹඣ')
buf.write('\x03\x02\x02\x02ɻත\x03\x02\x02\x02ɽඳ\x03\x02\x02\x02ɿ')
buf.write('ර\x03\x02\x02\x02ʁෂ\x03\x02\x02\x02ʃ\u0dc8\x03\x02\x02\x02')
buf.write('ʅෑ\x03\x02\x02\x02ʇෘ\x03\x02\x02\x02ʉො\x03')
buf.write('\x02\x02\x02ʋ\u0de2\x03\x02\x02\x02ʍ෧\x03\x02\x02\x02ʏ෭')
buf.write('\x03\x02\x02\x02ʑ෴\x03\x02\x02\x02ʓ\u0df9\x03\x02\x02\x02ʕ')
buf.write('ฃ\x03\x02\x02\x02ʗช\x03\x02\x02\x02ʙถ\x03\x02\x02\x02')
buf.write('ʛบ\x03\x02\x02\x02ʝม\x03\x02\x02\x02ʟศ\x03')
buf.write('\x02\x02\x02ʡอ\x03\x02\x02\x02ʣี\x03\x02\x02\x02ʥ\u0e3c')
buf.write('\x03\x02\x02\x02ʧแ\x03\x02\x02\x02ʩ๊\x03\x02\x02\x02ʫ')
buf.write(
'๕\x03\x02\x02\x02ʭ\u0e62\x03\x02\x02\x02ʯ\u0e74\x03\x02\x02\x02')
buf.write('ʱ\u0e80\x03\x02\x02\x02ʳຐ\x03\x02\x02\x02ʵດ\x03')
buf.write('\x02\x02\x02ʷນ\x03\x02\x02\x02ʹຢ\x03\x02\x02\x02ʻຨ')
buf.write('\x03\x02\x02\x02ʽອ\x03\x02\x02\x02ʿຶ\x03\x02\x02\x02ˁ')
buf.write('\u0ebf\x03\x02\x02\x02˃່\x03\x02\x02\x02˅໗\x03\x02\x02\x02')
buf.write('ˇໞ\x03\x02\x02\x02ˉ\u0ee3\x03\x02\x02\x02ˋ\u0ee8\x03')
buf.write(
'\x02\x02\x02ˍ\u0ef1\x03\x02\x02\x02ˏ\u0efa\x03\x02\x02\x02ˑ\u0eff'
)
buf.write('\x03\x02\x02\x02˓།\x03\x02\x02\x02˕༕\x03\x02\x02\x02˗')
buf.write('༞\x03\x02\x02\x02˙༩\x03\x02\x02\x02˛༯\x03\x02\x02\x02')
buf.write('˝༷\x03\x02\x02\x02˟ཁ\x03\x02\x02\x02ˡཎ\x03')
buf.write('\x02\x02\x02ˣཕ\x03\x02\x02\x02˥འ\x03\x02\x02\x02˧ཧ')
buf.write('\x03\x02\x02\x02˩ཱི\x03\x02\x02\x02˫ྀ\x03\x02\x02\x02˭')
buf.write('ྎ\x03\x02\x02\x02˯ྖ\x03\x02\x02\x02˱ྞ\x03\x02\x02\x02')
buf.write('˳ྦ\x03\x02\x02\x02˵ྫྷ\x03\x02\x02\x02˷ྰ\x03')
buf.write('\x02\x02\x02˹ྵ\x03\x02\x02\x02˻ྺ\x03\x02\x02\x02˽࿄')
buf.write(
'\x03\x02\x02\x02˿\u0fe0\x03\x02\x02\x02́\u0ffb\x03\x02\x02\x02̃')
buf.write('ဓ\x03\x02\x02\x02̅အ\x03\x02\x02\x02̇ု\x03\x02\x02\x02')
buf.write('̉ဿ\x03\x02\x02\x02̋၏\x03\x02\x02\x02̍ၒ\x03')
buf.write('\x02\x02\x02̏ၛ\x03\x02\x02\x02̑ၧ\x03\x02\x02\x02̓ၱ')
buf.write('\x03\x02\x02\x02̕ၷ\x03\x02\x02\x02̗ၿ\x03\x02\x02\x02̙')
buf.write('ႄ\x03\x02\x02\x02̛ႉ\x03\x02\x02\x02̝႒\x03\x02\x02\x02')
buf.write('̟႗\x03\x02\x02\x02̡Ⴁ\x03\x02\x02\x02̣Ⴇ\x03')
buf.write('\x02\x02\x02̥Ⴍ\x03\x02\x02\x02̧Ⴔ\x03\x02\x02\x02̩Ⴞ')
buf.write(
'\x03\x02\x02\x02̫\u10c6\x03\x02\x02\x02̭\u10cc\x03\x02\x02\x02̯')
buf.write('დ\x03\x02\x02\x02̱მ\x03\x02\x02\x02̳ტ\x03\x02\x02\x02')
buf.write('̵ჩ\x03\x02\x02\x02̷ჭ\x03\x02\x02\x02̹ჳ\x03')
buf.write('\x02\x02\x02̻ჼ\x03\x02\x02\x02̽ᄂ\x03\x02\x02\x02̿ᄉ')
buf.write('\x03\x02\x02\x02́ᄑ\x03\x02\x02\x02̓ᄚ\x03\x02\x02\x02ͅ')
buf.write('ᄣ\x03\x02\x02\x02͇ᄪ\x03\x02\x02\x02͉ᄲ\x03\x02\x02\x02')
buf.write('͋ᄺ\x03\x02\x02\x02͍ᅃ\x03\x02\x02\x02͏ᅈ\x03')
buf.write('\x02\x02\x02͑ᅐ\x03\x02\x02\x02͓ᅛ\x03\x02\x02\x02͕ᅠ')
buf.write('\x03\x02\x02\x02͗ᅩ\x03\x02\x02\x02͙ᅯ\x03\x02\x02\x02͛')
buf.write('ᅵ\x03\x02\x02\x02͝ᅺ\x03\x02\x02\x02͟ᆁ\x03\x02\x02\x02')
buf.write('͡ᆆ\x03\x02\x02\x02ͣᆌ\x03\x02\x02\x02ͥᆐ\x03')
buf.write('\x02\x02\x02ͧᆗ\x03\x02\x02\x02ͩᆥ\x03\x02\x02\x02ͫᆭ')
buf.write('\x03\x02\x02\x02ͭᆺ\x03\x02\x02\x02ͯᇅ\x03\x02\x02\x02ͱ')
buf.write('ᇏ\x03\x02\x02\x02ͳᇙ\x03\x02\x02\x02͵ᇧ\x03\x02\x02\x02')
buf.write('ͷᇰ\x03\x02\x02\x02\u0379ᇶ\x03\x02\x02\x02ͻᇿ\x03')
buf.write('\x02\x02\x02ͽሇ\x03\x02\x02\x02Ϳሔ\x03\x02\x02\x02\u0381ም')
buf.write('\x03\x02\x02\x02\u0383ሢ\x03\x02\x02\x02΅ሦ\x03\x02\x02\x02·')
buf.write(
'ሿ\x03\x02\x02\x02Ήቄ\x03\x02\x02\x02\u038b\u124f\x03\x02\x02\x02')
buf.write('\u038dቡ\x03\x02\x02\x02Ώቱ\x03\x02\x02\x02Αኄ\x03')
buf.write('\x02\x02\x02Γኛ\x03\x02\x02\x02Εኪ\x03\x02\x02\x02Ηኴ')
buf.write(
'\x03\x02\x02\x02Ι\u12bf\x03\x02\x02\x02Λ\u12c7\x03\x02\x02\x02Ν')
buf.write('ዔ\x03\x02\x02\x02Οዤ\x03\x02\x02\x02Ρዴ\x03\x02\x02\x02')
buf.write('Σዹ\x03\x02\x02\x02Υዽ\x03\x02\x02\x02Χጂ\x03')
buf.write('\x02\x02\x02Ωጆ\x03\x02\x02\x02Ϋጋ\x03\x02\x02\x02έጏ')
buf.write('\x03\x02\x02\x02ί\u1316\x03\x02\x02\x02αጚ\x03\x02\x02\x02γ')
buf.write('ጠ\x03\x02\x02\x02εጰ\x03\x02\x02\x02ηጻ\x03\x02\x02\x02')
buf.write('ιጿ\x03\x02\x02\x02λፈ\x03\x02\x02\x02νፎ\x03')
buf.write('\x02\x02\x02οፕ\x03\x02\x02\x02ρፚ\x03\x02\x02\x02σ፡')
buf.write('\x03\x02\x02\x02υ፮\x03\x02\x02\x02χ፻\x03\x02\x02\x02ω')
buf.write('ᎈ\x03\x02\x02\x02ϋᎋ\x03\x02\x02\x02ύᎍ\x03\x02\x02\x02')
buf.write('Ϗᎏ\x03\x02\x02\x02ϑ\u139e\x03\x02\x02\x02ϓᎪ\x03')
buf.write('\x02\x02\x02ϕᎳ\x03\x02\x02\x02ϗᎵ\x03\x02\x02\x02ϙᏀ')
buf.write('\x03\x02\x02\x02ϛᏋ\x03\x02\x02\x02ϝᏖ\x03\x02\x02\x02ϟ')
buf.write('Ꮱ\x03\x02\x02\x02ϡᏣ\x03\x02\x02\x02ϣᏭ\x03\x02\x02\x02')
buf.write('ϥᏯ\x03\x02\x02\x02ϧᏱ\x03\x02\x02\x02ϩᏳ\x03')
buf.write('\x02\x02\x02ϫᏵ\x03\x02\x02\x02ϭᏸ\x03\x02\x02\x02ϯᏺ')
buf.write('\x03\x02\x02\x02ϱᏼ\x03\x02\x02\x02ϳ\u13fe\x03\x02\x02\x02ϵ')
buf.write('᐀\x03\x02\x02\x02Ϸᐂ\x03\x02\x02\x02Ϲᐄ\x03\x02\x02\x02')
buf.write('ϻᐕ\x03\x02\x02\x02Ͻᐗ\x03\x02\x02\x02Ͽᐙ\x03')
buf.write('\x02\x02\x02Ёᐛ\x03\x02\x02\x02Ѓᐞ\x03\x02\x02\x02Ѕᐠ')
buf.write('\x03\x02\x02\x02Їᐫ\x03\x02\x02\x02Љᐭ\x03\x02\x02\x02Ћ')
buf.write('ᐯ\x03\x02\x02\x02Ѝᐱ\x03\x02\x02\x02Џᐳ\x03\x02\x02\x02')
buf.write('Бᐵ\x03\x02\x02\x02Гᐷ\x03\x02\x02\x02Еᐺ\x03')
buf.write('\x02\x02\x02Зᐼ\x03\x02\x02\x02Йᐾ\x03\x02\x02\x02Лᑀ')
buf.write('\x03\x02\x02\x02Нᑂ\x03\x02\x02\x02Пᑅ\x03\x02\x02\x02С')
buf.write('ᑋ\x03\x02\x02\x02Уᑎ\x03\x02\x02\x02Хᑕ\x03\x02\x02\x02')
buf.write('Чᑠ\x03\x02\x02\x02Щᑯ\x03\x02\x02\x02Ыᑽ\x03')
buf.write('\x02\x02\x02Эᒐ\x03\x02\x02\x02Яᒔ\x03\x02\x02\x02бᒖ')
buf.write('\x03\x02\x02\x02гᒞ\x03\x02\x02\x02еᒣ\x03\x02\x02\x02з')
buf.write('ᒥ\x03\x02\x02\x02йᒧ\x03\x02\x02\x02лᒩ\x03\x02\x02\x02')
buf.write('нᒫ\x03\x02\x02\x02пᒭ\x03\x02\x02\x02сᒯ\x03')
buf.write('\x02\x02\x02уᒱ\x03\x02\x02\x02хᒳ\x03\x02\x02\x02чᒵ')
buf.write('\x03\x02\x02\x02щᒷ\x03\x02\x02\x02ыᒹ\x03\x02\x02\x02э')
buf.write('ᒻ\x03\x02\x02\x02яᒽ\x03\x02\x02\x02ёᒿ\x03\x02\x02\x02')
buf.write('ѓᓁ\x03\x02\x02\x02ѕᓃ\x03\x02\x02\x02їᓅ\x03')
buf.write('\x02\x02\x02љᓇ\x03\x02\x02\x02ћᓉ\x03\x02\x02\x02ѝᓋ')
buf.write('\x03\x02\x02\x02џᓍ\x03\x02\x02\x02ѡᓏ\x03\x02\x02\x02ѣ')
buf.write('ᓑ\x03\x02\x02\x02ѥᓓ\x03\x02\x02\x02ѧᓕ\x03\x02\x02\x02')
buf.write('ѩѪ\x070\x02\x02Ѫѫ\x070\x02\x02ѫ\x04\x03\x02')
buf.write('\x02\x02Ѭѭ\x05еț\x02ѭ\x06\x03\x02\x02\x02Ѯ')
buf.write('ѯ\x05еț\x02ѯѰ\x05лȞ\x02Ѱ')
buf.write('ѱ\x05лȞ\x02ѱ\x08\x03\x02\x02\x02Ѳѳ\x05е')
buf.write('ț\x02ѳѴ\x05пȠ\x02Ѵѵ\x05ћ')
buf.write('Ȯ\x02ѵѶ\x05нȟ\x02Ѷѷ\x05ї')
buf.write('Ȭ\x02ѷ\n\x03\x02\x02\x02Ѹѹ\x05еț\x02ѹ')
buf.write('Ѻ\x05сȡ\x02Ѻѻ\x05нȟ\x02ѻ')
buf.write('Ѽ\x05яȨ\x02Ѽѽ\x05ћȮ\x02ѽ')
buf.write('\x0c\x03\x02\x02\x02Ѿѿ\x05еț\x02ѿҀ\x05с')
buf.write('ȡ\x02Ҁҁ\x05сȡ\x02ҁ҂\x05ї')
buf.write('Ȭ\x02҂҃\x05нȟ\x02҃҄\x05с')
buf.write('ȡ\x02҄҅\x05еț\x02҅҆\x05ћ')
buf.write('Ȯ\x02҆҇\x05нȟ\x02҇\x0e\x03\x02\x02\x02')
buf.write('҈҉\x05еț\x02҉Ҋ\x05ыȦ')
buf.write('\x02Ҋҋ\x05ыȦ\x02ҋ\x10\x03\x02\x02\x02Ҍ')
buf.write('ҍ\x05еț\x02ҍҎ\x05ыȦ\x02Ҏ')
buf.write('ҏ\x05ћȮ\x02ҏҐ\x05нȟ\x02Ґ')
buf.write('ґ\x05їȬ\x02ґ\x12\x03\x02\x02\x02Ғғ\x05')
buf.write('еț\x02ғҔ\x05яȨ\x02Ҕҕ')
buf.write('\x05еț\x02ҕҖ\x05ыȦ\x02Җҗ')
buf.write('\x05ѥȳ\x02җҘ\x05ѧȴ\x02Ҙҙ')
buf.write('\x05нȟ\x02ҙ\x14\x03\x02\x02\x02Ққ\x05е')
buf.write('ț\x02қҜ\x05яȨ\x02Ҝҝ\x05л')
buf.write('Ȟ\x02ҝ\x16\x03\x02\x02\x02Ҟҟ\x05еț\x02')
buf.write('ҟҠ\x05яȨ\x02Ҡҡ\x05ѥȳ')
buf.write('\x02ҡ\x18\x03\x02\x02\x02Ңң\x05еț\x02ң')
buf.write('Ҥ\x05їȬ\x02Ҥҥ\x05їȬ\x02ҥ')
buf.write('Ҧ\x05еț\x02Ҧҧ\x05ѥȳ\x02ҧ')
buf.write('\x1a\x03\x02\x02\x02Ҩҩ\x05еț\x02ҩҪ\x05')
buf.write('љȭ\x02Ҫ\x1c\x03\x02\x02\x02ҫҬ\x05еț')
buf.write('\x02Ҭҭ\x05љȭ\x02ҭҮ\x05љȭ')
buf.write('\x02Үү\x05ѝȯ\x02үҰ\x05эȧ')
buf.write('\x02Ұұ\x05нȟ\x02ұ\x1e\x03\x02\x02\x02Ҳ')
buf.write('ҳ\x05еț\x02ҳҴ\x05љȭ\x02Ҵ')
buf.write('ҵ\x05љȭ\x02ҵҶ\x05нȟ\x02Ҷ')
buf.write('ҷ\x05їȬ\x02ҷҸ\x05ћȮ\x02Ҹ')
buf.write(' \x03\x02\x02\x02ҹҺ\x05еț\x02Һһ\x05љ')
buf.write('ȭ\x02һҼ\x05йȝ\x02Ҽ"\x03\x02\x02\x02ҽ')
buf.write('Ҿ\x05еț\x02Ҿҿ\x05љȭ\x02ҿ')
buf.write('Ӏ\x05љȭ\x02ӀӁ\x05ёȩ\x02Ӂ')
buf.write('ӂ\x05йȝ\x02ӂӃ\x05хȣ\x02Ӄ')
buf.write('ӄ\x05еț\x02ӄӅ\x05ћȮ\x02Ӆ')
buf.write('ӆ\x05нȟ\x02ӆ$\x03\x02\x02\x02Ӈӈ\x05е')
buf.write('ț\x02ӈӉ\x05ћȮ\x02Ӊ&\x03\x02\x02\x02ӊ')
buf.write('Ӌ\x05еț\x02Ӌӌ\x05ћȮ\x02ӌ')
buf.write('Ӎ\x05ћȮ\x02Ӎӎ\x05їȬ\x02ӎ')
buf.write('ӏ\x05хȣ\x02ӏӐ\x05зȜ\x02Ӑ')
buf.write('ӑ\x05ѝȯ\x02ӑӒ\x05ћȮ\x02Ӓ')
buf.write('ӓ\x05нȟ\x02ӓ(\x03\x02\x02\x02Ӕӕ\x05е')
buf.write('ț\x02ӕӖ\x05ѝȯ\x02Ӗӗ\x05л')
buf.write('Ȟ\x02ӗӘ\x05хȣ\x02Әә\x05ћ')
buf.write('Ȯ\x02ә*\x03\x02\x02\x02Ӛӛ\x05еț\x02ӛ')
buf.write('Ӝ\x05ѝȯ\x02Ӝӝ\x05ћȮ\x02ӝ')
buf.write('Ӟ\x05уȢ\x02Ӟӟ\x05хȣ\x02ӟ')
buf.write('Ӡ\x05лȞ\x02Ӡ,\x03\x02\x02\x02ӡӢ\x05е')
buf.write('ț\x02Ӣӣ\x05ѝȯ\x02ӣӤ\x05ћ')
buf.write('Ȯ\x02Ӥӥ\x05ёȩ\x02ӥ.\x03\x02\x02\x02Ӧ')
buf.write('ӧ\x05еț\x02ӧӨ\x05ѝȯ\x02Ө')
buf.write('ө\x05ћȮ\x02өӪ\x05ёȩ\x02Ӫ')
buf.write('ӫ\x05эȧ\x02ӫӬ\x05еț\x02Ӭ')
buf.write('ӭ\x05ћȮ\x02ӭӮ\x05хȣ\x02Ӯ')
buf.write('ӯ\x05йȝ\x02ӯ0\x03\x02\x02\x02Ӱӱ\x05')
buf.write('еț\x02ӱӲ\x05ѝȯ\x02Ӳӳ')
buf.write('\x05ћȮ\x02ӳӴ\x05ёȩ\x02Ӵӵ')
buf.write('\x05яȨ\x02ӵӶ\x05ёȩ\x02Ӷӷ')
buf.write('\x05эȧ\x02ӷӸ\x05ёȩ\x02Ӹӹ')
buf.write('\x05ѝȯ\x02ӹӺ\x05љȭ\x02Ӻӻ')
buf.write('\x07a\x02\x02ӻӼ\x05ћȮ\x02Ӽӽ\x05ї')
buf.write('Ȭ\x02ӽӾ\x05еț\x02Ӿӿ\x05я')
buf.write('Ȩ\x02ӿԀ\x05љȭ\x02Ԁԁ\x05е')
buf.write('ț\x02ԁԂ\x05йȝ\x02Ԃԃ\x05ћ')
buf.write('Ȯ\x02ԃԄ\x05хȣ\x02Ԅԅ\x05ё')
buf.write('ȩ\x02ԅԆ\x05яȨ\x02Ԇ2\x03\x02\x02\x02')
buf.write('ԇԈ\x05зȜ\x02Ԉԉ\x05еț')
buf.write('\x02ԉԊ\x05ћȮ\x02Ԋԋ\x05йȝ')
buf.write('\x02ԋԌ\x05уȢ\x02Ԍ4\x03\x02\x02\x02ԍ')
buf.write('Ԏ\x05зȜ\x02Ԏԏ\x05нȟ\x02ԏ')
buf.write('Ԑ\x05пȠ\x02Ԑԑ\x05ёȩ\x02ԑ')
buf.write('Ԓ\x05їȬ\x02Ԓԓ\x05нȟ\x02ԓ')
buf.write('6\x03\x02\x02\x02Ԕԕ\x05зȜ\x02ԕԖ\x05')
buf.write('нȟ\x02Ԗԗ\x05сȡ\x02ԗԘ')
buf.write('\x05хȣ\x02Ԙԙ\x05яȨ\x02ԙ8\x03')
buf.write('\x02\x02\x02Ԛԛ\x05зȜ\x02ԛԜ\x05н')
buf.write('ȟ\x02Ԝԝ\x05ћȮ\x02ԝԞ\x05ѡ')
buf.write('ȱ\x02Ԟԟ\x05нȟ\x02ԟԠ\x05н')
buf.write('ȟ\x02Ԡԡ\x05яȨ\x02ԡ:\x03\x02\x02\x02Ԣ')
buf.write('ԣ\x05зȜ\x02ԣԤ\x05пȠ\x02Ԥ')
buf.write('ԥ\x05хȣ\x02ԥԦ\x05ыȦ\x02Ԧ')
buf.write('ԧ\x05нȟ\x02ԧ<\x03\x02\x02\x02Ԩԩ\x05з')
buf.write('Ȝ\x02ԩԪ\x05хȣ\x02Ԫԫ\x05я')
buf.write('Ȩ\x02ԫԬ\x05еț\x02Ԭԭ\x05ї')
buf.write('Ȭ\x02ԭԮ\x05ѥȳ\x02Ԯԯ\x07a\x02')
buf.write('\x02ԯ\u0530\x05лȞ\x02\u0530Ա\x05ёȩ')
buf.write('\x02ԱԲ\x05ѝȯ\x02ԲԳ\x05зȜ')
buf.write('\x02ԳԴ\x05ыȦ\x02ԴԵ\x05нȟ')
buf.write('\x02Ե>\x03\x02\x02\x02ԶԷ\x05зȜ\x02ԷԸ')
buf.write('\x05хȣ\x02ԸԹ\x05яȨ\x02ԹԺ')
buf.write('\x05еț\x02ԺԻ\x05їȬ\x02ԻԼ')
buf.write('\x05ѥȳ\x02ԼԽ\x07a\x02\x02ԽԾ\x05п')
buf.write('Ƞ\x02ԾԿ\x05ыȦ\x02ԿՀ\x05ё')
buf.write('ȩ\x02ՀՁ\x05еț\x02ՁՂ\x05ћ')
buf.write('Ȯ\x02Ղ@\x03\x02\x02\x02ՃՄ\x05зȜ\x02Մ')
buf.write('Յ\x05хȣ\x02ՅՆ\x05яȨ\x02Ն')
buf.write('Շ\x05еț\x02ՇՈ\x05їȬ\x02Ո')
buf.write('Չ\x05ѥȳ\x02ՉՊ\x07a\x02\x02ՊՋ')
buf.write('\x05хȣ\x02ՋՌ\x05яȨ\x02ՌՍ')
buf.write('\x05ћȮ\x02ՍՎ\x05нȟ\x02ՎՏ')
buf.write('\x05сȡ\x02ՏՐ\x05нȟ\x02ՐՑ')
buf.write('\x05їȬ\x02ՑB\x03\x02\x02\x02ՒՓ\x05зȜ')
buf.write('\x02ՓՔ\x05ыȦ\x02ՔՕ\x05ёȩ')
buf.write('\x02ՕՖ\x05зȜ\x02ՖD\x03\x02\x02\x02\u0557\u0558')
buf.write('\x05зȜ\x02\u0558ՙ\x05ыȦ\x02ՙ՚')
buf.write('\x05ёȩ\x02՚՛\x05йȝ\x02՛՜')
buf.write('\x05щȥ\x02՜F\x03\x02\x02\x02՝՞\x05зȜ')
buf.write('\x02՞՟\x05ёȩ\x02՟ՠ\x05лȞ')
buf.write('\x02ՠա\x05ѥȳ\x02աH\x03\x02\x02\x02բգ')
buf.write('\x05зȜ\x02գդ\x05ёȩ\x02դե')
buf.write('\x05ёȩ\x02եզ\x05ыȦ\x02զէ')
buf.write('\x05нȟ\x02էը\x05еț\x02ըթ')
buf.write('\x05яȨ\x02թJ\x03\x02\x02\x02ժի\x05зȜ')
buf.write('\x02իլ\x05ёȩ\x02լխ\x05ћȮ')
buf.write('\x02խծ\x05уȢ\x02ծL\x03\x02\x02\x02կհ')
buf.write('\x05зȜ\x02հձ\x05їȬ\x02ձղ')
buf.write('\x05нȟ\x02ղճ\x05еț\x02ճմ')
buf.write('\x05лȞ\x02մյ\x05ћȮ\x02յն')
buf.write('\x05уȢ\x02նN\x03\x02\x02\x02շո\x05зȜ')
buf.write('\x02ոչ\x05ѝȯ\x02չպ\x05ыȦ')
buf.write('\x02պջ\x05щȥ\x02ջP\x03\x02\x02\x02ռս')
buf.write('\x05зȜ\x02սվ\x05ѥȳ\x02վR\x03')
buf.write('\x02\x02\x02տր\x05зȜ\x02րց\x05ѥ')
buf.write('ȳ\x02ցւ\x05ћȮ\x02ւփ\x05н')
buf.write('ȟ\x02փT\x03\x02\x02\x02քօ\x05йȝ\x02օ')
buf.write('V\x03\x02\x02\x02ֆև\x05йȝ\x02ևֈ\x05е')
buf.write('ț\x02ֈ։\x05йȝ\x02։֊\x05у')
buf.write('Ȣ\x02֊\u058b\x05нȟ\x02\u058bX\x03\x02\x02\x02\u058c')
buf.write('֍\x05йȝ\x02֍֎\x05еț\x02֎')
buf.write('֏\x05ыȦ\x02֏\u0590\x05ыȦ\x02\u0590')
buf.write('Z\x03\x02\x02\x02֑֒\x05йȝ\x02֒֓\x05е')
buf.write('ț\x02֓֔\x05яȨ\x02֔֕\x05ё')
buf.write('ȩ\x02֖֕\x05яȨ\x02֖֗\x05х')
buf.write('ȣ\x02֗֘\x05йȝ\x02֘֙\x05е')
buf.write('ț\x02֚֙\x05ыȦ\x02֚\\\x03\x02\x02\x02֛')
buf.write('֜\x05йȝ\x02֜֝\x05еț\x02֝')
buf.write('֞\x05љȭ\x02֞֟\x05йȝ\x02֟')
buf.write('֠\x05еț\x02֠֡\x05лȞ\x02֡')
buf.write('֢\x05нȟ\x02֢^\x03\x02\x02\x02֣֤\x05й')
buf.write('ȝ\x02֤֥\x05еț\x02֥֦\x05љ')
buf.write('ȭ\x02֦֧\x05нȟ\x02֧`\x03\x02\x02\x02֨')
buf.write('֩\x05йȝ\x02֪֩\x05еț\x02֪')
buf.write('֫\x05љȭ\x02֫֬\x05ћȮ\x02֬')
buf.write('b\x03\x02\x02\x02֭֮\x05йȝ\x02֮֯\x05у')
buf.write('Ȣ\x02ְ֯\x05еț\x02ְֱ\x05ї')
buf.write('Ȭ\x02ֱd\x03\x02\x02\x02ֲֳ\x05йȝ\x02ֳ')
buf.write('ִ\x05уȢ\x02ִֵ\x05еț\x02ֵ')
buf.write('ֶ\x05їȬ\x02ֶַ\x07a\x02\x02ַָ')
buf.write('\x05йȝ\x02ָֹ\x05љȭ\x02ֹf\x03')
buf.write('\x02\x02\x02ֺֻ\x05йȝ\x02ֻּ\x05у')
buf.write('Ȣ\x02ּֽ\x05еț\x02ֽ־\x05ї')
buf.write('Ȭ\x02־ֿ\x05еț\x02ֿ׀\x05й')
buf.write('ȝ\x02׀ׁ\x05ћȮ\x02ׁׂ\x05н')
buf.write('ȟ\x02ׂ׃\x05їȬ\x02׃h\x03\x02\x02\x02ׄ')
buf.write('ׅ\x05йȝ\x02ׅ׆\x05уȢ\x02׆')
buf.write('ׇ\x05нȟ\x02ׇ\u05c8\x05йȝ\x02\u05c8')
buf.write('\u05c9\x05щȥ\x02\u05c9j\x03\x02\x02\x02\u05ca\u05cb\x05й')
buf.write('ȝ\x02\u05cb\u05cc\x05уȢ\x02\u05cc\u05cd\x05ї')
buf.write('Ȭ\x02\u05cdl\x03\x02\x02\x02\u05ce\u05cf\x05йȝ\x02\u05cf')
buf.write('א\x05ыȦ\x02אב\x05ёȩ\x02ב')
buf.write('ג\x05зȜ\x02גn\x03\x02\x02\x02דה\x05й')
buf.write('ȝ\x02הו\x05ыȦ\x02וז\x05ё')
buf.write('ȩ\x02זח\x05љȭ\x02חט\x05н')
buf.write('ȟ\x02טp\x03\x02\x02\x02יך\x05йȝ\x02ך')
buf.write('כ\x05ыȦ\x02כל\x05ѝȯ\x02ל')
buf.write('ם\x05љȭ\x02םמ\x05ћȮ\x02מ')
buf.write('ן\x05нȟ\x02ןנ\x05їȬ\x02נ')
buf.write('r\x03\x02\x02\x02סע\x05йȝ\x02עף\x05ё')
buf.write('ȩ\x02ףפ\x05ыȦ\x02פץ\x05ы')
buf.write('Ȧ\x02ץצ\x05нȟ\x02צק\x05й')
buf.write('ȝ\x02קר\x05ћȮ\x02רt\x03\x02\x02\x02ש')
buf.write('ת\x05йȝ\x02ת\u05eb\x05ёȩ\x02\u05eb')
buf.write('\u05ec\x05ыȦ\x02\u05ec\u05ed\x05ѝȯ\x02\u05ed')
buf.write('\u05ee\x05эȧ\x02\u05eeׯ\x05яȨ\x02ׯ')
buf.write('װ\x05љȭ\x02װv\x03\x02\x02\x02ױײ\x05й')
buf.write('ȝ\x02ײ׳\x05ёȩ\x02׳״\x05э')
buf.write('ȧ\x02״\u05f5\x05эȧ\x02\u05f5\u05f6\x05н')
buf.write('ȟ\x02\u05f6\u05f7\x05яȨ\x02\u05f7\u05f8\x05ћ')
buf.write('Ȯ\x02\u05f8x\x03\x02\x02\x02\u05f9\u05fa\x05йȝ\x02\u05fa')
buf.write('\u05fb\x05ёȩ\x02\u05fb\u05fc\x05эȧ\x02\u05fc')
buf.write('\u05fd\x05эȧ\x02\u05fd\u05fe\x05хȣ\x02\u05fe')
buf.write('\u05ff\x05ћȮ\x02\u05ffz\x03\x02\x02\x02\u0600\u0601\x05й')
buf.write('ȝ\x02\u0601\u0602\x05ёȩ\x02\u0602\u0603\x05э')
buf.write('ȧ\x02\u0603\u0604\x05эȧ\x02\u0604\u0605\x05х')
buf.write('ȣ\x02\u0605؆\x05ћȮ\x02؆؇\x05ћ')
buf.write('Ȯ\x02؇؈\x05нȟ\x02؈؉\x05л')
buf.write('Ȟ\x02؉|\x03\x02\x02\x02؊؋\x05йȝ\x02؋')
buf.write('،\x05ёȩ\x02،؍\x05эȧ\x02؍')
buf.write('؎\x05ѓȪ\x02؎؏\x05еț\x02؏')
buf.write('ؐ\x05ћȮ\x02ؐؑ\x05хȣ\x02ؑ')
buf.write('ؒ\x05зȜ\x02ؒؓ\x05хȣ\x02ؓ')
buf.write('ؔ\x05ыȦ\x02ؔؕ\x05хȣ\x02ؕ')
buf.write('ؖ\x05ћȮ\x02ؖؗ\x05ѥȳ\x02ؗ')
buf.write('~\x03\x02\x02\x02ؘؙ\x05йȝ\x02ؙؚ\x05ё')
buf.write('ȩ\x02ؚ؛\x05эȧ\x02؛\u061c\x05ѓ')
buf.write('Ȫ\x02\u061c؝\x05хȣ\x02؝؞\x05ы')
buf.write('Ȧ\x02؞؟\x05нȟ\x02؟\x80\x03\x02\x02')
buf.write('\x02ؠء\x05йȝ\x02ءآ\x05ёȩ')
buf.write('\x02آأ\x05эȧ\x02أؤ\x05ѓȪ')
buf.write('\x02ؤإ\x05ёȩ\x02إئ\x05ѝȯ')
buf.write('\x02ئا\x05яȨ\x02اب\x05лȞ')
buf.write('\x02ب\x82\x03\x02\x02\x02ةت\x05йȝ\x02ت')
buf.write('ث\x05ёȩ\x02ثج\x05яȨ\x02ج')
buf.write('ح\x05яȨ\x02حخ\x05нȟ\x02خ')
buf.write('د\x05йȝ\x02دذ\x05ћȮ\x02ذ')
buf.write('\x84\x03\x02\x02\x02رز\x05йȝ\x02زس')
buf.write('\x05ёȩ\x02سش\x05яȨ\x02شص')
buf.write('\x05яȨ\x02صض\x05нȟ\x02ضط')
buf.write('\x05йȝ\x02طظ\x05ћȮ\x02ظع')
buf.write('\x07a\x02\x02عغ\x05зȜ\x02غػ\x05ѥ')
buf.write('ȳ\x02ػؼ\x07a\x02\x02ؼؽ\x05їȬ')
buf.write('\x02ؽؾ\x05ёȩ\x02ؾؿ\x05ёȩ')
buf.write('\x02ؿـ\x05ћȮ\x02ـ\x86\x03\x02\x02\x02ف')
buf.write('ق\x05йȝ\x02قك\x05ёȩ\x02ك')
buf.write('ل\x05яȨ\x02لم\x05љȭ\x02م')
buf.write('ن\x05ћȮ\x02نه\x05еț\x02ه')
buf.write('و\x05яȨ\x02وى\x05ћȮ\x02ى')
buf.write('\x88\x03\x02\x02\x02يً\x05йȝ\x02ًٌ')
buf.write('\x05ёȩ\x02ٌٍ\x05яȨ\x02ٍَ')
buf.write('\x05љȭ\x02َُ\x05ћȮ\x02ُِ')
buf.write('\x05їȬ\x02ِّ\x05еț\x02ّْ')
buf.write('\x05хȣ\x02ْٓ\x05яȨ\x02ٓٔ')
buf.write('\x05ћȮ\x02ٔ\x8a\x03\x02\x02\x02ٕٖ\x05й')
buf.write('ȝ\x02ٖٗ\x05ёȩ\x02ٗ٘\x05я')
buf.write('Ȩ\x02٘ٙ\x05љȭ\x02ٙٚ\x05ћ')
buf.write('Ȯ\x02ٚٛ\x05їȬ\x02ٜٛ\x05е')
buf.write('ț\x02ٜٝ\x05хȣ\x02ٝٞ\x05я')
buf.write('Ȩ\x02ٟٞ\x05ћȮ\x02ٟ٠\x05љ')
buf.write('ȭ\x02٠\x8c\x03\x02\x02\x02١٢\x05йȝ')
buf.write('\x02٢٣\x05ёȩ\x02٣٤\x05яȨ')
buf.write('\x02٤٥\x05љȭ\x02٥٦\x05ћȮ')
buf.write('\x02٦٧\x05їȬ\x02٧٨\x05ѝȯ')
buf.write('\x02٨٩\x05йȝ\x02٩٪\x05ћȮ')
buf.write('\x02٪٫\x05ёȩ\x02٫٬\x05їȬ')
buf.write('\x02٬\x8e\x03\x02\x02\x02٭ٮ\x05йȝ\x02ٮ')
buf.write('ٯ\x05ёȩ\x02ٯٰ\x05яȨ\x02ٰ')
buf.write('ٱ\x05ћȮ\x02ٱٲ\x05нȟ\x02ٲ')
buf.write('ٳ\x05яȨ\x02ٳٴ\x05ћȮ\x02ٴ')
buf.write('\x90\x03\x02\x02\x02ٵٶ\x05йȝ\x02ٶٷ')
buf.write('\x05ёȩ\x02ٷٸ\x05яȨ\x02ٸٹ')
buf.write('\x05ћȮ\x02ٹٺ\x05нȟ\x02ٺٻ')
buf.write('\x05ѣȲ\x02ٻټ\x05ћȮ\x02ټ\x92')
buf.write('\x03\x02\x02\x02ٽپ\x05йȝ\x02پٿ\x05ё')
buf.write('ȩ\x02ٿڀ\x05яȨ\x02ڀځ\x05ћ')
buf.write('Ȯ\x02ځڂ\x05хȣ\x02ڂڃ\x05я')
buf.write('Ȩ\x02ڃڄ\x05ѝȯ\x02ڄڅ\x05н')
buf.write('ȟ\x02څ\x94\x03\x02\x02\x02چڇ\x05йȝ')
buf.write('\x02ڇڈ\x05ёȩ\x02ڈډ\x05яȨ')
buf.write('\x02ډڊ\x05џȰ\x02ڊڋ\x05нȟ')
buf.write('\x02ڋڌ\x05їȬ\x02ڌڍ\x05ћȮ')
buf.write('\x02ڍ\x96\x03\x02\x02\x02ڎڏ\x05йȝ\x02ڏ')
buf.write('ڐ\x05ёȩ\x02ڐڑ\x05їȬ\x02ڑ')
buf.write('ڒ\x05їȬ\x02ڒړ\x05ѝȯ\x02ړ')
buf.write('ڔ\x05ѓȪ\x02ڔڕ\x05ћȮ\x02ڕ')
buf.write('ږ\x07a\x02\x02ږڗ\x05ѣȲ\x02ڗژ')
buf.write('\x05хȣ\x02ژڙ\x05лȞ\x02ڙ\x98')
buf.write('\x03\x02\x02\x02ښڛ\x05йȝ\x02ڛڜ\x05ё')
buf.write('ȩ\x02ڜڝ\x05їȬ\x02ڝڞ\x05ї')
buf.write('Ȭ\x02ڞڟ\x05ѝȯ\x02ڟڠ\x05ѓ')
buf.write('Ȫ\x02ڠڡ\x05ћȮ\x02ڡڢ\x07a\x02')
buf.write('\x02ڢڣ\x05ѣȲ\x02ڣڤ\x05хȣ')
buf.write('\x02ڤڥ\x05лȞ\x02ڥڦ\x07a\x02\x02ڦ')
buf.write('ڧ\x05еț\x02ڧڨ\x05ыȦ\x02ڨ')
buf.write('ک\x05ыȦ\x02ک\x9a\x03\x02\x02\x02ڪګ')
buf.write('\x05йȝ\x02ګڬ\x05ёȩ\x02ڬڭ')
buf.write('\x05љȭ\x02ڭڮ\x05ћȮ\x02ڮ\x9c')
buf.write('\x03\x02\x02\x02گڰ\x05йȝ\x02ڰڱ\x05ё')
buf.write('ȩ\x02ڱڲ\x05ѝȯ\x02ڲڳ\x05я')
buf.write('Ȩ\x02ڳڴ\x05ћȮ\x02ڴ\x9e\x03\x02\x02')
buf.write('\x02ڵڶ\x05йȝ\x02ڶڷ\x05їȬ')
buf.write('\x02ڷڸ\x05нȟ\x02ڸڹ\x05еț')
buf.write('\x02ڹں\x05ћȮ\x02ںڻ\x05нȟ')
buf.write('\x02ڻ\xa0\x03\x02\x02\x02ڼڽ\x05йȝ\x02ڽ')
buf.write('ھ\x05їȬ\x02ھڿ\x05ёȩ\x02ڿ')
buf.write('ۀ\x05љȭ\x02ۀہ\x05љȭ\x02ہ')
buf.write('¢\x03\x02\x02\x02ۂۃ\x05йȝ\x02ۃۄ')
buf.write('\x05ѝȯ\x02ۄۅ\x05зȜ\x02ۅۆ')
buf.write('\x05нȟ\x02ۆ¤\x03\x02\x02\x02ۇۈ\x05й')
buf.write('ȝ\x02ۈۉ\x05ѝȯ\x02ۉۊ\x05ї')
buf.write('Ȭ\x02ۊۋ\x05їȬ\x02ۋی\x05н')
buf.write('ȟ\x02یۍ\x05яȨ\x02ۍێ\x05ћ')
buf.write('Ȯ\x02ێ¦\x03\x02\x02\x02ۏې\x05йȝ')
buf.write('\x02ېۑ\x05ѝȯ\x02ۑے\x05їȬ')
buf.write('\x02ےۓ\x05їȬ\x02ۓ۔\x05нȟ')
buf.write('\x02۔ە\x05яȨ\x02ەۖ\x05ћȮ')
buf.write('\x02ۖۗ\x07a\x02\x02ۗۘ\x05ѝȯ\x02ۘ')
buf.write('ۙ\x05љȭ\x02ۙۚ\x05нȟ\x02ۚ')
buf.write('ۛ\x05їȬ\x02ۛ¨\x03\x02\x02\x02ۜ\u06dd')
buf.write('\x05йȝ\x02\u06dd۞\x05ѝȯ\x02۞۟')
buf.write('\x05їȬ\x02۟۠\x05љȭ\x02۠ۡ')
buf.write('\x05ёȩ\x02ۡۢ\x05їȬ\x02ۢª')
buf.write('\x03\x02\x02\x02ۣۤ\x05йȝ\x02ۤۥ\x05ѝ')
buf.write('ȯ\x02ۥۦ\x05љȭ\x02ۦۧ\x05ћ')
buf.write('Ȯ\x02ۧۨ\x05ёȩ\x02ۨ۩\x05э')
buf.write('ȧ\x02۩۪\x05лȞ\x02۪۫\x05е')
buf.write('ț\x02۫۬\x05ћȮ\x02ۭ۬\x05ѝ')
buf.write('ȯ\x02ۭۮ\x05эȧ\x02ۮ¬\x03\x02\x02')
buf.write('\x02ۯ۰\x05йȝ\x02۰۱\x05ѥȳ')
buf.write('\x02۱۲\x05йȝ\x02۲۳\x05ыȦ')
buf.write('\x02۳۴\x05нȟ\x02۴®\x03\x02\x02\x02۵')
buf.write('۶\x05лȞ\x02۶۷\x05еț\x02۷')
buf.write('۸\x05ћȮ\x02۸۹\x05еț\x02۹')
buf.write('°\x03\x02\x02\x02ۺۻ\x05лȞ\x02ۻۼ')
buf.write('\x05еț\x02ۼ۽\x05ћȮ\x02۽۾')
buf.write('\x05еț\x02۾ۿ\x05зȜ\x02ۿ܀')
buf.write('\x05еț\x02܀܁\x05љȭ\x02܁܂')
buf.write('\x05нȟ\x02܂²\x03\x02\x02\x02܃܄\x05л')
buf.write('Ȟ\x02܄܅\x05еț\x02܅܆\x05ћ')
buf.write('Ȯ\x02܆܇\x05нȟ\x02܇´\x03\x02\x02')
buf.write('\x02܈܉\x05лȞ\x02܉܊\x05еț')
buf.write('\x02܊܋\x05ѥȳ\x02܋¶\x03\x02\x02\x02܌')
buf.write('܍\x05лȞ\x02܍\u070e\x05зȜ\x02\u070e')
buf.write('\u070f\x07a\x02\x02\u070fܐ\x05їȬ\x02ܐܑ')
buf.write('\x05ёȩ\x02ܑܒ\x05ыȦ\x02ܒܓ')
buf.write('\x05нȟ\x02ܓܔ\x07a\x02\x02ܔܕ\x05й')
buf.write('ȝ\x02ܕܖ\x05уȢ\x02ܖܗ\x05е')
buf.write('ț\x02ܗܘ\x05яȨ\x02ܘܙ\x05с')
buf.write('ȡ\x02ܙܚ\x05нȟ\x02ܚ¸\x03\x02\x02')
buf.write('\x02ܛܜ\x05лȞ\x02ܜܝ\x05зȜ')
buf.write('\x02ܝܞ\x05ћȮ\x02ܞܟ\x05хȣ')
buf.write('\x02ܟܠ\x05эȧ\x02ܠܡ\x05нȟ')
buf.write('\x02ܡܢ\x05ѧȴ\x02ܢܣ\x05ёȩ')
buf.write('\x02ܣܤ\x05яȨ\x02ܤܥ\x05нȟ')
buf.write('\x02ܥº\x03\x02\x02\x02ܦܧ\x05лȞ\x02ܧ')
buf.write('ܨ\x05лȞ\x02ܨܩ\x05ыȦ\x02ܩ')
buf.write('¼\x03\x02\x02\x02ܪܫ\x05лȞ\x02ܫܬ')
buf.write('\x05нȟ\x02ܬܭ\x05зȜ\x02ܭܮ')
buf.write('\x05ѝȯ\x02ܮܯ\x05сȡ\x02ܯ¾')
buf.write('\x03\x02\x02\x02ܱܰ\x05лȞ\x02ܱܲ\x05н')
buf.write('ȟ\x02ܲܳ\x05йȝ\x02ܳÀ\x03\x02\x02')
buf.write('\x02ܴܵ\x05лȞ\x02ܵܶ\x05нȟ')
buf.write('\x02ܷܶ\x05йȝ\x02ܷܸ\x05хȣ')
buf.write('\x02ܸܹ\x05эȧ\x02ܹܺ\x05еț')
buf.write('\x02ܻܺ\x05ыȦ\x02ܻÂ\x03\x02\x02\x02ܼ')
buf.write('ܽ\x05лȞ\x02ܾܽ\x05нȟ\x02ܾ')
buf.write('ܿ\x05йȝ\x02ܿ݀\x05ыȦ\x02݀')
buf.write('݁\x05еț\x02݂݁\x05їȬ\x02݂')
buf.write('݃\x05нȟ\x02݃Ä\x03\x02\x02\x02݄݅')
buf.write('\x05лȞ\x02݆݅\x05нȟ\x02݆݇')
buf.write('\x05йȝ\x02݈݇\x05ёȩ\x02݈݉')
buf.write('\x05эȧ\x02݉݊\x05ѓȪ\x02݊\u074b')
buf.write('\x05ёȩ\x02\u074b\u074c\x05љȭ\x02\u074cݍ')
buf.write('\x05нȟ\x02ݍÆ\x03\x02\x02\x02ݎݏ\x05л')
buf.write('Ȟ\x02ݏݐ\x05нȟ\x02ݐݑ\x05й')
buf.write('ȝ\x02ݑݒ\x05їȬ\x02ݒݓ\x05н')
buf.write('ȟ\x02ݓݔ\x05эȧ\x02ݔݕ\x05н')
buf.write('ȟ\x02ݕݖ\x05яȨ\x02ݖݗ\x05ћ')
buf.write('Ȯ\x02ݗÈ\x03\x02\x02\x02ݘݙ\x05лȞ')
buf.write('\x02ݙݚ\x05нȟ\x02ݚݛ\x05пȠ')
buf.write('\x02ݛݜ\x05еț\x02ݜݝ\x05ѝȯ')
buf.write('\x02ݝݞ\x05ыȦ\x02ݞݟ\x05ћȮ')
buf.write('\x02ݟÊ\x03\x02\x02\x02ݠݡ\x05лȞ\x02ݡ')
buf.write('ݢ\x05нȟ\x02ݢݣ\x05пȠ\x02ݣ')
buf.write('ݤ\x05еț\x02ݤݥ\x05ѝȯ\x02ݥ')
buf.write('ݦ\x05ыȦ\x02ݦݧ\x05ћȮ\x02ݧ')
buf.write('ݨ\x05љȭ\x02ݨÌ\x03\x02\x02\x02ݩݪ')
buf.write('\x05лȞ\x02ݪݫ\x05нȟ\x02ݫݬ')
buf.write('\x05пȠ\x02ݬݭ\x05нȟ\x02ݭݮ')
buf.write('\x05їȬ\x02ݮݯ\x05їȬ\x02ݯݰ')
buf.write('\x05нȟ\x02ݰݱ\x05лȞ\x02ݱÎ')
buf.write('\x03\x02\x02\x02ݲݳ\x05лȞ\x02ݳݴ\x05н')
buf.write('ȟ\x02ݴݵ\x05пȠ\x02ݵݶ\x05х')
buf.write('ȣ\x02ݶݷ\x05яȨ\x02ݷݸ\x05н')
buf.write('ȟ\x02ݸݹ\x05їȬ\x02ݹÐ\x03\x02\x02')
buf.write('\x02ݺݻ\x05лȞ\x02ݻݼ\x05нȟ')
buf.write('\x02ݼݽ\x05ыȦ\x02ݽݾ\x05нȟ')
buf.write('\x02ݾݿ\x05ћȮ\x02ݿހ\x05нȟ')
buf.write('\x02ހÒ\x03\x02\x02\x02ށނ\x05лȞ\x02ނ')
buf.write('ރ\x05нȟ\x02ރބ\x05ѓȪ\x02ބ')
buf.write('ޅ\x05ћȮ\x02ޅކ\x05уȢ\x02ކ')
buf.write('Ô\x03\x02\x02\x02އވ\x05лȞ\x02ވމ')
buf.write('\x05нȟ\x02މފ\x05љȭ\x02ފދ')
buf.write('\x05йȝ\x02ދÖ\x03\x02\x02\x02ތލ\x05л')
buf.write('Ȟ\x02ލގ\x05нȟ\x02ގޏ\x05ћ')
buf.write('Ȯ\x02ޏސ\x05нȟ\x02ސޑ\x05ї')
buf.write('Ȭ\x02ޑޒ\x05эȧ\x02ޒޓ\x05х')
buf.write('ȣ\x02ޓޔ\x05яȨ\x02ޔޕ\x05х')
buf.write('ȣ\x02ޕޖ\x05љȭ\x02ޖޗ\x05ћ')
buf.write('Ȯ\x02ޗޘ\x05хȣ\x02ޘޙ\x05й')
buf.write('ȝ\x02ޙØ\x03\x02\x02\x02ޚޛ\x05лȞ')
buf.write('\x02ޛޜ\x05хȣ\x02ޜޝ\x05эȧ')
buf.write('\x02ޝޞ\x05нȟ\x02ޞޟ\x05яȨ')
buf.write('\x02ޟޠ\x05љȭ\x02ޠޡ\x05хȣ')
buf.write('\x02ޡޢ\x05ёȩ\x02ޢޣ\x05яȨ')
buf.write('\x02ޣÚ\x03\x02\x02\x02ޤޥ\x05лȞ\x02ޥ')
buf.write('ަ\x05хȣ\x02ަާ\x05љȭ\x02ާ')
buf.write('ި\x05еț\x02ިީ\x05зȜ\x02ީ')
buf.write('ު\x05ыȦ\x02ުޫ\x05нȟ\x02ޫ')
buf.write('Ü\x03\x02\x02\x02ެޭ\x05лȞ\x02ޭޮ')
buf.write('\x05хȣ\x02ޮޯ\x05љȭ\x02ޯް')
buf.write('\x05еț\x02ްޱ\x05љȭ\x02ޱ\u07b2')
buf.write('\x05љȭ\x02\u07b2\u07b3\x05ёȩ\x02\u07b3\u07b4')
buf.write('\x05йȝ\x02\u07b4\u07b5\x05хȣ\x02\u07b5\u07b6')
buf.write('\x05еț\x02\u07b6\u07b7\x05ћȮ\x02\u07b7\u07b8')
buf.write('\x05нȟ\x02\u07b8Þ\x03\x02\x02\x02\u07b9\u07ba\x05л')
buf.write('Ȟ\x02\u07ba\u07bb\x05хȣ\x02\u07bb\u07bc\x05љ')
buf.write('ȭ\x02\u07bc\u07bd\x05ћȮ\x02\u07bd\u07be\x05х')
buf.write('ȣ\x02\u07be\u07bf\x05яȨ\x02\u07bf߀\x05й')
buf.write('ȝ\x02߀߁\x05ћȮ\x02߁à\x03\x02\x02')
buf.write('\x02߂߃\x05лȞ\x02߃߄\x05ёȩ')
buf.write('\x02߄߅\x05йȝ\x02߅߆\x05ѝȯ')
buf.write('\x02߆߇\x05эȧ\x02߇߈\x05нȟ')
buf.write('\x02߈߉\x05яȨ\x02߉ߊ\x05ћȮ')
buf.write('\x02ߊâ\x03\x02\x02\x02ߋߌ\x05лȞ\x02ߌ')
buf.write('ߍ\x05ёȩ\x02ߍߎ\x05ѝȯ\x02ߎ')
buf.write('ߏ\x05зȜ\x02ߏߐ\x05ыȦ\x02ߐ')
buf.write('ߑ\x05нȟ\x02ߑä\x03\x02\x02\x02ߒߓ')
buf.write('\x05лȞ\x02ߓߔ\x05їȬ\x02ߔߕ')
buf.write('\x05ёȩ\x02ߕߖ\x05ѓȪ\x02ߖæ')
buf.write('\x03\x02\x02\x02ߗߘ\x05лȞ\x02ߘߙ\x05љ')
buf.write('ȭ\x02ߙߚ\x05хȣ\x02ߚߛ\x05я')
buf.write('Ȩ\x02ߛߜ\x05ћȮ\x02ߜߝ\x05н')
buf.write('ȟ\x02ߝߞ\x05їȬ\x02ߞߟ\x05џ')
buf.write('Ȱ\x02ߟߠ\x05еț\x02ߠߡ\x05ы')
buf.write('Ȧ\x02ߡߢ\x07a\x02\x02ߢߣ\x05ѝȯ')
buf.write('\x02ߣߤ\x05яȨ\x02ߤߥ\x05йȝ')
buf.write('\x02ߥߦ\x05ёȩ\x02ߦߧ\x05яȨ')
buf.write('\x02ߧߨ\x05љȭ\x02ߨߩ\x05ћȮ')
buf.write('\x02ߩߪ\x05їȬ\x02ߪ߫\x05еț')
buf.write('\x02߫߬\x05хȣ\x02߬߭\x05яȨ')
buf.write('\x02߭߮\x05нȟ\x02߮߯\x05лȞ')
buf.write('\x02߯è\x03\x02\x02\x02߰߱\x05нȟ\x02߱')
buf.write('߲\x05еț\x02߲߳\x05йȝ\x02߳')
buf.write('ߴ\x05уȢ\x02ߴê\x03\x02\x02\x02ߵ߶')
buf.write('\x05нȟ\x02߶߷\x05ыȦ\x02߷߸')
buf.write('\x05нȟ\x02߸߹\x05эȧ\x02߹ߺ')
buf.write('\x05нȟ\x02ߺ\u07fb\x05яȨ\x02\u07fb\u07fc')
buf.write('\x05ћȮ\x02\u07fcì\x03\x02\x02\x02߽߾\x05н')
buf.write('ȟ\x02߾߿\x05ыȦ\x02߿ࠀ\x05љ')
buf.write('ȭ\x02ࠀࠁ\x05нȟ\x02ࠁî\x03\x02\x02')
buf.write('\x02ࠂࠃ\x05нȟ\x02ࠃࠄ\x05ыȦ')
buf.write('\x02ࠄࠅ\x05љȭ\x02ࠅࠆ\x05хȣ')
buf.write('\x02ࠆࠇ\x05пȠ\x02ࠇð\x03\x02\x02\x02ࠈ')
buf.write('ࠉ\x05нȟ\x02ࠉࠊ\x05эȧ\x02ࠊ')
buf.write('ࠋ\x05ѓȪ\x02ࠋࠌ\x05ћȮ\x02ࠌ')
buf.write('ࠍ\x05ѥȳ\x02ࠍò\x03\x02\x02\x02ࠎࠏ')
buf.write('\x05нȟ\x02ࠏࠐ\x05яȨ\x02ࠐࠑ')
buf.write('\x05еț\x02ࠑࠒ\x05зȜ\x02ࠒࠓ')
buf.write('\x05ыȦ\x02ࠓࠔ\x05нȟ\x02ࠔô')
buf.write('\x03\x02\x02\x02ࠕࠖ\x05нȟ\x02ࠖࠗ\x05я')
buf.write('Ȩ\x02ࠗ࠘\x05йȝ\x02࠘࠙\x05ё')
buf.write('ȩ\x02࠙ࠚ\x05лȞ\x02ࠚࠛ\x05х')
buf.write('ȣ\x02ࠛࠜ\x05яȨ\x02ࠜࠝ\x05с')
buf.write('ȡ\x02ࠝö\x03\x02\x02\x02ࠞࠟ\x05нȟ')
buf.write('\x02ࠟࠠ\x05яȨ\x02ࠠࠡ\x05лȞ')
buf.write('\x02ࠡø\x03\x02\x02\x02ࠢࠣ\x05нȟ\x02ࠣ')
buf.write('ࠤ\x05яȨ\x02ࠤࠥ\x05ћȮ\x02ࠥ')
buf.write('ࠦ\x05хȣ\x02ࠦࠧ\x05ћȮ\x02ࠧ')
buf.write('ࠨ\x05ѥȳ\x02ࠨࠩ\x05нȟ\x02ࠩ')
buf.write('ࠪ\x05љȭ\x02ࠪࠫ\x05йȝ\x02ࠫ')
buf.write('ࠬ\x05еț\x02ࠬ࠭\x05ѓȪ\x02࠭')
buf.write('\u082e\x05хȣ\x02\u082e\u082f\x05яȨ\x02\u082f')
buf.write('࠰\x05сȡ\x02࠰ú\x03\x02\x02\x02࠱࠲')
buf.write('\x05нȟ\x02࠲࠳\x05їȬ\x02࠳࠴')
buf.write('\x05їȬ\x02࠴ü\x03\x02\x02\x02࠵࠶\x05н')
buf.write('ȟ\x02࠶࠷\x05їȬ\x02࠷࠸\x05ї')
buf.write('Ȭ\x02࠸࠹\x05ёȩ\x02࠹࠺\x05ї')
buf.write('Ȭ\x02࠺࠻\x05љȭ\x02࠻þ\x03\x02\x02')
buf.write('\x02࠼࠽\x05нȟ\x02࠽࠾\x05љȭ')
buf.write('\x02࠾\u083f\x05йȝ\x02\u083fࡀ\x05еț')
buf.write('\x02ࡀࡁ\x05ѓȪ\x02ࡁࡂ\x05нȟ')
buf.write('\x02ࡂĀ\x03\x02\x02\x02ࡃࡄ\x05нȟ\x02ࡄ')
buf.write('ࡅ\x05џȰ\x02ࡅࡆ\x05еț\x02ࡆ')
buf.write('ࡇ\x05ыȦ\x02ࡇࡈ\x05яȨ\x02ࡈ')
buf.write('ࡉ\x05еț\x02ࡉࡊ\x05эȧ\x02ࡊ')
buf.write('ࡋ\x05нȟ\x02ࡋĂ\x03\x02\x02\x02ࡌࡍ')
buf.write('\x05нȟ\x02ࡍࡎ\x05ѣȲ\x02ࡎࡏ')
buf.write('\x05йȝ\x02ࡏࡐ\x05нȟ\x02ࡐࡑ')
buf.write('\x05ѓȪ\x02ࡑࡒ\x05ћȮ\x02ࡒĄ')
buf.write('\x03\x02\x02\x02ࡓࡔ\x05нȟ\x02ࡔࡕ\x05ѣ')
buf.write('Ȳ\x02ࡕࡖ\x05йȝ\x02ࡖࡗ\x05н')
buf.write('ȟ\x02ࡗࡘ\x05ѓȪ\x02ࡘ࡙\x05ћ')
buf.write('Ȯ\x02࡙࡚\x05хȣ\x02࡚࡛\x05ё')
buf.write('ȩ\x02࡛\u085c\x05яȨ\x02\u085cĆ\x03\x02\x02')
buf.write('\x02\u085d࡞\x05нȟ\x02࡞\u085f\x05ѣȲ')
buf.write('\x02\u085fࡠ\x05йȝ\x02ࡠࡡ\x05нȟ')
buf.write('\x02ࡡࡢ\x05ѓȪ\x02ࡢࡣ\x05ћȮ')
buf.write('\x02ࡣࡤ\x05хȣ\x02ࡤࡥ\x05ёȩ')
buf.write('\x02ࡥࡦ\x05яȨ\x02ࡦࡧ\x07a\x02\x02ࡧ')
buf.write('ࡨ\x05хȣ\x02ࡨࡩ\x05яȨ\x02ࡩ')
buf.write('ࡪ\x05хȣ\x02ࡪ\u086b\x05ћȮ\x02\u086b')
buf.write('Ĉ\x03\x02\x02\x02\u086c\u086d\x05нȟ\x02\u086d\u086e')
buf.write('\x05ѣȲ\x02\u086e\u086f\x05йȝ\x02\u086fࡰ')
buf.write('\x05нȟ\x02ࡰࡱ\x05ѓȪ\x02ࡱࡲ')
buf.write('\x05ћȮ\x02ࡲࡳ\x05хȣ\x02ࡳࡴ')
buf.write('\x05ёȩ\x02ࡴࡵ\x05яȨ\x02ࡵࡶ')
buf.write('\x05љȭ\x02ࡶĊ\x03\x02\x02\x02ࡷࡸ\x05н')
buf.write('ȟ\x02ࡸࡹ\x05ѣȲ\x02ࡹࡺ\x05й')
buf.write('ȝ\x02ࡺࡻ\x05ыȦ\x02ࡻࡼ\x05ѝ')
buf.write('ȯ\x02ࡼࡽ\x05лȞ\x02ࡽࡾ\x05н')
buf.write('ȟ\x02ࡾČ\x03\x02\x02\x02ࡿࢀ\x05нȟ')
buf.write('\x02ࢀࢁ\x05ѣȲ\x02ࢁࢂ\x05йȝ')
buf.write('\x02ࢂࢃ\x05ыȦ\x02ࢃࢄ\x05ѝȯ')
buf.write('\x02ࢄࢅ\x05љȭ\x02ࢅࢆ\x05хȣ')
buf.write('\x02ࢆࢇ\x05џȰ\x02ࢇ࢈\x05нȟ')
buf.write('\x02࢈Ď\x03\x02\x02\x02ࢉࢊ\x05нȟ\x02ࢊ')
buf.write('ࢋ\x05ѣȲ\x02ࢋࢌ\x05нȟ\x02ࢌ')
buf.write('ࢍ\x05йȝ\x02ࢍࢎ\x05ѝȯ\x02ࢎ')
buf.write('\u088f\x05ћȮ\x02\u088f\u0890\x05нȟ\x02\u0890')
buf.write('Đ\x03\x02\x02\x02\u0891\u0892\x05нȟ\x02\u0892\u0893')
buf.write('\x05ѣȲ\x02\u0893\u0894\x05хȣ\x02\u0894\u0895')
buf.write('\x05љȭ\x02\u0895\u0896\x05ћȮ\x02\u0896\u0897')
buf.write('\x05љȭ\x02\u0897Ē\x03\x02\x02\x02࢙࢘\x05н')
buf.write('ȟ\x02࢙࢚\x05ѣȲ\x02࢚࢛\x05х')
buf.write('ȣ\x02࢛࢜\x05ћȮ\x02࢜Ĕ\x03\x02\x02')
buf.write('\x02࢝࢞\x05нȟ\x02࢞࢟\x05ѣȲ')
buf.write('\x02࢟ࢠ\x05ѓȪ\x02ࢠࢡ\x05ыȦ')
buf.write('\x02ࢡࢢ\x05еț\x02ࢢࢣ\x05хȣ')
buf.write('\x02ࢣࢤ\x05яȨ\x02ࢤĖ\x03\x02\x02\x02ࢥ')
buf.write('ࢦ\x05нȟ\x02ࢦࢧ\x05ѣȲ\x02ࢧ')
buf.write('ࢨ\x05ћȮ\x02ࢨࢩ\x05нȟ\x02ࢩ')
buf.write('ࢪ\x05їȬ\x02ࢪࢫ\x05яȨ\x02ࢫ')
buf.write('ࢬ\x05еț\x02ࢬࢭ\x05ыȦ\x02ࢭ')
buf.write('Ę\x03\x02\x02\x02ࢮࢯ\x05нȟ\x02ࢯࢰ')
buf.write('\x05ѣȲ\x02ࢰࢱ\x05ћȮ\x02ࢱࢲ')
buf.write('\x05їȬ\x02ࢲࢳ\x05еț\x02ࢳࢴ')
buf.write('\x05йȝ\x02ࢴࢵ\x05ћȮ\x02ࢵĚ')
buf.write('\x03\x02\x02\x02ࢶࢷ\x05пȠ\x02ࢷࢸ\x05е')
buf.write('ț\x02ࢸࢹ\x05хȣ\x02ࢹࢺ\x05ы')
buf.write('Ȧ\x02ࢺࢻ\x05ѝȯ\x02ࢻࢼ\x05ї')
buf.write('Ȭ\x02ࢼࢽ\x05нȟ\x02ࢽĜ\x03\x02\x02')
buf.write('\x02ࢾࢿ\x05пȠ\x02ࢿࣀ\x05еț')
buf.write('\x02ࣀࣁ\x05ыȦ\x02ࣁࣂ\x05љȭ')
buf.write('\x02ࣂࣃ\x05нȟ\x02ࣃĞ\x03\x02\x02\x02ࣄ')
buf.write('ࣅ\x05пȠ\x02ࣅࣆ\x05нȟ\x02ࣆ')
buf.write('ࣇ\x05ћȮ\x02ࣇࣈ\x05йȝ\x02ࣈ')
buf.write('ࣉ\x05уȢ\x02ࣉĠ\x03\x02\x02\x02࣊࣋')
buf.write('\x05пȠ\x02࣋࣌\x05хȣ\x02࣌࣍')
buf.write('\x05яȨ\x02࣍࣎\x05еț\x02࣏࣎')
buf.write('\x05ыȦ\x02࣏Ģ\x03\x02\x02\x02࣐࣑\x05п')
buf.write('Ƞ\x02࣑࣒\x05хȣ\x02࣒࣓\x05ї')
buf.write('Ȭ\x02࣓ࣔ\x05љȭ\x02ࣔࣕ\x05ћ')
buf.write('Ȯ\x02ࣕĤ\x03\x02\x02\x02ࣖࣗ\x05пȠ')
buf.write('\x02ࣗࣘ\x05хȣ\x02ࣘࣙ\x05їȬ')
buf.write('\x02ࣙࣚ\x05љȭ\x02ࣚࣛ\x05ћȮ')
buf.write('\x02ࣛࣜ\x07a\x02\x02ࣜࣝ\x05џȰ\x02ࣝ')
buf.write('ࣞ\x05еț\x02ࣞࣟ\x05ыȦ\x02ࣟ')
buf.write('࣠\x05ѝȯ\x02࣠࣡\x05нȟ\x02࣡')
buf.write('Ħ\x03\x02\x02\x02\u08e2ࣣ\x05пȠ\x02ࣣࣤ')
buf.write('\x05ыȦ\x02ࣤࣥ\x05ёȩ\x02ࣦࣥ')
buf.write('\x05еț\x02ࣦࣧ\x05ћȮ\x02ࣧĨ')
buf.write('\x03\x02\x02\x02ࣩࣨ\x05пȠ\x02ࣩ࣪\x05ё')
buf.write('ȩ\x02࣪࣫\x05ыȦ\x02࣫࣬\x05ы')
buf.write('Ȧ\x02࣭࣬\x05ёȩ\x02࣭࣮\x05ѡ')
buf.write('ȱ\x02࣮࣯\x05хȣ\x02ࣰ࣯\x05я')
buf.write('Ȩ\x02ࣰࣱ\x05сȡ\x02ࣱĪ\x03\x02\x02')
buf.write('\x02ࣲࣳ\x05пȠ\x02ࣳࣴ\x05ёȩ')
buf.write('\x02ࣴࣵ\x05ыȦ\x02ࣶࣵ\x05ыȦ')
buf.write('\x02ࣶࣷ\x05ёȩ\x02ࣷࣸ\x05ѡȱ')
buf.write('\x02ࣹࣸ\x05љȭ\x02ࣹĬ\x03\x02\x02\x02ࣺ')
buf.write('ࣻ\x05пȠ\x02ࣻࣼ\x05ёȩ\x02ࣼ')
buf.write('ࣽ\x05їȬ\x02ࣽĮ\x03\x02\x02\x02ࣾࣿ')
buf.write('\x05пȠ\x02ࣿऀ\x05ёȩ\x02ऀँ')
buf.write('\x05їȬ\x02ँं\x05еț\x02ंः')
buf.write('\x05ыȦ\x02ःऄ\x05ыȦ\x02ऄİ')
buf.write('\x03\x02\x02\x02अआ\x05пȠ\x02आइ\x05ё')
buf.write('ȩ\x02इई\x05їȬ\x02ईउ\x05й')
buf.write('ȝ\x02उऊ\x05нȟ\x02ऊIJ\x03\x02\x02')
buf.write('\x02ऋऌ\x05пȠ\x02ऌऍ\x05їȬ')
buf.write('\x02ऍऎ\x05ёȩ\x02ऎए\x05эȧ')
buf.write('\x02एĴ\x03\x02\x02\x02ऐऑ\x05пȠ\x02ऑ')
buf.write('ऒ\x05ѝȯ\x02ऒओ\x05ыȦ\x02ओ')
buf.write('औ\x05ыȦ\x02औĶ\x03\x02\x02\x02कख')
buf.write('\x05пȠ\x02खग\x05ѝȯ\x02गघ')
buf.write('\x05яȨ\x02घङ\x05йȝ\x02ङच')
buf.write('\x05ћȮ\x02चछ\x05хȣ\x02छज')
buf.write('\x05ёȩ\x02जझ\x05яȨ\x02झĸ')
buf.write('\x03\x02\x02\x02ञट\x05сȡ\x02टठ\x05ё')
buf.write('ȩ\x02ठड\x05ћȮ\x02डढ\x05ё')
buf.write('ȩ\x02ढĺ\x03\x02\x02\x02णत\x05сȡ')
buf.write('\x02तथ\x05їȬ\x02थद\x05еț')
buf.write('\x02दध\x05яȨ\x02धन\x05ћȮ')
buf.write('\x02नļ\x03\x02\x02\x02ऩप\x05сȡ\x02प')
buf.write('फ\x05їȬ\x02फब\x05ёȩ\x02ब')
buf.write('भ\x05ѝȯ\x02भम\x05ѓȪ\x02म')
buf.write('ľ\x03\x02\x02\x02यर\x05сȡ\x02रऱ')
buf.write('\x05їȬ\x02ऱल\x05ёȩ\x02लळ')
buf.write('\x05ѝȯ\x02ळऴ\x05ѓȪ\x02ऴव')
buf.write('\x05хȣ\x02वश\x05яȨ\x02शष')
buf.write('\x05сȡ\x02षŀ\x03\x02\x02\x02सह\x05у')
buf.write('Ȣ\x02हऺ\x05еț\x02ऺऻ\x05љ')
buf.write('ȭ\x02ऻ़\x05уȢ\x02़ł\x03\x02\x02')
buf.write('\x02ऽा\x05уȢ\x02ाि\x05еț')
buf.write('\x02िी\x05џȰ\x02ीु\x05хȣ')
buf.write('\x02ुू\x05яȨ\x02ूृ\x05сȡ')
buf.write('\x02ृń\x03\x02\x02\x02ॄॅ\x05уȢ\x02ॅ')
buf.write('ॆ\x05хȣ\x02ॆे\x05лȞ\x02े')
buf.write('ै\x05нȟ\x02ैņ\x03\x02\x02\x02ॉॊ')
buf.write('\x05уȢ\x02ॊो\x05ёȩ\x02ोौ')
buf.write('\x05ѝȯ\x02ौ्\x05їȬ\x02्ň')
buf.write('\x03\x02\x02\x02ॎॏ\x05хȣ\x02ॏॐ\x05п')
buf.write('Ƞ\x02ॐŊ\x03\x02\x02\x02॒॑\x05хȣ')
buf.write('\x02॒॓\x05сȡ\x02॓॔\x05яȨ')
buf.write('\x02॔ॕ\x05ёȩ\x02ॕॖ\x05їȬ')
buf.write('\x02ॖॗ\x05нȟ\x02ॗŌ\x03\x02\x02\x02क़')
buf.write('ख़\x05хȣ\x02ख़ग़\x05эȧ\x02ग़')
buf.write('ज़\x05эȧ\x02ज़ड़\x05нȟ\x02ड़')
buf.write('ढ़\x05лȞ\x02ढ़फ़\x05хȣ\x02फ़')
buf.write('य़\x05еț\x02य़ॠ\x05ћȮ\x02ॠ')
buf.write('ॡ\x05нȟ\x02ॡŎ\x03\x02\x02\x02ॢॣ')
buf.write('\x05хȣ\x02ॣ।\x05яȨ\x02।Ő')
buf.write('\x03\x02\x02\x02॥०\x05хȣ\x02०१\x05я')
buf.write('Ȩ\x02१२\x05йȝ\x02२३\x05ы')
buf.write('Ȧ\x02३४\x05ѝȯ\x02४५\x05л')
buf.write('Ȟ\x02५६\x05нȟ\x02६Œ\x03\x02\x02')
buf.write('\x02७८\x05хȣ\x02८९\x05яȨ')
buf.write('\x02९॰\x05йȝ\x02॰ॱ\x05ыȦ')
buf.write('\x02ॱॲ\x05ѝȯ\x02ॲॳ\x05лȞ')
buf.write('\x02ॳॴ\x05хȣ\x02ॴॵ\x05яȨ')
buf.write('\x02ॵॶ\x05сȡ\x02ॶŔ\x03\x02\x02\x02ॷ')
buf.write('ॸ\x05хȣ\x02ॸॹ\x05яȨ\x02ॹ')
buf.write('ॺ\x05йȝ\x02ॺॻ\x05їȬ\x02ॻ')
buf.write('ॼ\x05нȟ\x02ॼॽ\x05эȧ\x02ॽ')
buf.write('ॾ\x05нȟ\x02ॾॿ\x05яȨ\x02ॿ')
buf.write('ঀ\x05ћȮ\x02ঀŖ\x03\x02\x02\x02ঁং')
buf.write('\x05хȣ\x02ংঃ\x05яȨ\x02ঃ\u0984')
buf.write('\x05лȞ\x02\u0984অ\x05нȟ\x02অআ')
buf.write('\x05яȨ\x02আই\x05ћȮ\x02ইŘ')
buf.write('\x03\x02\x02\x02ঈউ\x05хȣ\x02উঊ\x05я')
buf.write('Ȩ\x02ঊঋ\x05лȞ\x02ঋঌ\x05н')
buf.write('ȟ\x02ঌ\u098d\x05ѣȲ\x02\u098dŚ\x03\x02\x02')
buf.write('\x02\u098eএ\x05хȣ\x02এঐ\x05яȨ')
buf.write('\x02ঐ\u0991\x05лȞ\x02\u0991\u0992\x05нȟ')
buf.write('\x02\u0992ও\x05ѣȲ\x02ওঔ\x05нȟ')
buf.write('\x02ঔক\x05лȞ\x02কŜ\x03\x02\x02\x02খ')
buf.write('গ\x05хȣ\x02গঘ\x05яȨ\x02ঘ')
buf.write('ঙ\x05лȞ\x02ঙচ\x05хȣ\x02চ')
buf.write('ছ\x05йȝ\x02ছজ\x05еț\x02জ')
buf.write('ঝ\x05ћȮ\x02ঝঞ\x05ёȩ\x02ঞ')
buf.write('ট\x05їȬ\x02টŞ\x03\x02\x02\x02ঠড')
buf.write('\x05хȣ\x02ডঢ\x05яȨ\x02ঢণ')
buf.write('\x05лȞ\x02ণত\x05хȣ\x02তথ')
buf.write('\x05йȝ\x02থদ\x05нȟ\x02দধ')
buf.write('\x05љȭ\x02ধŠ\x03\x02\x02\x02ন\u09a9\x05х')
buf.write('ȣ\x02\u09a9প\x05яȨ\x02পফ\x05п')
buf.write('Ƞ\x02ফব\x05хȣ\x02বভ\x05я')
buf.write('Ȩ\x02ভম\x05хȣ\x02ময\x05ћ')
buf.write('Ȯ\x02যর\x05нȟ\x02রŢ\x03\x02\x02')
buf.write('\x02\u09b1ল\x05хȣ\x02ল\u09b3\x05яȨ')
buf.write('\x02\u09b3\u09b4\x05ыȦ\x02\u09b4\u09b5\x05хȣ')
buf.write('\x02\u09b5শ\x05яȨ\x02শষ\x05нȟ')
buf.write('\x02ষŤ\x03\x02\x02\x02সহ\x05хȣ\x02হ')
buf.write('\u09ba\x05яȨ\x02\u09ba\u09bb\x05яȨ\x02\u09bb')
buf.write('়\x05нȟ\x02়ঽ\x05їȬ\x02ঽ')
buf.write('Ŧ\x03\x02\x02\x02াি\x05хȣ\x02িী')
buf.write('\x05яȨ\x02ীু\x05ёȩ\x02ুূ')
buf.write('\x05ѝȯ\x02ূৃ\x05ћȮ\x02ৃŨ')
buf.write('\x03\x02\x02\x02ৄ\u09c5\x05хȣ\x02\u09c5\u09c6\x05я')
buf.write('Ȩ\x02\u09c6ে\x05љȭ\x02েৈ\x05н')
buf.write('ȟ\x02ৈ\u09c9\x05їȬ\x02\u09c9\u09ca\x05ћ')
buf.write('Ȯ\x02\u09caŪ\x03\x02\x02\x02োৌ\x05хȣ')
buf.write('\x02ৌ্\x05яȨ\x02্ৎ\x05љȭ')
buf.write('\x02ৎ\u09cf\x05ћȮ\x02\u09cf\u09d0\x05еț')
buf.write('\x02\u09d0\u09d1\x05яȨ\x02\u09d1\u09d2\x05ћȮ')
buf.write('\x02\u09d2\u09d3\x05хȣ\x02\u09d3\u09d4\x05еț')
buf.write('\x02\u09d4\u09d5\x05зȜ\x02\u09d5\u09d6\x05ыȦ')
buf.write('\x02\u09d6ৗ\x05нȟ\x02ৗŬ\x03\x02\x02\x02\u09d8')
buf.write('\u09d9\x05хȣ\x02\u09d9\u09da\x05яȨ\x02\u09da')
buf.write('\u09db\x05љȭ\x02\u09dbড়\x05ћȮ\x02ড়')
buf.write('ঢ়\x05нȟ\x02ঢ়\u09de\x05еț\x02\u09de')
buf.write('য়\x05лȞ\x02য়Ů\x03\x02\x02\x02ৠৡ')
buf.write('\x05хȣ\x02ৡৢ\x05яȨ\x02ৢৣ')
buf.write('\x05ћȮ\x02ৣŰ\x03\x02\x02\x02\u09e4\u09e5\x05х')
buf.write('ȣ\x02\u09e5০\x05яȨ\x02০১\x05ћ')
buf.write('Ȯ\x02১২\x05нȟ\x02২৩\x05с')
buf.write('ȡ\x02৩৪\x05нȟ\x02৪৫\x05ї')
buf.write('Ȭ\x02৫Ų\x03\x02\x02\x02৬৭\x05хȣ')
buf.write('\x02৭৮\x05яȨ\x02৮৯\x05ћȮ')
buf.write('\x02৯ৰ\x05нȟ\x02ৰৱ\x05їȬ')
buf.write('\x02ৱ৲\x05љȭ\x02৲৳\x05нȟ')
buf.write('\x02৳৴\x05йȝ\x02৴৵\x05ћȮ')
buf.write('\x02৵Ŵ\x03\x02\x02\x02৶৷\x05хȣ\x02৷')
buf.write('৸\x05яȨ\x02৸৹\x05ћȮ\x02৹')
buf.write('৺\x05нȟ\x02৺৻\x05їȬ\x02৻')
buf.write('ৼ\x05џȰ\x02ৼ৽\x05еț\x02৽')
buf.write('৾\x05ыȦ\x02৾Ŷ\x03\x02\x02\x02\u09ff\u0a00')
buf.write('\x05хȣ\x02\u0a00ਁ\x05яȨ\x02ਁਂ')
buf.write('\x05ћȮ\x02ਂਃ\x05ёȩ\x02ਃŸ')
buf.write('\x03\x02\x02\x02\u0a04ਅ\x05хȣ\x02ਅਆ\x05я')
buf.write('Ȩ\x02ਆਇ\x05џȰ\x02ਇਈ\x05е')
buf.write('ț\x02ਈਉ\x05ыȦ\x02ਉਊ\x05х')
buf.write('ȣ\x02ਊ\u0a0b\x05лȞ\x02\u0a0b\u0a0c\x05е')
buf.write('ț\x02\u0a0c\u0a0d\x05ћȮ\x02\u0a0d\u0a0e\x05н')
buf.write('ȟ\x02\u0a0eź\x03\x02\x02\x02ਏਐ\x05хȣ')
buf.write('\x02ਐ\u0a11\x05љȭ\x02\u0a11ż\x03\x02\x02\x02\u0a12')
buf.write('ਓ\x05хȣ\x02ਓਔ\x05љȭ\x02ਔ')
buf.write('ਕ\x05ёȩ\x02ਕਖ\x05ыȦ\x02ਖ')
buf.write('ਗ\x05еț\x02ਗਘ\x05ћȮ\x02ਘ')
buf.write('ਙ\x05хȣ\x02ਙਚ\x05ёȩ\x02ਚ')
buf.write('ਛ\x05яȨ\x02ਛž\x03\x02\x02\x02ਜਝ')
buf.write('\x05хȣ\x02ਝਞ\x05ћȮ\x02ਞਟ')
buf.write('\x05нȟ\x02ਟਠ\x05їȬ\x02ਠਡ')
buf.write('\x05еț\x02ਡਢ\x05ћȮ\x02ਢਣ')
buf.write('\x05нȟ\x02ਣƀ\x03\x02\x02\x02ਤਥ\x05ч')
buf.write('Ȥ\x02ਥਦ\x05еț\x02ਦਧ\x05џ')
buf.write('Ȱ\x02ਧਨ\x05еț\x02ਨƂ\x03\x02\x02')
buf.write('\x02\u0a29ਪ\x05чȤ\x02ਪਫ\x05ёȩ')
buf.write('\x02ਫਬ\x05хȣ\x02ਬਭ\x05яȨ')
buf.write('\x02ਭƄ\x03\x02\x02\x02ਮਯ\x05щȥ\x02ਯ')
buf.write('ਰ\x05нȟ\x02ਰ\u0a31\x05нȟ\x02\u0a31')
buf.write('ਲ\x05ѓȪ\x02ਲƆ\x03\x02\x02\x02ਲ਼\u0a34')
buf.write('\x05ыȦ\x02\u0a34ਵ\x05еț\x02ਵਸ਼')
buf.write('\x05яȨ\x02ਸ਼\u0a37\x05сȡ\x02\u0a37ਸ')
buf.write('\x05ѝȯ\x02ਸਹ\x05еț\x02ਹ\u0a3a')
buf.write('\x05сȡ\x02\u0a3a\u0a3b\x05нȟ\x02\u0a3bƈ')
buf.write('\x03\x02\x02\x02਼\u0a3d\x05ыȦ\x02\u0a3dਾ\x05е')
buf.write('ț\x02ਾਿ\x05љȭ\x02ਿੀ\x05ћ')
buf.write('Ȯ\x02ੀƊ\x03\x02\x02\x02ੁੂ\x05ыȦ')
buf.write('\x02ੂ\u0a43\x05еț\x02\u0a43\u0a44\x05љȭ')
buf.write('\x02\u0a44\u0a45\x05ћȮ\x02\u0a45\u0a46\x07a\x02\x02\u0a46')
buf.write('ੇ\x05џȰ\x02ੇੈ\x05еț\x02ੈ')
buf.write('\u0a49\x05ыȦ\x02\u0a49\u0a4a\x05ѝȯ\x02\u0a4a')
buf.write('ੋ\x05нȟ\x02ੋƌ\x03\x02\x02\x02ੌ੍')
buf.write('\x05ыȦ\x02੍\u0a4e\x05нȟ\x02\u0a4e\u0a4f')
buf.write('\x05еț\x02\u0a4f\u0a50\x05лȞ\x02\u0a50ੑ')
buf.write('\x05хȣ\x02ੑ\u0a52\x05яȨ\x02\u0a52\u0a53')
buf.write('\x05сȡ\x02\u0a53Ǝ\x03\x02\x02\x02\u0a54\u0a55\x05ы')
buf.write('Ȧ\x02\u0a55\u0a56\x05нȟ\x02\u0a56\u0a57\x05п')
buf.write('Ƞ\x02\u0a57\u0a58\x05ћȮ\x02\u0a58Ɛ\x03\x02\x02')
buf.write('\x02ਖ਼ਗ਼\x05ыȦ\x02ਗ਼ਜ਼\x05нȟ')
buf.write('\x02ਜ਼ੜ\x05џȰ\x02ੜ\u0a5d\x05нȟ')
buf.write('\x02\u0a5dਫ਼\x05ыȦ\x02ਫ਼ƒ\x03\x02\x02\x02\u0a5f')
buf.write('\u0a60\x05ыȦ\x02\u0a60\u0a61\x05хȣ\x02\u0a61')
buf.write('\u0a62\x05зȜ\x02\u0a62\u0a63\x05їȬ\x02\u0a63')
buf.write('\u0a64\x05еț\x02\u0a64\u0a65\x05їȬ\x02\u0a65')
buf.write('੦\x05ѥȳ\x02੦Ɣ\x03\x02\x02\x02੧੨')
buf.write('\x05ыȦ\x02੨੩\x05хȣ\x02੩੪')
buf.write('\x05щȥ\x02੪੫\x05нȟ\x02੫Ɩ')
buf.write('\x03\x02\x02\x02੬੭\x05ыȦ\x02੭੮\x05х')
buf.write('ȣ\x02੮੯\x05щȥ\x02੯ੰ\x05н')
buf.write('ȟ\x02ੰੱ\x074\x02\x02ੱƘ\x03\x02\x02\x02ੲ')
buf.write('ੳ\x05ыȦ\x02ੳੴ\x05хȣ\x02ੴ')
buf.write('ੵ\x05щȥ\x02ੵ੶\x05нȟ\x02੶')
    buf.write('\u0a77\x076\x02\x02\u0a77ƚ\x03\x02\x02\x02\u0a78\u0a79\x05ы')
buf.write('Ȧ\x02\u0a79\u0a7a\x05хȣ\x02\u0a7a\u0a7b\x05щ')
buf.write('ȥ\x02\u0a7b\u0a7c\x05нȟ\x02\u0a7c\u0a7d\x05й')
buf.write('ȝ\x02\u0a7dƜ\x03\x02\x02\x02\u0a7e\u0a7f\x05ыȦ')
buf.write('\x02\u0a7f\u0a80\x05хȣ\x02\u0a80ઁ\x05эȧ')
buf.write('\x02ઁં\x05хȣ\x02ંઃ\x05ћȮ')
buf.write('\x02ઃƞ\x03\x02\x02\x02\u0a84અ\x05ыȦ\x02અ')
buf.write('આ\x05ёȩ\x02આઇ\x05йȝ\x02ઇ')
buf.write('ઈ\x05еț\x02ઈઉ\x05ыȦ\x02ઉ')
buf.write('Ơ\x03\x02\x02\x02ઊઋ\x05ыȦ\x02ઋઌ')
buf.write('\x05ёȩ\x02ઌઍ\x05йȝ\x02ઍ\u0a8e')
buf.write('\x05щȥ\x02\u0a8eƢ\x03\x02\x02\x02એઐ\x05ы')
buf.write('Ȧ\x02ઐઑ\x05ёȩ\x02ઑ\u0a92\x05й')
buf.write('ȝ\x02\u0a92ઓ\x05щȥ\x02ઓઔ\x05н')
buf.write('ȟ\x02ઔક\x05лȞ\x02કƤ\x03\x02\x02')
buf.write('\x02ખગ\x05ыȦ\x02ગઘ\x05ёȩ')
buf.write('\x02ઘઙ\x05сȡ\x02ઙƦ\x03\x02\x02\x02ચ')
buf.write('છ\x05ыȦ\x02છજ\x05ёȩ\x02જ')
buf.write('ઝ\x05сȡ\x02ઝઞ\x05ёȩ\x02ઞ')
buf.write('ટ\x05пȠ\x02ટઠ\x05пȠ\x02ઠ')
buf.write('ƨ\x03\x02\x02\x02ડઢ\x05ыȦ\x02ઢણ')
buf.write('\x05ёȩ\x02ણત\x05сȡ\x02તથ')
buf.write('\x05ёȩ\x02થદ\x05яȨ\x02દƪ')
buf.write('\x03\x02\x02\x02ધન\x05ыȦ\x02ન\u0aa9\x05ё')
buf.write('ȩ\x02\u0aa9પ\x05яȨ\x02પફ\x05с')
buf.write('ȡ\x02ફƬ\x03\x02\x02\x02બભ\x05ыȦ')
buf.write('\x02ભમ\x05ёȩ\x02મય\x05ёȩ')
buf.write('\x02યર\x05ѓȪ\x02રƮ\x03\x02\x02\x02\u0ab1')
buf.write('લ\x05эȧ\x02લળ\x05еț\x02ળ')
buf.write('\u0ab4\x05хȣ\x02\u0ab4વ\x05яȨ\x02વ')
buf.write('ư\x03\x02\x02\x02શષ\x05эȧ\x02ષસ')
buf.write('\x05еț\x02સહ\x05ѓȪ\x02હƲ')
buf.write('\x03\x02\x02\x02\u0aba\u0abb\x05эȧ\x02\u0abb઼\x05е')
buf.write('ț\x02઼ઽ\x05ћȮ\x02ઽા\x05й')
buf.write('ȝ\x02ાિ\x05уȢ\x02િી\x05н')
buf.write('ȟ\x02ીુ\x05лȞ\x02ુƴ\x03\x02\x02')
buf.write('\x02ૂૃ\x05эȧ\x02ૃૄ\x05еț')
buf.write('\x02ૄૅ\x05ѣȲ\x02ૅ\u0ac6\x05џȰ')
buf.write('\x02\u0ac6ે\x05еț\x02ેૈ\x05ыȦ')
buf.write('\x02ૈૉ\x05ѝȯ\x02ૉ\u0aca\x05нȟ')
buf.write('\x02\u0acaƶ\x03\x02\x02\x02ોૌ\x05эȧ\x02ૌ')
buf.write('્\x05нȟ\x02્\u0ace\x05еț\x02\u0ace')
buf.write('\u0acf\x05љȭ\x02\u0acfૐ\x05ѝȯ\x02ૐ')
buf.write('\u0ad1\x05їȬ\x02\u0ad1\u0ad2\x05нȟ\x02\u0ad2')
buf.write('\u0ad3\x05љȭ\x02\u0ad3Ƹ\x03\x02\x02\x02\u0ad4\u0ad5')
buf.write('\x05эȧ\x02\u0ad5\u0ad6\x05нȟ\x02\u0ad6\u0ad7')
buf.write('\x05эȧ\x02\u0ad7\u0ad8\x05зȜ\x02\u0ad8\u0ad9')
buf.write('\x05нȟ\x02\u0ad9\u0ada\x05їȬ\x02\u0adaƺ')
buf.write('\x03\x02\x02\x02\u0adb\u0adc\x05эȧ\x02\u0adc\u0add\x05н')
buf.write('ȟ\x02\u0add\u0ade\x05їȬ\x02\u0ade\u0adf\x05с')
buf.write('ȡ\x02\u0adfૠ\x05нȟ\x02ૠƼ\x03\x02\x02')
buf.write('\x02ૡૢ\x05эȧ\x02ૢૣ\x05хȣ')
buf.write('\x02ૣ\u0ae4\x05яȨ\x02\u0ae4\u0ae5\x05ѝȯ')
buf.write('\x02\u0ae5૦\x05љȭ\x02૦ƾ\x03\x02\x02\x02૧')
buf.write('૨\x05эȧ\x02૨૩\x05хȣ\x02૩')
buf.write('૪\x05яȨ\x02૪૫\x05ѝȯ\x02૫')
buf.write('૬\x05ћȮ\x02૬૭\x05нȟ\x02૭')
buf.write('ǀ\x03\x02\x02\x02૮૯\x05эȧ\x02૯૰')
buf.write('\x05хȣ\x02૰૱\x05яȨ\x02૱\u0af2')
buf.write('\x05џȰ\x02\u0af2\u0af3\x05еț\x02\u0af3\u0af4')
buf.write('\x05ыȦ\x02\u0af4\u0af5\x05ѝȯ\x02\u0af5\u0af6')
buf.write('\x05нȟ\x02\u0af6ǂ\x03\x02\x02\x02\u0af7\u0af8\x05э')
buf.write('ȧ\x02\u0af8ૹ\x05ыȦ\x02ૹૺ\x05љ')
buf.write('ȭ\x02ૺૻ\x05ыȦ\x02ૻૼ\x05е')
buf.write('ț\x02ૼ૽\x05зȜ\x02૽૾\x05н')
buf.write('ȟ\x02૾૿\x05ыȦ\x02૿DŽ\x03\x02\x02')
buf.write('\x02\u0b00ଁ\x05эȧ\x02ଁଂ\x05ёȩ')
buf.write('\x02ଂଃ\x05лȞ\x02ଃ\u0b04\x05нȟ')
buf.write('\x02\u0b04dž\x03\x02\x02\x02ଅଆ\x05эȧ\x02ଆ')
buf.write('ଇ\x05ёȩ\x02ଇଈ\x05лȞ\x02ଈ')
buf.write('ଉ\x05нȟ\x02ଉଊ\x05ыȦ\x02ଊ')
buf.write('Lj\x03\x02\x02\x02ଋଌ\x05эȧ\x02ଌ\u0b0d')
buf.write('\x05ёȩ\x02\u0b0d\u0b0e\x05лȞ\x02\u0b0eଏ')
buf.write('\x05хȣ\x02ଏଐ\x05пȠ\x02ଐ\u0b11')
buf.write('\x05ѥȳ\x02\u0b11NJ\x03\x02\x02\x02\u0b12ଓ\x05э')
buf.write('ȧ\x02ଓଔ\x05ёȩ\x02ଔକ\x05я')
buf.write('Ȩ\x02କଖ\x05ћȮ\x02ଖଗ\x05у')
buf.write('Ȣ\x02ଗnj\x03\x02\x02\x02ଘଙ\x05эȧ')
buf.write('\x02ଙଚ\x05ѝȯ\x02ଚଛ\x05ыȦ')
buf.write('\x02ଛଜ\x05ћȮ\x02ଜଝ\x05хȣ')
buf.write('\x02ଝଞ\x05љȭ\x02ଞଟ\x05нȟ')
buf.write('\x02ଟଠ\x05ћȮ\x02ଠǎ\x03\x02\x02\x02ଡ')
buf.write('ଢ\x05яȨ\x02ଢଣ\x05еț\x02ଣ')
buf.write('ତ\x05эȧ\x02ତଥ\x05нȟ\x02ଥ')
buf.write('ǐ\x03\x02\x02\x02ଦଧ\x05яȨ\x02ଧନ')
buf.write('\x05еț\x02ନ\u0b29\x05яȨ\x02\u0b29ǒ')
buf.write('\x03\x02\x02\x02ପଫ\x05яȨ\x02ଫବ\x05е')
buf.write('ț\x02ବଭ\x05ћȮ\x02ଭମ\x05ѝ')
buf.write('ȯ\x02ମଯ\x05їȬ\x02ଯର\x05е')
buf.write('ț\x02ର\u0b31\x05ыȦ\x02\u0b31ǔ\x03\x02\x02')
buf.write('\x02ଲଳ\x05яȨ\x02ଳ\u0b34\x05еț')
buf.write('\x02\u0b34ଵ\x05ћȮ\x02ଵଶ\x05ѝȯ')
buf.write('\x02ଶଷ\x05їȬ\x02ଷସ\x05еț')
buf.write('\x02ସହ\x05ыȦ\x02ହ\u0b3a\x05яȨ')
buf.write('\x02\u0b3aǖ\x03\x02\x02\x02\u0b3b଼\x05яȨ\x02଼')
buf.write('ଽ\x05еț\x02ଽା\x05џȰ\x02ା')
buf.write('ǘ\x03\x02\x02\x02ିୀ\x05яȨ\x02ୀୁ')
buf.write('\x05йȝ\x02ୁୂ\x05уȢ\x02ୂୃ')
buf.write('\x05еț\x02ୃୄ\x05їȬ\x02ୄǚ')
buf.write('\x03\x02\x02\x02\u0b45\u0b46\x05яȨ\x02\u0b46େ\x05й')
buf.write('ȝ\x02େୈ\x05уȢ\x02ୈ\u0b49\x05е')
buf.write('ț\x02\u0b49\u0b4a\x05їȬ\x02\u0b4aୋ\x07a\x02')
buf.write('\x02ୋୌ\x05йȝ\x02ୌ୍\x05љȭ')
buf.write('\x02୍ǜ\x03\x02\x02\x02\u0b4e\u0b4f\x05яȨ\x02\u0b4f')
buf.write('\u0b50\x05йȝ\x02\u0b50\u0b51\x05ыȦ\x02\u0b51')
buf.write('\u0b52\x05ёȩ\x02\u0b52\u0b53\x05зȜ\x02\u0b53')
buf.write('Ǟ\x03\x02\x02\x02\u0b54୕\x05яȨ\x02୕ୖ')
buf.write('\x05нȟ\x02ୖୗ\x05љȭ\x02ୗ\u0b58')
buf.write('\x05ћȮ\x02\u0b58\u0b59\x05нȟ\x02\u0b59\u0b5a')
buf.write('\x05лȞ\x02\u0b5aǠ\x03\x02\x02\x02\u0b5bଡ଼\x05я')
buf.write('Ȩ\x02ଡ଼ଢ଼\x05нȟ\x02ଢ଼\u0b5e\x05ѡ')
buf.write('ȱ\x02\u0b5eǢ\x03\x02\x02\x02ୟୠ\x05яȨ')
buf.write('\x02ୠୡ\x05ёȩ\x02ୡǤ\x03\x02\x02\x02ୢ')
buf.write('ୣ\x05яȨ\x02ୣ\u0b64\x05ёȩ\x02\u0b64')
buf.write('\u0b65\x05еț\x02\u0b65୦\x05ѝȯ\x02୦')
buf.write('୧\x05лȞ\x02୧୨\x05хȣ\x02୨')
buf.write('୩\x05ћȮ\x02୩Ǧ\x03\x02\x02\x02୪୫')
buf.write('\x05яȨ\x02୫୬\x05ёȩ\x02୬୭')
buf.write('\x05йȝ\x02୭୮\x05еț\x02୮୯')
buf.write('\x05йȝ\x02୯୰\x05уȢ\x02୰ୱ')
buf.write('\x05нȟ\x02ୱǨ\x03\x02\x02\x02୲୳\x05я')
buf.write('Ȩ\x02୳୴\x05ёȩ\x02୴୵\x05й')
buf.write('ȝ\x02୵୶\x05ёȩ\x02୶୷\x05ѓ')
buf.write('Ȫ\x02୷\u0b78\x05ѥȳ\x02\u0b78Ǫ\x03\x02\x02')
buf.write('\x02\u0b79\u0b7a\x05яȨ\x02\u0b7a\u0b7b\x05ёȩ')
buf.write('\x02\u0b7b\u0b7c\x05йȝ\x02\u0b7c\u0b7d\x05ѥȳ')
buf.write('\x02\u0b7d\u0b7e\x05йȝ\x02\u0b7e\u0b7f\x05ыȦ')
buf.write('\x02\u0b7f\u0b80\x05нȟ\x02\u0b80Ǭ\x03\x02\x02\x02\u0b81')
buf.write('ஂ\x05яȨ\x02ஂஃ\x05ёȩ\x02ஃ')
buf.write('\u0b84\x05нȟ\x02\u0b84அ\x05яȨ\x02அ')
buf.write('ஆ\x05ћȮ\x02ஆஇ\x05хȣ\x02இ')
buf.write('ஈ\x05ћȮ\x02ஈஉ\x05ѥȳ\x02உ')
buf.write('ஊ\x05нȟ\x02ஊ\u0b8b\x05љȭ\x02\u0b8b')
buf.write('\u0b8c\x05йȝ\x02\u0b8c\u0b8d\x05еț\x02\u0b8d')
buf.write('எ\x05ѓȪ\x02எஏ\x05хȣ\x02ஏ')
buf.write('ஐ\x05яȨ\x02ஐ\u0b91\x05сȡ\x02\u0b91')
buf.write('Ǯ\x03\x02\x02\x02ஒஓ\x05яȨ\x02ஓஔ')
buf.write('\x05ёȩ\x02ஔக\x05эȧ\x02க\u0b96')
buf.write('\x05еț\x02\u0b96\u0b97\x05ѣȲ\x02\u0b97\u0b98')
buf.write('\x05џȰ\x02\u0b98ங\x05еț\x02ஙச')
buf.write('\x05ыȦ\x02ச\u0b9b\x05ѝȯ\x02\u0b9bஜ')
buf.write('\x05нȟ\x02ஜǰ\x03\x02\x02\x02\u0b9dஞ\x05я')
buf.write('Ȩ\x02ஞட\x05ёȩ\x02ட\u0ba0\x05э')
buf.write('ȧ\x02\u0ba0\u0ba1\x05хȣ\x02\u0ba1\u0ba2\x05я')
buf.write('Ȩ\x02\u0ba2ண\x05џȰ\x02ணத\x05е')
buf.write('ț\x02த\u0ba5\x05ыȦ\x02\u0ba5\u0ba6\x05ѝ')
buf.write('ȯ\x02\u0ba6\u0ba7\x05нȟ\x02\u0ba7Dz\x03\x02\x02')
buf.write('\x02நன\x05яȨ\x02னப\x05ёȩ')
buf.write('\x02ப\u0bab\x05яȨ\x02\u0bab\u0bac\x05нȟ')
buf.write('\x02\u0bacǴ\x03\x02\x02\x02\u0badம\x05яȨ\x02ம')
buf.write('ய\x05ёȩ\x02யர\x05ёȩ\x02ர')
buf.write('ற\x05їȬ\x02றல\x05лȞ\x02ல')
buf.write('ள\x05нȟ\x02ளழ\x05їȬ\x02ழ')
buf.write('Ƕ\x03\x02\x02\x02வஶ\x05яȨ\x02ஶஷ')
buf.write('\x05ёȩ\x02ஷஸ\x05љȭ\x02ஸஹ')
buf.write('\x05йȝ\x02ஹ\u0bba\x05уȢ\x02\u0bba\u0bbb')
buf.write('\x05нȟ\x02\u0bbb\u0bbc\x05эȧ\x02\u0bbc\u0bbd')
buf.write('\x05еț\x02\u0bbdா\x05йȝ\x02ாி')
buf.write('\x05уȢ\x02ிீ\x05нȟ\x02ீு')
buf.write('\x05йȝ\x02ுூ\x05щȥ\x02ூǸ')
buf.write('\x03\x02\x02\x02\u0bc3\u0bc4\x05яȨ\x02\u0bc4\u0bc5\x05ё')
buf.write('ȩ\x02\u0bc5ெ\x05ћȮ\x02ெǺ\x03\x02\x02')
buf.write('\x02ேை\x05яȨ\x02ை\u0bc9\x05ёȩ')
buf.write('\x02\u0bc9ொ\x05ѡȱ\x02ொோ\x05еț')
buf.write('\x02ோௌ\x05хȣ\x02ௌ்\x05ћȮ')
buf.write('\x02்Ǽ\x03\x02\x02\x02\u0bce\u0bcf\x05яȨ\x02\u0bcf')
buf.write('ௐ\x05ѝȯ\x02ௐ\u0bd1\x05ыȦ\x02\u0bd1')
buf.write('\u0bd2\x05ыȦ\x02\u0bd2Ǿ\x03\x02\x02\x02\u0bd3\u0bd4')
buf.write('\x05яȨ\x02\u0bd4\u0bd5\x05ѝȯ\x02\u0bd5\u0bd6')
buf.write('\x05ыȦ\x02\u0bd6ௗ\x05ыȦ\x02ௗ\u0bd8')
buf.write('\x05љȭ\x02\u0bd8Ȁ\x03\x02\x02\x02\u0bd9\u0bda\x05я')
buf.write('Ȩ\x02\u0bda\u0bdb\x05ѝȯ\x02\u0bdb\u0bdc\x05э')
buf.write('ȧ\x02\u0bdc\u0bdd\x05зȜ\x02\u0bdd\u0bde\x05н')
buf.write('ȟ\x02\u0bde\u0bdf\x05їȬ\x02\u0bdfȂ\x03\x02\x02')
buf.write('\x02\u0be0\u0be1\x05яȨ\x02\u0be1\u0be2\x05ѝȯ')
buf.write('\x02\u0be2\u0be3\x05эȧ\x02\u0be3\u0be4\x05нȟ')
buf.write('\x02\u0be4\u0be5\x05їȬ\x02\u0be5௦\x05хȣ')
buf.write('\x02௦௧\x05йȝ\x02௧Ȅ\x03\x02\x02\x02௨')
buf.write('௩\x05яȨ\x02௩௪\x05џȰ\x02௪')
buf.write('௫\x05еț\x02௫௬\x05їȬ\x02௬')
buf.write('௭\x05йȝ\x02௭௮\x05уȢ\x02௮')
buf.write('௯\x05еț\x02௯௰\x05їȬ\x02௰')
buf.write('௱\x074\x02\x02௱Ȇ\x03\x02\x02\x02௲௳\x05ё')
buf.write('ȩ\x02௳௴\x05зȜ\x02௴௵\x05ч')
buf.write('Ȥ\x02௵௶\x05нȟ\x02௶௷\x05й')
buf.write('ȝ\x02௷௸\x05ћȮ\x02௸Ȉ\x03\x02\x02')
buf.write('\x02௹௺\x05ёȩ\x02௺\u0bfb\x05пȠ')
buf.write('\x02\u0bfbȊ\x03\x02\x02\x02\u0bfc\u0bfd\x05ёȩ\x02\u0bfd')
buf.write('\u0bfe\x05пȠ\x02\u0bfe\u0bff\x05пȠ\x02\u0bff')
buf.write('Ȍ\x03\x02\x02\x02ఀఁ\x05ёȩ\x02ఁం')
buf.write('\x05хȣ\x02ంః\x05лȞ\x02ఃȎ')
buf.write('\x03\x02\x02\x02ఄఅ\x05ёȩ\x02అఆ\x05ы')
buf.write('Ȧ\x02ఆఇ\x05лȞ\x02ఇȐ\x03\x02\x02')
buf.write('\x02ఈఉ\x05ёȩ\x02ఉఊ\x05яȨ')
buf.write('\x02ఊȒ\x03\x02\x02\x02ఋఌ\x05ёȩ\x02ఌ')
buf.write('\u0c0d\x05яȨ\x02\u0c0dఎ\x05ыȦ\x02ఎ')
buf.write('ఏ\x05ѥȳ\x02ఏȔ\x03\x02\x02\x02ఐ\u0c11')
buf.write('\x05ёȩ\x02\u0c11ఒ\x05ѓȪ\x02ఒఓ')
buf.write('\x05нȟ\x02ఓఔ\x05яȨ\x02ఔȖ')
buf.write('\x03\x02\x02\x02కఖ\x05ёȩ\x02ఖగ\x05ѓ')
buf.write('Ȫ\x02గఘ\x05ћȮ\x02ఘఙ\x05х')
buf.write('ȣ\x02ఙచ\x05ёȩ\x02చఛ\x05я')
buf.write('Ȩ\x02ఛȘ\x03\x02\x02\x02జఝ\x05ёȩ')
buf.write('\x02ఝఞ\x05їȬ\x02ఞȚ\x03\x02\x02\x02ట')
buf.write('ఠ\x05ёȩ\x02ఠడ\x05їȬ\x02డ')
buf.write('ఢ\x05еț\x02ఢణ\x05лȞ\x02ణ')
buf.write('త\x05еț\x02తథ\x05ћȮ\x02థ')
buf.write('ద\x05еț\x02దȜ\x03\x02\x02\x02ధన')
buf.write('\x05ёȩ\x02న\u0c29\x05їȬ\x02\u0c29ప')
buf.write('\x05лȞ\x02పఫ\x05нȟ\x02ఫబ')
buf.write('\x05їȬ\x02బȞ\x03\x02\x02\x02భమ\x05ё')
buf.write('ȩ\x02మయ\x05їȬ\x02యర\x05л')
buf.write('Ȟ\x02రఱ\x05хȣ\x02ఱల\x05я')
buf.write('Ȩ\x02లళ\x05еț\x02ళఴ\x05ы')
buf.write('Ȧ\x02ఴవ\x05хȣ\x02వశ\x05ћ')
buf.write('Ȯ\x02శష\x05ѥȳ\x02షȠ\x03\x02\x02')
buf.write('\x02సహ\x05ёȩ\x02హ\u0c3a\x05љȭ')
buf.write('\x02\u0c3a\u0c3b\x05нȟ\x02\u0c3b఼\x05їȬ')
buf.write('\x02఼ఽ\x05їȬ\x02ఽా\x05ёȩ')
buf.write('\x02ాి\x05їȬ\x02ిȢ\x03\x02\x02\x02ీ')
buf.write('ు\x05ёȩ\x02ుూ\x05ѝȯ\x02ూ')
buf.write('ృ\x05ћȮ\x02ృȤ\x03\x02\x02\x02ౄ\u0c45')
buf.write('\x05ёȩ\x02\u0c45ె\x05ѝȯ\x02ెే')
buf.write('\x05ћȮ\x02ేై\x05нȟ\x02ై\u0c49')
buf.write('\x05їȬ\x02\u0c49Ȧ\x03\x02\x02\x02ొో\x05ё')
buf.write('ȩ\x02ోౌ\x05џȰ\x02ౌ్\x05н')
buf.write('ȟ\x02్\u0c4e\x05їȬ\x02\u0c4eȨ\x03\x02\x02')
buf.write('\x02\u0c4f\u0c50\x05ёȩ\x02\u0c50\u0c51\x05џȰ')
buf.write('\x02\u0c51\u0c52\x05нȟ\x02\u0c52\u0c53\x05їȬ')
buf.write('\x02\u0c53\u0c54\x05їȬ\x02\u0c54ౕ\x05хȣ')
buf.write('\x02ౕౖ\x05лȞ\x02ౖ\u0c57\x05хȣ')
buf.write('\x02\u0c57ౘ\x05яȨ\x02ౘౙ\x05сȡ')
buf.write('\x02ౙȪ\x03\x02\x02\x02ౚ\u0c5b\x05ѓȪ\x02\u0c5b')
buf.write('\u0c5c\x05еț\x02\u0c5cౝ\x05йȝ\x02ౝ')
buf.write('\u0c5e\x05щȥ\x02\u0c5e\u0c5f\x05еț\x02\u0c5f')
buf.write('ౠ\x05сȡ\x02ౠౡ\x05нȟ\x02ౡ')
buf.write('Ȭ\x03\x02\x02\x02ౢౣ\x05ѓȪ\x02ౣ\u0c64')
buf.write('\x05еț\x02\u0c64\u0c65\x05їȬ\x02\u0c65౦')
buf.write('\x05еț\x02౦౧\x05ыȦ\x02౧౨')
buf.write('\x05ыȦ\x02౨౩\x05нȟ\x02౩౪')
buf.write('\x05ыȦ\x02౪౫\x07a\x02\x02౫౬\x05н')
buf.write('ȟ\x02౬౭\x05яȨ\x02౭౮\x05е')
buf.write('ț\x02౮౯\x05зȜ\x02౯\u0c70\x05ы')
buf.write('Ȧ\x02\u0c70\u0c71\x05нȟ\x02\u0c71Ȯ\x03\x02\x02')
buf.write('\x02\u0c72\u0c73\x05ѓȪ\x02\u0c73\u0c74\x05еț')
buf.write('\x02\u0c74\u0c75\x05їȬ\x02\u0c75\u0c76\x05еț')
buf.write('\x02\u0c76౷\x05эȧ\x02౷౸\x05нȟ')
buf.write('\x02౸౹\x05ћȮ\x02౹౺\x05нȟ')
buf.write('\x02౺౻\x05їȬ\x02౻౼\x05љȭ')
buf.write('\x02౼Ȱ\x03\x02\x02\x02౽౾\x05ѓȪ\x02౾')
buf.write('౿\x05еț\x02౿ಀ\x05їȬ\x02ಀ')
buf.write('ಁ\x05нȟ\x02ಁಂ\x05яȨ\x02ಂ')
buf.write('ಃ\x05ћȮ\x02ಃȲ\x03\x02\x02\x02಄ಅ')
buf.write('\x05ѓȪ\x02ಅಆ\x05еț\x02ಆಇ')
buf.write('\x05їȬ\x02ಇಈ\x05ћȮ\x02ಈಉ')
buf.write('\x05хȣ\x02ಉಊ\x05ћȮ\x02ಊಋ')
buf.write('\x05хȣ\x02ಋಌ\x05ёȩ\x02ಌ\u0c8d')
buf.write('\x05яȨ\x02\u0c8dȴ\x03\x02\x02\x02ಎಏ\x05ѓ')
buf.write('Ȫ\x02ಏಐ\x05еț\x02ಐ\u0c91\x05љ')
buf.write('ȭ\x02\u0c91ಒ\x05љȭ\x02ಒಓ\x05х')
buf.write('ȣ\x02ಓಔ\x05яȨ\x02ಔಕ\x05с')
buf.write('ȡ\x02ಕȶ\x03\x02\x02\x02ಖಗ\x05ѓȪ')
buf.write('\x02ಗಘ\x05еț\x02ಘಙ\x05ћȮ')
buf.write('\x02ಙಚ\x05уȢ\x02ಚȸ\x03\x02\x02\x02ಛ')
buf.write("ಜ\x07'\x02\x02ಜಝ\x05їȬ\x02ಝಞ")
buf.write('\x05ёȩ\x02ಞಟ\x05ѡȱ\x02ಟಠ')
buf.write('\x05ћȮ\x02ಠಡ\x05ѥȳ\x02ಡಢ')
buf.write('\x05ѓȪ\x02ಢಣ\x05нȟ\x02ಣȺ')
buf.write("\x03\x02\x02\x02ತಥ\x07'\x02\x02ಥದ\x05ћȮ")
buf.write('\x02ದಧ\x05ѥȳ\x02ಧನ\x05ѓȪ')
buf.write('\x02ನ\u0ca9\x05нȟ\x02\u0ca9ȼ\x03\x02\x02\x02ಪ')
buf.write('ಫ\x05ѓȪ\x02ಫಬ\x05хȣ\x02ಬ')
buf.write('ಭ\x05ѓȪ\x02ಭಮ\x05нȟ\x02ಮ')
buf.write('ಯ\x05ыȦ\x02ಯರ\x05хȣ\x02ರ')
buf.write('ಱ\x05яȨ\x02ಱಲ\x05нȟ\x02ಲ')
buf.write('ಳ\x05лȞ\x02ಳȾ\x03\x02\x02\x02\u0cb4ವ')
buf.write('\x05ѓȪ\x02ವಶ\x05хȣ\x02ಶಷ')
buf.write('\x05џȰ\x02ಷಸ\x05ёȩ\x02ಸಹ')
buf.write('\x05ћȮ\x02ಹɀ\x03\x02\x02\x02\u0cba\u0cbb\x05ѓ')
buf.write('Ȫ\x02\u0cbb಼\x05ыȦ\x02಼ಽ\x05е')
buf.write('ț\x02ಽಾ\x05яȨ\x02ಾɂ\x03\x02\x02')
buf.write('\x02ಿೀ\x05ѓȪ\x02ೀು\x05ыȦ')
buf.write('\x02ುೂ\x05љȭ\x02ೂೃ\x07a\x02\x02ೃ')
buf.write('ೄ\x05хȣ\x02ೄ\u0cc5\x05яȨ\x02\u0cc5')
buf.write('ೆ\x05ћȮ\x02ೆೇ\x05нȟ\x02ೇ')
buf.write('ೈ\x05сȡ\x02ೈ\u0cc9\x05нȟ\x02\u0cc9')
buf.write('ೊ\x05їȬ\x02ೊɄ\x03\x02\x02\x02ೋೌ')
buf.write('\x05ѓȪ\x02ೌ್\x05ёȩ\x02್\u0cce')
buf.write('\x05љȭ\x02\u0cce\u0ccf\x05хȣ\x02\u0ccf\u0cd0')
buf.write('\x05ћȮ\x02\u0cd0\u0cd1\x05хȣ\x02\u0cd1\u0cd2')
buf.write('\x05џȰ\x02\u0cd2\u0cd3\x05нȟ\x02\u0cd3Ɇ')
buf.write('\x03\x02\x02\x02\u0cd4ೕ\x05ѓȪ\x02ೕೖ\x05ё')
buf.write('ȩ\x02ೖ\u0cd7\x05љȭ\x02\u0cd7\u0cd8\x05х')
buf.write('ȣ\x02\u0cd8\u0cd9\x05ћȮ\x02\u0cd9\u0cda\x05х')
buf.write('ȣ\x02\u0cda\u0cdb\x05џȰ\x02\u0cdb\u0cdc\x05н')
buf.write('ȟ\x02\u0cdcೝ\x05яȨ\x02ೝɈ\x03\x02\x02')
buf.write('\x02ೞ\u0cdf\x05ѓȪ\x02\u0cdfೠ\x05їȬ')
buf.write('\x02ೠೡ\x05еț\x02ೡೢ\x05сȡ')
buf.write('\x02ೢೣ\x05эȧ\x02ೣ\u0ce4\x05еț')
buf.write('\x02\u0ce4Ɋ\x03\x02\x02\x02\u0ce5೦\x05ѓȪ\x02೦')
buf.write('೧\x05їȬ\x02೧೨\x05нȟ\x02೨')
buf.write('೩\x05йȝ\x02೩೪\x05нȟ\x02೪')
buf.write('೫\x05лȞ\x02೫೬\x05хȣ\x02೬')
buf.write('೭\x05яȨ\x02೭೮\x05сȡ\x02೮')
buf.write('Ɍ\x03\x02\x02\x02೯\u0cf0\x05ѓȪ\x02\u0cf0ೱ')
buf.write('\x05їȬ\x02ೱೲ\x05нȟ\x02ೲ\u0cf3')
buf.write('\x05йȝ\x02\u0cf3\u0cf4\x05хȣ\x02\u0cf4\u0cf5')
buf.write('\x05љȭ\x02\u0cf5\u0cf6\x05хȣ\x02\u0cf6\u0cf7')
buf.write('\x05ёȩ\x02\u0cf7\u0cf8\x05яȨ\x02\u0cf8Ɏ')
buf.write('\x03\x02\x02\x02\u0cf9\u0cfa\x05ѓȪ\x02\u0cfa\u0cfb\x05ї')
buf.write('Ȭ\x02\u0cfb\u0cfc\x05нȟ\x02\u0cfc\u0cfd\x05љ')
buf.write('ȭ\x02\u0cfd\u0cfe\x05нȟ\x02\u0cfe\u0cff\x05я')
buf.write('Ȩ\x02\u0cffഀ\x05ћȮ\x02ഀɐ\x03\x02\x02')
buf.write('\x02ഁം\x05ѓȪ\x02ംഃ\x05їȬ')
buf.write('\x02ഃഄ\x05хȣ\x02ഄഅ\x05ёȩ')
buf.write('\x02അആ\x05їȬ\x02ആɒ\x03\x02\x02\x02ഇ')
buf.write('ഈ\x05ѓȪ\x02ഈഉ\x05їȬ\x02ഉ')
buf.write('ഊ\x05ёȩ\x02ഊഋ\x05йȝ\x02ഋ')
buf.write('ഌ\x05нȟ\x02ഌ\u0d0d\x05лȞ\x02\u0d0d')
buf.write('എ\x05ѝȯ\x02എഏ\x05їȬ\x02ഏ')
buf.write('ഐ\x05нȟ\x02ഐɔ\x03\x02\x02\x02\u0d11ഒ')
buf.write('\x05їȬ\x02ഒഓ\x05еț\x02ഓഔ')
buf.write('\x05хȣ\x02ഔക\x05љȭ\x02കഖ')
buf.write('\x05нȟ\x02ഖɖ\x03\x02\x02\x02ഗഘ\x05ї')
buf.write('Ȭ\x02ഘങ\x05еț\x02ങച\x05я')
buf.write('Ȩ\x02ചഛ\x05сȡ\x02ഛജ\x05н')
buf.write('ȟ\x02ജɘ\x03\x02\x02\x02ഝഞ\x05їȬ')
buf.write('\x02ഞട\x05еț\x02ടഠ\x05ѡȱ')
buf.write('\x02ഠɚ\x03\x02\x02\x02ഡഢ\x05їȬ\x02ഢ')
buf.write('ണ\x05нȟ\x02ണത\x05еț\x02ത')
buf.write('ഥ\x05лȞ\x02ഥɜ\x03\x02\x02\x02ദധ')
buf.write('\x05їȬ\x02ധന\x05нȟ\x02നഩ')
buf.write('\x05еț\x02ഩപ\x05ыȦ\x02പɞ')
buf.write('\x03\x02\x02\x02ഫബ\x05їȬ\x02ബഭ\x05н')
buf.write('ȟ\x02ഭമ\x05йȝ\x02മയ\x05ё')
buf.write('ȩ\x02യര\x05їȬ\x02രറ\x05л')
buf.write('Ȟ\x02റɠ\x03\x02\x02\x02ലള\x05їȬ')
buf.write('\x02ളഴ\x05нȟ\x02ഴവ\x05пȠ')
buf.write('\x02വɢ\x03\x02\x02\x02ശഷ\x05їȬ\x02ഷ')
buf.write('സ\x05нȟ\x02സഹ\x05пȠ\x02ഹ')
buf.write('ഺ\x05нȟ\x02ഺ഻\x05їȬ\x02഻')
buf.write('഼\x05нȟ\x02഼ഽ\x05яȨ\x02ഽ')
buf.write('ാ\x05йȝ\x02ാി\x05нȟ\x02ി')
buf.write('ɤ\x03\x02\x02\x02ീു\x05їȬ\x02ുൂ')
buf.write('\x05нȟ\x02ൂൃ\x05пȠ\x02ൃൄ')
buf.write('\x05нȟ\x02ൄ\u0d45\x05їȬ\x02\u0d45െ')
buf.write('\x05нȟ\x02െേ\x05яȨ\x02േൈ')
buf.write('\x05йȝ\x02ൈ\u0d49\x05хȣ\x02\u0d49ൊ')
buf.write('\x05яȨ\x02ൊോ\x05сȡ\x02ോɦ')
buf.write('\x03\x02\x02\x02ൌ്\x05їȬ\x02്ൎ\x05н')
buf.write('ȟ\x02ൎ൏\x05чȤ\x02൏\u0d50\x05н')
buf.write('ȟ\x02\u0d50\u0d51\x05йȝ\x02\u0d51\u0d52\x05ћ')
buf.write('Ȯ\x02\u0d52ɨ\x03\x02\x02\x02\u0d53ൔ\x05їȬ')
buf.write('\x02ൔൕ\x05нȟ\x02ൕൖ\x05ыȦ')
buf.write('\x02ൖൗ\x05хȣ\x02ൗ൘\x05нȟ')
buf.write('\x02൘൙\x05љȭ\x02൙൚\x07a\x02\x02൚')
buf.write('൛\x05ёȩ\x02൛൜\x05яȨ\x02൜')
buf.write('ɪ\x03\x02\x02\x02൝൞\x05їȬ\x02൞ൟ')
buf.write('\x05нȟ\x02ൟൠ\x05яȨ\x02ൠൡ')
buf.write('\x05еț\x02ൡൢ\x05эȧ\x02ൢൣ')
buf.write('\x05нȟ\x02ൣɬ\x03\x02\x02\x02\u0d64\u0d65\x05ї')
buf.write('Ȭ\x02\u0d65൦\x05нȟ\x02൦൧\x05ѓ')
buf.write('Ȫ\x02൧൨\x05ыȦ\x02൨൩\x05е')
buf.write('ț\x02൩൪\x05йȝ\x02൪൫\x05н')
buf.write('ȟ\x02൫ɮ\x03\x02\x02\x02൬൭\x05їȬ')
buf.write('\x02൭൮\x05нȟ\x02൮൯\x05љȭ')
buf.write('\x02൯൰\x05ѓȪ\x02൰൱\x05нȟ')
buf.write('\x02൱൲\x05йȝ\x02൲൳\x05ћȮ')
buf.write('\x02൳ɰ\x03\x02\x02\x02൴൵\x05їȬ\x02൵')
buf.write('൶\x05нȟ\x02൶൷\x05љȭ\x02൷')
buf.write('൸\x05ћȮ\x02൸൹\x05їȬ\x02൹')
buf.write('ൺ\x05хȣ\x02ൺൻ\x05йȝ\x02ൻ')
buf.write('ർ\x05ћȮ\x02ർൽ\x07a\x02\x02ൽൾ')
buf.write('\x05їȬ\x02ൾൿ\x05нȟ\x02ൿ\u0d80')
buf.write('\x05пȠ\x02\u0d80ඁ\x05нȟ\x02ඁං')
buf.write('\x05їȬ\x02ංඃ\x05нȟ\x02ඃ\u0d84')
buf.write('\x05яȨ\x02\u0d84අ\x05йȝ\x02අආ')
buf.write('\x05нȟ\x02ආඇ\x05љȭ\x02ඇɲ')
buf.write('\x03\x02\x02\x02ඈඉ\x05їȬ\x02ඉඊ\x05н')
buf.write('ȟ\x02ඊඋ\x05љȭ\x02උඌ\x05ѝ')
buf.write('ȯ\x02ඌඍ\x05ыȦ\x02ඍඎ\x05ћ')
buf.write('Ȯ\x02ඎɴ\x03\x02\x02\x02ඏඐ\x05їȬ')
buf.write('\x02ඐඑ\x05нȟ\x02එඒ\x05љȭ')
buf.write('\x02ඒඓ\x05ѝȯ\x02ඓඔ\x05ыȦ')
buf.write('\x02ඔඕ\x05ћȮ\x02ඕඖ\x07a\x02\x02ඖ')
buf.write('\u0d97\x05йȝ\x02\u0d97\u0d98\x05еț\x02\u0d98')
buf.write('\u0d99\x05йȝ\x02\u0d99ක\x05уȢ\x02ක')
buf.write('ඛ\x05нȟ\x02ඛɶ\x03\x02\x02\x02ගඝ')
buf.write('\x05їȬ\x02ඝඞ\x05нȟ\x02ඞඟ')
buf.write('\x05ћȮ\x02ඟච\x05ѝȯ\x02චඡ')
buf.write('\x05їȬ\x02ඡජ\x05яȨ\x02ජɸ')
buf.write('\x03\x02\x02\x02ඣඤ\x05їȬ\x02ඤඥ\x05н')
buf.write('ȟ\x02ඥඦ\x05ћȮ\x02ඦට\x05ѝ')
buf.write('ȯ\x02ටඨ\x05їȬ\x02ඨඩ\x05я')
buf.write('Ȩ\x02ඩඪ\x05хȣ\x02ඪණ\x05я')
buf.write('Ȩ\x02ණඬ\x05сȡ\x02ඬɺ\x03\x02\x02')
buf.write('\x02තථ\x05їȬ\x02ථද\x05нȟ')
buf.write('\x02දධ\x05ѝȯ\x02ධන\x05љȭ')
buf.write('\x02න\u0db2\x05нȟ\x02\u0db2ɼ\x03\x02\x02\x02ඳ')
buf.write('ප\x05їȬ\x02පඵ\x05нȟ\x02ඵ')
buf.write('බ\x05џȰ\x02බභ\x05нȟ\x02භ')
buf.write('ම\x05їȬ\x02මඹ\x05љȭ\x02ඹ')
buf.write('ය\x05нȟ\x02යɾ\x03\x02\x02\x02ර\u0dbc')
buf.write('\x05їȬ\x02\u0dbcල\x05нȟ\x02ල\u0dbe')
buf.write('\x05џȰ\x02\u0dbe\u0dbf\x05ёȩ\x02\u0dbfව')
buf.write('\x05щȥ\x02වශ\x05нȟ\x02ශʀ')
buf.write('\x03\x02\x02\x02ෂස\x05їȬ\x02සහ\x05х')
buf.write('ȣ\x02හළ\x05сȡ\x02ළෆ\x05у')
buf.write('Ȣ\x02ෆ\u0dc7\x05ћȮ\x02\u0dc7ʂ\x03\x02\x02')
buf.write('\x02\u0dc8\u0dc9\x05їȬ\x02\u0dc9්\x05ёȩ')
buf.write('\x02්\u0dcb\x05ыȦ\x02\u0dcb\u0dcc\x05ыȦ')
buf.write('\x02\u0dcc\u0dcd\x05зȜ\x02\u0dcd\u0dce\x05еț')
buf.write('\x02\u0dceා\x05йȝ\x02ාැ\x05щȥ')
buf.write('\x02ැʄ\x03\x02\x02\x02ෑි\x05їȬ\x02ි')
buf.write('ී\x05ёȩ\x02ීු\x05ыȦ\x02ු')
buf.write('\u0dd5\x05ыȦ\x02\u0dd5ූ\x05ѝȯ\x02ූ')
buf.write('\u0dd7\x05ѓȪ\x02\u0dd7ʆ\x03\x02\x02\x02ෘෙ')
buf.write('\x05їȬ\x02ෙේ\x05ёȩ\x02ේෛ')
buf.write('\x05ѡȱ\x02ෛʈ\x03\x02\x02\x02ොෝ\x05ї')
buf.write('Ȭ\x02ෝෞ\x05ёȩ\x02ෞෟ\x05ѡ')
buf.write('ȱ\x02ෟ\u0de0\x05хȣ\x02\u0de0\u0de1\x05л')
buf.write('Ȟ\x02\u0de1ʊ\x03\x02\x02\x02\u0de2\u0de3\x05їȬ')
buf.write('\x02\u0de3\u0de4\x05ёȩ\x02\u0de4\u0de5\x05ѡȱ')
buf.write('\x02\u0de5෦\x05љȭ\x02෦ʌ\x03\x02\x02\x02෧')
buf.write('෨\x05їȬ\x02෨෩\x05ѝȯ\x02෩')
buf.write('෪\x05ыȦ\x02෪෫\x05нȟ\x02෫')
buf.write('෬\x05љȭ\x02෬ʎ\x03\x02\x02\x02෭෮')
buf.write('\x05љȭ\x02෮෯\x05еț\x02෯\u0df0')
buf.write('\x05эȧ\x02\u0df0\u0df1\x05ѓȪ\x02\u0df1ෲ')
buf.write('\x05ыȦ\x02ෲෳ\x05нȟ\x02ෳʐ')
buf.write('\x03\x02\x02\x02෴\u0df5\x05љȭ\x02\u0df5\u0df6\x05е')
buf.write('ț\x02\u0df6\u0df7\x05џȰ\x02\u0df7\u0df8\x05н')
buf.write('ȟ\x02\u0df8ʒ\x03\x02\x02\x02\u0df9\u0dfa\x05љȭ')
buf.write('\x02\u0dfa\u0dfb\x05еț\x02\u0dfb\u0dfc\x05џȰ')
buf.write('\x02\u0dfc\u0dfd\x05нȟ\x02\u0dfd\u0dfe\x05ѓȪ')
buf.write('\x02\u0dfe\u0dff\x05ёȩ\x02\u0dff\u0e00\x05хȣ')
buf.write('\x02\u0e00ก\x05яȨ\x02กข\x05ћȮ')
buf.write('\x02ขʔ\x03\x02\x02\x02ฃค\x05љȭ\x02ค')
buf.write('ฅ\x05йȝ\x02ฅฆ\x05уȢ\x02ฆ')
buf.write('ง\x05нȟ\x02งจ\x05эȧ\x02จ')
buf.write('ฉ\x05еț\x02ฉʖ\x03\x02\x02\x02ชซ')
buf.write('\x05љȭ\x02ซฌ\x05йȝ\x02ฌญ')
buf.write('\x05уȢ\x02ญฎ\x05нȟ\x02ฎฏ')
buf.write('\x05эȧ\x02ฏฐ\x05еț\x02ฐฑ')
buf.write('\x05йȝ\x02ฑฒ\x05уȢ\x02ฒณ')
buf.write('\x05нȟ\x02ณด\x05йȝ\x02ดต')
buf.write('\x05щȥ\x02ตʘ\x03\x02\x02\x02ถท\x05љ')
buf.write('ȭ\x02ทธ\x05йȝ\x02ธน\x05я')
buf.write('Ȩ\x02นʚ\x03\x02\x02\x02บป\x05љȭ')
buf.write('\x02ปผ\x05нȟ\x02ผฝ\x05еț')
buf.write('\x02ฝพ\x05їȬ\x02พฟ\x05йȝ')
buf.write('\x02ฟภ\x05уȢ\x02ภʜ\x03\x02\x02\x02ม')
buf.write('ย\x05љȭ\x02ยร\x05нȟ\x02ร')
buf.write('ฤ\x05йȝ\x02ฤล\x05ёȩ\x02ล')
buf.write('ฦ\x05яȨ\x02ฦว\x05лȞ\x02ว')
buf.write('ʞ\x03\x02\x02\x02ศษ\x05љȭ\x02ษส')
buf.write('\x05нȟ\x02สห\x05нȟ\x02หฬ')
buf.write('\x05лȞ\x02ฬʠ\x03\x02\x02\x02อฮ\x05љ')
buf.write('ȭ\x02ฮฯ\x05нȟ\x02ฯะ\x05с')
buf.write('ȡ\x02ะั\x05эȧ\x02ัา\x05н')
buf.write('ȟ\x02าำ\x05яȨ\x02ำิ\x05ћ')
buf.write('Ȯ\x02ิʢ\x03\x02\x02\x02ีึ\x05љȭ')
buf.write('\x02ึื\x05нȟ\x02ืุ\x05ыȦ')
buf.write('\x02ุู\x05нȟ\x02ฺู\x05йȝ')
buf.write('\x02ฺ\u0e3b\x05ћȮ\x02\u0e3bʤ\x03\x02\x02\x02\u0e3c')
buf.write('\u0e3d\x05љȭ\x02\u0e3d\u0e3e\x05нȟ\x02\u0e3e')
buf.write('฿\x05ыȦ\x02฿เ\x05пȠ\x02เ')
buf.write('ʦ\x03\x02\x02\x02แโ\x05љȭ\x02โใ')
buf.write('\x05нȟ\x02ใไ\x05ѕȫ\x02ไๅ')
buf.write('\x05ѝȯ\x02ๅๆ\x05нȟ\x02ๆ็')
buf.write('\x05яȨ\x02็่\x05йȝ\x02่้')
buf.write('\x05нȟ\x02้ʨ\x03\x02\x02\x02๊๋\x05љ')
buf.write('ȭ\x02๋์\x05нȟ\x02์ํ\x05ѕ')
buf.write('ȫ\x02ํ๎\x05ѝȯ\x02๎๏\x05н')
buf.write('ȟ\x02๏๐\x05яȨ\x02๐๑\x05ћ')
buf.write('Ȯ\x02๑๒\x05хȣ\x02๒๓\x05е')
buf.write('ț\x02๓๔\x05ыȦ\x02๔ʪ\x03\x02\x02')
buf.write('\x02๕๖\x05љȭ\x02๖๗\x05нȟ')
buf.write('\x02๗๘\x05їȬ\x02๘๙\x05хȣ')
buf.write('\x02๙๚\x05еț\x02๚๛\x05ыȦ')
buf.write('\x02๛\u0e5c\x05хȣ\x02\u0e5c\u0e5d\x05ѧȴ')
buf.write('\x02\u0e5d\u0e5e\x05еț\x02\u0e5e\u0e5f\x05зȜ')
buf.write('\x02\u0e5f\u0e60\x05ыȦ\x02\u0e60\u0e61\x05нȟ')
buf.write('\x02\u0e61ʬ\x03\x02\x02\x02\u0e62\u0e63\x05љȭ\x02\u0e63')
buf.write('\u0e64\x05нȟ\x02\u0e64\u0e65\x05їȬ\x02\u0e65')
buf.write('\u0e66\x05хȣ\x02\u0e66\u0e67\x05еț\x02\u0e67')
buf.write('\u0e68\x05ыȦ\x02\u0e68\u0e69\x05ыȦ\x02\u0e69')
buf.write('\u0e6a\x05ѥȳ\x02\u0e6a\u0e6b\x07a\x02\x02\u0e6b\u0e6c')
buf.write('\x05їȬ\x02\u0e6c\u0e6d\x05нȟ\x02\u0e6d\u0e6e')
buf.write('\x05ѝȯ\x02\u0e6e\u0e6f\x05љȭ\x02\u0e6f\u0e70')
buf.write('\x05еț\x02\u0e70\u0e71\x05зȜ\x02\u0e71\u0e72')
buf.write('\x05ыȦ\x02\u0e72\u0e73\x05нȟ\x02\u0e73ʮ')
buf.write('\x03\x02\x02\x02\u0e74\u0e75\x05љȭ\x02\u0e75\u0e76\x05н')
buf.write('ȟ\x02\u0e76\u0e77\x05їȬ\x02\u0e77\u0e78\x05џ')
buf.write('Ȱ\x02\u0e78\u0e79\x05нȟ\x02\u0e79\u0e7a\x05ї')
buf.write('Ȭ\x02\u0e7a\u0e7b\x05нȟ\x02\u0e7b\u0e7c\x05ї')
buf.write('Ȭ\x02\u0e7c\u0e7d\x05їȬ\x02\u0e7d\u0e7e\x05ё')
buf.write('ȩ\x02\u0e7e\u0e7f\x05їȬ\x02\u0e7fʰ\x03\x02\x02')
buf.write('\x02\u0e80ກ\x05љȭ\x02ກຂ\x05нȟ')
buf.write('\x02ຂ\u0e83\x05љȭ\x02\u0e83ຄ\x05љȭ')
buf.write('\x02ຄ\u0e85\x05хȣ\x02\u0e85ຆ\x05ёȩ')
buf.write('\x02ຆງ\x05яȨ\x02ງຈ\x05ћȮ')
buf.write('\x02ຈຉ\x05хȣ\x02ຉຊ\x05эȧ')
buf.write('\x02ຊ\u0e8b\x05нȟ\x02\u0e8bຌ\x05ѧȴ')
buf.write('\x02ຌຍ\x05ёȩ\x02ຍຎ\x05яȨ')
buf.write('\x02ຎຏ\x05нȟ\x02ຏʲ\x03\x02\x02\x02ຐ')
buf.write('ຑ\x05љȭ\x02ຑຒ\x05нȟ\x02ຒ')
buf.write('ຓ\x05ћȮ\x02ຓʴ\x03\x02\x02\x02ດຕ')
buf.write('\x05љȭ\x02ຕຖ\x05нȟ\x02ຖທ')
buf.write('\x05ћȮ\x02ທຘ\x05љȭ\x02ຘʶ')
buf.write('\x03\x02\x02\x02ນບ\x05љȭ\x02ບປ\x05н')
buf.write('ȟ\x02ປຜ\x05ћȮ\x02ຜຝ\x05ћ')
buf.write('Ȯ\x02ຝພ\x05хȣ\x02ພຟ\x05я')
buf.write('Ȩ\x02ຟຠ\x05сȡ\x02ຠມ\x05љ')
buf.write('ȭ\x02ມʸ\x03\x02\x02\x02ຢຣ\x05љȭ')
buf.write('\x02ຣ\u0ea4\x05уȢ\x02\u0ea4ລ\x05еț')
buf.write('\x02ລ\u0ea6\x05їȬ\x02\u0ea6ວ\x05нȟ')
buf.write('\x02ວʺ\x03\x02\x02\x02ຨຩ\x05љȭ\x02ຩ')
buf.write('ສ\x05уȢ\x02ສຫ\x05ёȩ\x02ຫ')
buf.write('ຬ\x05ѡȱ\x02ຬʼ\x03\x02\x02\x02ອຮ')
buf.write('\x05љȭ\x02ຮຯ\x05уȢ\x02ຯະ')
buf.write('\x05ѝȯ\x02ະັ\x05ћȮ\x02ັາ')
buf.write('\x05лȞ\x02າຳ\x05ёȩ\x02ຳິ')
buf.write('\x05ѡȱ\x02ິີ\x05яȨ\x02ີʾ')
buf.write('\x03\x02\x02\x02ຶື\x05љȭ\x02ືຸ\x05х')
buf.write('ȣ\x02ຸູ\x05зȜ\x02຺ູ\x05ы')
buf.write('Ȧ\x02຺ົ\x05хȣ\x02ົຼ\x05я')
buf.write('Ȩ\x02ຼຽ\x05сȡ\x02ຽ\u0ebe\x05љ')
buf.write('ȭ\x02\u0ebeˀ\x03\x02\x02\x02\u0ebfເ\x05љȭ')
buf.write('\x02ເແ\x05хȣ\x02ແໂ\x05сȡ')
buf.write('\x02ໂໃ\x05яȨ\x02ໃໄ\x05ћȮ')
buf.write('\x02ໄ\u0ec5\x05ѥȳ\x02\u0ec5ໆ\x05ѓȪ')
buf.write('\x02ໆ\u0ec7\x05нȟ\x02\u0ec7˂\x03\x02\x02\x02່')
buf.write('້\x05љȭ\x02້໊\x05хȣ\x02໊')
buf.write('໋\x05эȧ\x02໋໌\x05ѓȪ\x02໌')
buf.write('ໍ\x05ыȦ\x02ໍ\u0ece\x05нȟ\x02\u0ece')
buf.write('\u0ecf\x07a\x02\x02\u0ecf໐\x05хȣ\x02໐໑')
buf.write('\x05яȨ\x02໑໒\x05ћȮ\x02໒໓')
buf.write('\x05нȟ\x02໓໔\x05сȡ\x02໔໕')
buf.write('\x05нȟ\x02໕໖\x05їȬ\x02໖˄')
buf.write('\x03\x02\x02\x02໗໘\x05љȭ\x02໘໙\x05х')
buf.write('ȣ\x02໙\u0eda\x05яȨ\x02\u0eda\u0edb\x05с')
buf.write('ȡ\x02\u0edbໜ\x05ыȦ\x02ໜໝ\x05н')
buf.write('ȟ\x02ໝˆ\x03\x02\x02\x02ໞໟ\x05љȭ')
buf.write('\x02ໟ\u0ee0\x05хȣ\x02\u0ee0\u0ee1\x05ѧȴ')
buf.write('\x02\u0ee1\u0ee2\x05нȟ\x02\u0ee2ˈ\x03\x02\x02\x02\u0ee3')
buf.write('\u0ee4\x05љȭ\x02\u0ee4\u0ee5\x05щȥ\x02\u0ee5')
buf.write('\u0ee6\x05хȣ\x02\u0ee6\u0ee7\x05ѓȪ\x02\u0ee7')
buf.write('ˊ\x03\x02\x02\x02\u0ee8\u0ee9\x05љȭ\x02\u0ee9\u0eea')
buf.write('\x05эȧ\x02\u0eea\u0eeb\x05еț\x02\u0eeb\u0eec')
buf.write('\x05ыȦ\x02\u0eec\u0eed\x05ыȦ\x02\u0eed\u0eee')
buf.write('\x05хȣ\x02\u0eee\u0eef\x05яȨ\x02\u0eef\u0ef0')
buf.write('\x05ћȮ\x02\u0ef0ˌ\x03\x02\x02\x02\u0ef1\u0ef2\x05љ')
buf.write('ȭ\x02\u0ef2\u0ef3\x05яȨ\x02\u0ef3\u0ef4\x05е')
buf.write('ț\x02\u0ef4\u0ef5\x05ѓȪ\x02\u0ef5\u0ef6\x05љ')
buf.write('ȭ\x02\u0ef6\u0ef7\x05уȢ\x02\u0ef7\u0ef8\x05ё')
buf.write('ȩ\x02\u0ef8\u0ef9\x05ћȮ\x02\u0ef9ˎ\x03\x02\x02')
buf.write('\x02\u0efa\u0efb\x05љȭ\x02\u0efb\u0efc\x05ёȩ')
buf.write('\x02\u0efc\u0efd\x05эȧ\x02\u0efd\u0efe\x05нȟ')
buf.write('\x02\u0efeː\x03\x02\x02\x02\u0effༀ\x05љȭ\x02ༀ')
buf.write('༁\x05ѓȪ\x02༁༂\x05нȟ\x02༂')
buf.write('༃\x05йȝ\x02༃༄\x05хȣ\x02༄')
buf.write('༅\x05пȠ\x02༅༆\x05хȣ\x02༆')
buf.write('༇\x05йȝ\x02༇༈\x05еț\x02༈')
buf.write('༉\x05ћȮ\x02༉༊\x05хȣ\x02༊')
buf.write('་\x05ёȩ\x02་༌\x05яȨ\x02༌')
buf.write('˒\x03\x02\x02\x02།༎\x05љȭ\x02༎༏')
buf.write('\x05ѕȫ\x02༏༐\x05ыȦ\x02༐༑')
buf.write('\x05лȞ\x02༑༒\x05еț\x02༒༓')
buf.write('\x05ћȮ\x02༓༔\x05еț\x02༔˔')
buf.write('\x03\x02\x02\x02༕༖\x05љȭ\x02༖༗\x05ѕ')
buf.write('ȫ\x02༗༘\x05ыȦ\x02༘༙\x05н')
buf.write('ȟ\x02༙༚\x05їȬ\x02༚༛\x05ї')
buf.write('Ȭ\x02༛༜\x05ёȩ\x02༜༝\x05ї')
buf.write('Ȭ\x02༝˖\x03\x02\x02\x02༞༟\x05љȭ')
buf.write('\x02༟༠\x05ћȮ\x02༠༡\x05еț')
buf.write('\x02༡༢\x05яȨ\x02༢༣\x05лȞ')
buf.write('\x02༣༤\x05еț\x02༤༥\x05ыȦ')
buf.write('\x02༥༦\x05ёȩ\x02༦༧\x05яȨ')
buf.write('\x02༧༨\x05нȟ\x02༨˘\x03\x02\x02\x02༩')
buf.write('༪\x05љȭ\x02༪༫\x05ћȮ\x02༫')
buf.write('༬\x05еț\x02༬༭\x05їȬ\x02༭')
buf.write('༮\x05ћȮ\x02༮˚\x03\x02\x02\x02༯༰')
buf.write('\x05љȭ\x02༰༱\x05ћȮ\x02༱༲')
buf.write('\x05еț\x02༲༳\x05їȬ\x02༳༴')
buf.write('\x05ћȮ\x02༴༵\x05ѝȯ\x02༵༶')
buf.write('\x05ѓȪ\x02༶˜\x03\x02\x02\x02༷༸\x05љ')
buf.write('ȭ\x02༸༹\x05ћȮ\x02༹༺\x05е')
buf.write('ț\x02༺༻\x05ћȮ\x02༻༼\x05н')
buf.write('ȟ\x02༼༽\x05эȧ\x02༽༾\x05н')
buf.write('ȟ\x02༾༿\x05яȨ\x02༿ཀ\x05ћ')
buf.write('Ȯ\x02ཀ˞\x03\x02\x02\x02ཁག\x05љȭ')
buf.write('\x02གགྷ\x05ћȮ\x02གྷང\x05еț')
buf.write('\x02ངཅ\x05ћȮ\x02ཅཆ\x05нȟ')
buf.write('\x02ཆཇ\x05эȧ\x02ཇ\u0f48\x05нȟ')
buf.write('\x02\u0f48ཉ\x05яȨ\x02ཉཊ\x05ћȮ')
buf.write('\x02ཊཋ\x07a\x02\x02ཋཌ\x05хȣ\x02ཌ')
buf.write('ཌྷ\x05лȞ\x02ཌྷˠ\x03\x02\x02\x02ཎཏ')
buf.write('\x05љȭ\x02ཏཐ\x05ћȮ\x02ཐད')
buf.write('\x05еț\x02དདྷ\x05ћȮ\x02དྷན')
buf.write('\x05хȣ\x02ནཔ\x05йȝ\x02པˢ')
buf.write('\x03\x02\x02\x02ཕབ\x05љȭ\x02བབྷ\x05ћ')
buf.write('Ȯ\x02བྷམ\x05еț\x02མཙ\x05ћ')
buf.write('Ȯ\x02ཙཚ\x05хȣ\x02ཚཛ\x05љ')
buf.write('ȭ\x02ཛཛྷ\x05ћȮ\x02ཛྷཝ\x05х')
buf.write('ȣ\x02ཝཞ\x05йȝ\x02ཞཟ\x05љ')
buf.write('ȭ\x02ཟˤ\x03\x02\x02\x02འཡ\x05љȭ')
buf.write('\x02ཡར\x05ћȮ\x02རལ\x05їȬ')
buf.write('\x02ལཤ\x05хȣ\x02ཤཥ\x05яȨ')
buf.write('\x02ཥས\x05сȡ\x02ས˦\x03\x02\x02\x02ཧ')
buf.write('ཨ\x05љȭ\x02ཨཀྵ\x05ѝȯ\x02ཀྵ')
buf.write('ཪ\x05зȜ\x02ཪཫ\x05эȧ\x02ཫ')
buf.write('ཬ\x05ѝȯ\x02ཬ\u0f6d\x05ыȦ\x02\u0f6d')
buf.write('\u0f6e\x05ћȮ\x02\u0f6e\u0f6f\x05хȣ\x02\u0f6f')
buf.write('\u0f70\x05љȭ\x02\u0f70ཱ\x05нȟ\x02ཱ')
buf.write('ི\x05ћȮ\x02ི˨\x03\x02\x02\x02ཱིུ')
buf.write('\x05љȭ\x02ཱུུ\x05ѝȯ\x02ཱུྲྀ')
buf.write('\x05зȜ\x02ྲྀཷ\x05ѓȪ\x02ཷླྀ')
buf.write('\x05еț\x02ླྀཹ\x05їȬ\x02ཹེ')
buf.write('\x05ћȮ\x02ེཻ\x05хȣ\x02ཻོ')
buf.write('\x05ћȮ\x02ོཽ\x05хȣ\x02ཽཾ')
buf.write('\x05ёȩ\x02ཾཿ\x05яȨ\x02ཿ˪')
buf.write('\x03\x02\x02\x02ཱྀྀ\x05љȭ\x02ཱྀྂ\x05ѝ')
buf.write('ȯ\x02ྂྃ\x05зȜ\x02྄ྃ\x05љ')
buf.write('ȭ\x02྄྅\x05ћȮ\x02྅྆\x05х')
buf.write('ȣ\x02྆྇\x05ћȮ\x02྇ྈ\x05ѝ')
buf.write('ȯ\x02ྈྉ\x05ћȮ\x02ྉྊ\x05е')
buf.write('ț\x02ྊྋ\x05зȜ\x02ྋྌ\x05ы')
buf.write('Ȧ\x02ྌྍ\x05нȟ\x02ྍˬ\x03\x02\x02')
buf.write('\x02ྎྏ\x05љȭ\x02ྏྐ\x05ѝȯ')
buf.write('\x02ྐྑ\x05зȜ\x02ྑྒ\x05ћȮ')
buf.write('\x02ྒྒྷ\x05ѥȳ\x02ྒྷྔ\x05ѓȪ')
buf.write('\x02ྔྕ\x05нȟ\x02ྕˮ\x03\x02\x02\x02ྖ')
buf.write('ྗ\x05љȭ\x02ྗ\u0f98\x05ѝȯ\x02\u0f98')
buf.write('ྙ\x05йȝ\x02ྙྚ\x05йȝ\x02ྚ')
buf.write('ྛ\x05нȟ\x02ྛྜ\x05љȭ\x02ྜ')
buf.write('ྜྷ\x05љȭ\x02ྜྷ˰\x03\x02\x02\x02ྞྟ')
buf.write('\x05љȭ\x02ྟྠ\x05ѝȯ\x02ྠྡ')
buf.write('\x05љȭ\x02ྡྡྷ\x05ѓȪ\x02ྡྷྣ')
buf.write('\x05нȟ\x02ྣྤ\x05яȨ\x02ྤྥ')
buf.write('\x05лȞ\x02ྥ˲\x03\x02\x02\x02ྦྦྷ\x05ћ')
buf.write('Ȯ\x02ྦྷྨ\x05еț\x02ྨྩ\x05з')
buf.write('Ȝ\x02ྩྪ\x05ыȦ\x02ྪྫ\x05н')
buf.write('ȟ\x02ྫ˴\x03\x02\x02\x02ྫྷྭ\x05ћȮ')
buf.write('\x02ྭྮ\x05уȢ\x02ྮྯ\x05нȟ')
buf.write('\x02ྯ˶\x03\x02\x02\x02ྰྱ\x05ћȮ\x02ྱ')
buf.write('ྲ\x05уȢ\x02ྲླ\x05нȟ\x02ླ')
buf.write('ྴ\x05яȨ\x02ྴ˸\x03\x02\x02\x02ྵྶ')
buf.write('\x05ћȮ\x02ྶྷ\x05хȣ\x02ྷྸ')
buf.write('\x05эȧ\x02ྸྐྵ\x05нȟ\x02ྐྵ˺')
buf.write('\x03\x02\x02\x02ྺྻ\x05ћȮ\x02ྻྼ\x05х')
buf.write('ȣ\x02ྼ\u0fbd\x05эȧ\x02\u0fbd྾\x05н')
buf.write('ȟ\x02྾྿\x05љȭ\x02྿࿀\x05ћ')
buf.write('Ȯ\x02࿀࿁\x05еț\x02࿁࿂\x05э')
buf.write('ȧ\x02࿂࿃\x05ѓȪ\x02࿃˼\x03\x02\x02')
buf.write('\x02࿄࿅\x05ћȮ\x02࿅࿆\x05хȣ')
buf.write('\x02࿆࿇\x05эȧ\x02࿇࿈\x05нȟ')
buf.write('\x02࿈࿉\x05љȭ\x02࿉࿊\x05ћȮ')
buf.write('\x02࿊࿋\x05еț\x02࿋࿌\x05эȧ')
buf.write('\x02࿌\u0fcd\x05ѓȪ\x02\u0fcd࿎\x07a\x02\x02࿎')
buf.write('࿏\x05ыȦ\x02࿏࿐\x05ћȮ\x02࿐')
buf.write('࿑\x05ѧȴ\x02࿑࿒\x07a\x02\x02࿒࿓')
buf.write('\x05ѝȯ\x02࿓࿔\x05яȨ\x02࿔࿕')
buf.write('\x05йȝ\x02࿕࿖\x05ёȩ\x02࿖࿗')
buf.write('\x05яȨ\x02࿗࿘\x05љȭ\x02࿘࿙')
buf.write('\x05ћȮ\x02࿙࿚\x05їȬ\x02࿚\u0fdb')
buf.write('\x05еț\x02\u0fdb\u0fdc\x05хȣ\x02\u0fdc\u0fdd')
buf.write('\x05яȨ\x02\u0fdd\u0fde\x05нȟ\x02\u0fde\u0fdf')
buf.write('\x05лȞ\x02\u0fdf˾\x03\x02\x02\x02\u0fe0\u0fe1\x05ћ')
buf.write('Ȯ\x02\u0fe1\u0fe2\x05хȣ\x02\u0fe2\u0fe3\x05э')
buf.write('ȧ\x02\u0fe3\u0fe4\x05нȟ\x02\u0fe4\u0fe5\x05љ')
buf.write('ȭ\x02\u0fe5\u0fe6\x05ћȮ\x02\u0fe6\u0fe7\x05е')
buf.write('ț\x02\u0fe7\u0fe8\x05эȧ\x02\u0fe8\u0fe9\x05ѓ')
buf.write('Ȫ\x02\u0fe9\u0fea\x07a\x02\x02\u0fea\u0feb\x05ћȮ')
buf.write('\x02\u0feb\u0fec\x05ѧȴ\x02\u0fec\u0fed\x07a\x02\x02\u0fed')
buf.write('\u0fee\x05ѝȯ\x02\u0fee\u0fef\x05яȨ\x02\u0fef')
buf.write('\u0ff0\x05йȝ\x02\u0ff0\u0ff1\x05ёȩ\x02\u0ff1')
buf.write('\u0ff2\x05яȨ\x02\u0ff2\u0ff3\x05љȭ\x02\u0ff3')
buf.write('\u0ff4\x05ћȮ\x02\u0ff4\u0ff5\x05їȬ\x02\u0ff5')
buf.write('\u0ff6\x05еț\x02\u0ff6\u0ff7\x05хȣ\x02\u0ff7')
buf.write('\u0ff8\x05яȨ\x02\u0ff8\u0ff9\x05нȟ\x02\u0ff9')
buf.write('\u0ffa\x05лȞ\x02\u0ffà\x03\x02\x02\x02\u0ffb\u0ffc')
buf.write('\x05ћȮ\x02\u0ffc\u0ffd\x05хȣ\x02\u0ffd\u0ffe')
buf.write('\x05эȧ\x02\u0ffe\u0fff\x05нȟ\x02\u0fffက')
buf.write('\x05љȭ\x02ကခ\x05ћȮ\x02ခဂ')
buf.write('\x05еț\x02ဂဃ\x05эȧ\x02ဃင')
buf.write('\x05ѓȪ\x02ငစ\x07a\x02\x02စဆ\x05ѝ')
buf.write('ȯ\x02ဆဇ\x05яȨ\x02ဇဈ\x05й')
buf.write('ȝ\x02ဈဉ\x05ёȩ\x02ဉည\x05я')
buf.write('Ȩ\x02ညဋ\x05љȭ\x02ဋဌ\x05ћ')
buf.write('Ȯ\x02ဌဍ\x05їȬ\x02ဍဎ\x05е')
buf.write('ț\x02ဎဏ\x05хȣ\x02ဏတ\x05я')
buf.write('Ȩ\x02တထ\x05нȟ\x02ထဒ\x05л')
buf.write('Ȟ\x02ဒ̂\x03\x02\x02\x02ဓန\x05ћȮ')
buf.write('\x02နပ\x05хȣ\x02ပဖ\x05эȧ')
buf.write('\x02ဖဗ\x05нȟ\x02ဗဘ\x05ѧȴ')
buf.write('\x02ဘမ\x05ёȩ\x02မယ\x05яȨ')
buf.write('\x02ယရ\x05нȟ\x02ရလ\x07a\x02\x02လ')
buf.write('ဝ\x05еț\x02ဝသ\x05зȜ\x02သ')
buf.write('ဟ\x05зȜ\x02ဟဠ\x05їȬ\x02ဠ')
buf.write('̄\x03\x02\x02\x02အဢ\x05ћȮ\x02ဢဣ')
buf.write('\x05хȣ\x02ဣဤ\x05эȧ\x02ဤဥ')
buf.write('\x05нȟ\x02ဥဦ\x05ѧȴ\x02ဦဧ')
buf.write('\x05ёȩ\x02ဧဨ\x05яȨ\x02ဨဩ')
buf.write('\x05нȟ\x02ဩဪ\x07a\x02\x02ဪါ\x05у')
buf.write('Ȣ\x02ါာ\x05ёȩ\x02ာိ\x05ѝ')
buf.write('ȯ\x02ိီ\x05їȬ\x02ီ̆\x03\x02\x02')
buf.write('\x02ုူ\x05ћȮ\x02ူေ\x05хȣ')
buf.write('\x02ေဲ\x05эȧ\x02ဲဳ\x05нȟ')
buf.write('\x02ဳဴ\x05ѧȴ\x02ဴဵ\x05ёȩ')
buf.write('\x02ဵံ\x05яȨ\x02ံ့\x05нȟ')
buf.write('\x02့း\x07a\x02\x02း္\x05эȧ\x02္')
buf.write('်\x05хȣ\x02်ျ\x05яȨ\x02ျ')
buf.write('ြ\x05ѝȯ\x02ြွ\x05ћȮ\x02ွ')
buf.write('ှ\x05нȟ\x02ှ̈\x03\x02\x02\x02ဿ၀')
buf.write('\x05ћȮ\x02၀၁\x05хȣ\x02၁၂')
buf.write('\x05эȧ\x02၂၃\x05нȟ\x02၃၄')
buf.write('\x05ѧȴ\x02၄၅\x05ёȩ\x02၅၆')
buf.write('\x05яȨ\x02၆၇\x05нȟ\x02၇၈')
buf.write('\x07a\x02\x02၈၉\x05їȬ\x02၉၊\x05н')
buf.write('ȟ\x02၊။\x05сȡ\x02။၌\x05х')
buf.write('ȣ\x02၌၍\x05ёȩ\x02၍၎\x05я')
buf.write('Ȩ\x02၎̊\x03\x02\x02\x02၏ၐ\x05ћȮ')
buf.write('\x02ၐၑ\x05ёȩ\x02ၑ̌\x03\x02\x02\x02ၒ')
buf.write('ၓ\x05ћȮ\x02ၓၔ\x05їȬ\x02ၔ')
buf.write('ၕ\x05еț\x02ၕၖ\x05хȣ\x02ၖ')
buf.write('ၗ\x05ыȦ\x02ၗၘ\x05хȣ\x02ၘ')
buf.write('ၙ\x05яȨ\x02ၙၚ\x05сȡ\x02ၚ')
buf.write('̎\x03\x02\x02\x02ၛၜ\x05ћȮ\x02ၜၝ')
buf.write('\x05їȬ\x02ၝၞ\x05еț\x02ၞၟ')
buf.write('\x05яȨ\x02ၟၠ\x05љȭ\x02ၠၡ')
buf.write('\x05еț\x02ၡၢ\x05йȝ\x02ၢၣ')
buf.write('\x05ћȮ\x02ၣၤ\x05хȣ\x02ၤၥ')
buf.write('\x05ёȩ\x02ၥၦ\x05яȨ\x02ၦ̐')
buf.write('\x03\x02\x02\x02ၧၨ\x05ћȮ\x02ၨၩ\x05ї')
buf.write('Ȭ\x02ၩၪ\x05еț\x02ၪၫ\x05я')
buf.write('Ȩ\x02ၫၬ\x05љȭ\x02ၬၭ\x05ы')
buf.write('Ȧ\x02ၭၮ\x05еț\x02ၮၯ\x05ћ')
buf.write('Ȯ\x02ၯၰ\x05нȟ\x02ၰ̒\x03\x02\x02')
buf.write('\x02ၱၲ\x05ћȮ\x02ၲၳ\x05їȬ')
buf.write('\x02ၳၴ\x05нȟ\x02ၴၵ\x05еț')
buf.write('\x02ၵၶ\x05ћȮ\x02ၶ̔\x03\x02\x02\x02ၷ')
buf.write('ၸ\x05ћȮ\x02ၸၹ\x05їȬ\x02ၹ')
buf.write('ၺ\x05хȣ\x02ၺၻ\x05сȡ\x02ၻ')
buf.write('ၼ\x05сȡ\x02ၼၽ\x05нȟ\x02ၽ')
buf.write('ၾ\x05їȬ\x02ၾ̖\x03\x02\x02\x02ၿႀ')
buf.write('\x05ћȮ\x02ႀႁ\x05їȬ\x02ႁႂ')
buf.write('\x05хȣ\x02ႂႃ\x05эȧ\x02ႃ̘')
buf.write('\x03\x02\x02\x02ႄႅ\x05ћȮ\x02ႅႆ\x05ї')
buf.write('Ȭ\x02ႆႇ\x05ѝȯ\x02ႇႈ\x05н')
buf.write('ȟ\x02ႈ̚\x03\x02\x02\x02ႉႊ\x05ћȮ')
buf.write('\x02ႊႋ\x05їȬ\x02ႋႌ\x05ѝȯ')
buf.write('\x02ႌႍ\x05яȨ\x02ႍႎ\x05йȝ')
buf.write('\x02ႎႏ\x05еț\x02ႏ႐\x05ћȮ')
buf.write('\x02႐႑\x05нȟ\x02႑̜\x03\x02\x02\x02႒')
buf.write('႓\x05ћȮ\x02႓႔\x05ѥȳ\x02႔')
buf.write('႕\x05ѓȪ\x02႕႖\x05нȟ\x02႖')
buf.write('̞\x03\x02\x02\x02႗႘\x05ѝȯ\x02႘႙')
buf.write('\x05яȨ\x02႙ႚ\x05зȜ\x02ႚႛ')
buf.write('\x05ёȩ\x02ႛႜ\x05ѝȯ\x02ႜႝ')
buf.write('\x05яȨ\x02ႝ႞\x05лȞ\x02႞႟')
buf.write('\x05нȟ\x02႟Ⴀ\x05лȞ\x02Ⴀ̠')
buf.write('\x03\x02\x02\x02ႡႢ\x05ѝȯ\x02ႢႣ\x05я')
buf.write('Ȩ\x02ႣႤ\x05лȞ\x02ႤႥ\x05н')
buf.write('ȟ\x02ႥႦ\x05їȬ\x02Ⴆ̢\x03\x02\x02')
buf.write('\x02ႧႨ\x05ѝȯ\x02ႨႩ\x05яȨ')
buf.write('\x02ႩႪ\x05хȣ\x02ႪႫ\x05ёȩ')
buf.write('\x02ႫႬ\x05яȨ\x02Ⴌ̤\x03\x02\x02\x02Ⴍ')
buf.write('Ⴎ\x05ѝȯ\x02ႮႯ\x05яȨ\x02Ⴏ')
buf.write('Ⴐ\x05хȣ\x02ႰႱ\x05ѕȫ\x02Ⴑ')
buf.write('Ⴒ\x05ѝȯ\x02ႲႳ\x05нȟ\x02Ⴓ')
buf.write('̦\x03\x02\x02\x02ႴႵ\x05ѝȯ\x02ႵႶ')
buf.write('\x05яȨ\x02ႶႷ\x05ыȦ\x02ႷႸ')
buf.write('\x05хȣ\x02ႸႹ\x05эȧ\x02ႹႺ')
buf.write('\x05хȣ\x02ႺႻ\x05ћȮ\x02ႻႼ')
buf.write('\x05нȟ\x02ႼႽ\x05лȞ\x02Ⴝ̨')
buf.write('\x03\x02\x02\x02ႾႿ\x05ѝȯ\x02ႿჀ\x05я')
buf.write('Ȩ\x02ჀჁ\x05ѓȪ\x02ჁჂ\x05х')
buf.write('ȣ\x02ჂჃ\x05џȰ\x02ჃჄ\x05ё')
buf.write('ȩ\x02ჄჅ\x05ћȮ\x02Ⴥ̪\x03\x02\x02')
buf.write('\x02\u10c6Ⴧ\x05ѝȯ\x02Ⴧ\u10c8\x05яȨ')
buf.write('\x02\u10c8\u10c9\x05ћȮ\x02\u10c9\u10ca\x05хȣ')
buf.write('\x02\u10ca\u10cb\x05ыȦ\x02\u10cb̬\x03\x02\x02\x02\u10cc')
buf.write('Ⴭ\x05ѝȯ\x02Ⴭ\u10ce\x05ѓȪ\x02\u10ce')
buf.write('\u10cf\x05лȞ\x02\u10cfა\x05еț\x02ა')
buf.write('ბ\x05ћȮ\x02ბგ\x05нȟ\x02გ')
buf.write('̮\x03\x02\x02\x02დე\x05ѝȯ\x02ევ')
buf.write('\x05ѓȪ\x02ვზ\x05лȞ\x02ზთ')
buf.write('\x05еț\x02თი\x05ћȮ\x02იკ')
buf.write('\x05нȟ\x02კლ\x05лȞ\x02ლ̰')
buf.write('\x03\x02\x02\x02მნ\x05ѝȯ\x02ნო\x05ѓ')
buf.write('Ȫ\x02ოპ\x05љȭ\x02პჟ\x05н')
buf.write('ȟ\x02ჟრ\x05їȬ\x02რს\x05ћ')
buf.write('Ȯ\x02ს̲\x03\x02\x02\x02ტუ\x05ѝȯ')
buf.write('\x02უფ\x05їȬ\x02ფქ\x05ёȩ')
buf.write('\x02ქღ\x05ѡȱ\x02ღყ\x05хȣ')
buf.write('\x02ყშ\x05лȞ\x02შ̴\x03\x02\x02\x02ჩ')
buf.write('ც\x05ѝȯ\x02ცძ\x05љȭ\x02ძ')
buf.write('წ\x05нȟ\x02წ̶\x03\x02\x02\x02ჭხ')
buf.write('\x05ѝȯ\x02ხჯ\x05љȭ\x02ჯჰ')
buf.write('\x05хȣ\x02ჰჱ\x05яȨ\x02ჱჲ')
buf.write('\x05сȡ\x02ჲ̸\x03\x02\x02\x02ჳჴ\x05џ')
buf.write('Ȱ\x02ჴჵ\x05еț\x02ჵჶ\x05ы')
buf.write('Ȧ\x02ჶჷ\x05хȣ\x02ჷჸ\x05л')
buf.write('Ȟ\x02ჸჹ\x05еț\x02ჹჺ\x05ћ')
buf.write('Ȯ\x02ჺ჻\x05нȟ\x02჻̺\x03\x02\x02')
buf.write('\x02ჼჽ\x05џȰ\x02ჽჾ\x05еț')
buf.write('\x02ჾჿ\x05ыȦ\x02ჿᄀ\x05ѝȯ')
buf.write('\x02ᄀᄁ\x05нȟ\x02ᄁ̼\x03\x02\x02\x02ᄂ')
buf.write('ᄃ\x05џȰ\x02ᄃᄄ\x05еț\x02ᄄ')
buf.write('ᄅ\x05ыȦ\x02ᄅᄆ\x05ѝȯ\x02ᄆ')
buf.write('ᄇ\x05нȟ\x02ᄇᄈ\x05љȭ\x02ᄈ')
buf.write('̾\x03\x02\x02\x02ᄉᄊ\x05џȰ\x02ᄊᄋ')
buf.write('\x05еț\x02ᄋᄌ\x05їȬ\x02ᄌᄍ')
buf.write('\x05йȝ\x02ᄍᄎ\x05уȢ\x02ᄎᄏ')
buf.write('\x05еț\x02ᄏᄐ\x05їȬ\x02ᄐ̀')
buf.write('\x03\x02\x02\x02ᄑᄒ\x05џȰ\x02ᄒᄓ\x05е')
buf.write('ț\x02ᄓᄔ\x05їȬ\x02ᄔᄕ\x05й')
buf.write('ȝ\x02ᄕᄖ\x05уȢ\x02ᄖᄗ\x05е')
buf.write('ț\x02ᄗᄘ\x05їȬ\x02ᄘᄙ\x074')
buf.write('\x02\x02ᄙ͂\x03\x02\x02\x02ᄚᄛ\x05џȰ\x02ᄛ')
buf.write('ᄜ\x05еț\x02ᄜᄝ\x05їȬ\x02ᄝ')
buf.write('ᄞ\x05хȣ\x02ᄞᄟ\x05еț\x02ᄟ')
buf.write('ᄠ\x05зȜ\x02ᄠᄡ\x05ыȦ\x02ᄡ')
buf.write('ᄢ\x05нȟ\x02ᄢ̈́\x03\x02\x02\x02ᄣᄤ')
buf.write('\x05џȰ\x02ᄤᄥ\x05еț\x02ᄥᄦ')
buf.write('\x05їȬ\x02ᄦᄧ\x05їȬ\x02ᄧᄨ')
buf.write('\x05еț\x02ᄨᄩ\x05ѥȳ\x02ᄩ͆')
buf.write('\x03\x02\x02\x02ᄪᄫ\x05џȰ\x02ᄫᄬ\x05е')
buf.write('ț\x02ᄬᄭ\x05їȬ\x02ᄭᄮ\x05ѥ')
buf.write('ȳ\x02ᄮᄯ\x05хȣ\x02ᄯᄰ\x05я')
buf.write('Ȩ\x02ᄰᄱ\x05сȡ\x02ᄱ͈\x03\x02\x02')
buf.write('\x02ᄲᄳ\x05џȰ\x02ᄳᄴ\x05нȟ')
buf.write('\x02ᄴᄵ\x05їȬ\x02ᄵᄶ\x05љȭ')
buf.write('\x02ᄶᄷ\x05хȣ\x02ᄷᄸ\x05ёȩ')
buf.write('\x02ᄸᄹ\x05яȨ\x02ᄹ͊\x03\x02\x02\x02ᄺ')
buf.write('ᄻ\x05џȰ\x02ᄻᄼ\x05нȟ\x02ᄼ')
buf.write('ᄽ\x05їȬ\x02ᄽᄾ\x05љȭ\x02ᄾ')
buf.write('ᄿ\x05хȣ\x02ᄿᅀ\x05ёȩ\x02ᅀ')
buf.write('ᅁ\x05яȨ\x02ᅁᅂ\x05љȭ\x02ᅂ')
buf.write('͌\x03\x02\x02\x02ᅃᅄ\x05ѡȱ\x02ᅄᅅ')
buf.write('\x05еț\x02ᅅᅆ\x05хȣ\x02ᅆᅇ')
buf.write('\x05ћȮ\x02ᅇ͎\x03\x02\x02\x02ᅈᅉ\x05ѡ')
buf.write('ȱ\x02ᅉᅊ\x05еț\x02ᅊᅋ\x05ї')
buf.write('Ȭ\x02ᅋᅌ\x05яȨ\x02ᅌᅍ\x05х')
buf.write('ȣ\x02ᅍᅎ\x05яȨ\x02ᅎᅏ\x05с')
buf.write('ȡ\x02ᅏ͐\x03\x02\x02\x02ᅐᅑ\x05ѡȱ')
buf.write('\x02ᅑᅒ\x05нȟ\x02ᅒᅓ\x05ыȦ')
buf.write('\x02ᅓᅔ\x05ыȦ\x02ᅔᅕ\x05пȠ')
buf.write('\x02ᅕᅖ\x05ёȩ\x02ᅖᅗ\x05їȬ')
buf.write('\x02ᅗᅘ\x05эȧ\x02ᅘᅙ\x05нȟ')
buf.write('\x02ᅙᅚ\x05лȞ\x02ᅚ͒\x03\x02\x02\x02ᅛ')
buf.write('ᅜ\x05ѡȱ\x02ᅜᅝ\x05уȢ\x02ᅝ')
buf.write('ᅞ\x05нȟ\x02ᅞᅟ\x05яȨ\x02ᅟ')
buf.write('͔\x03\x02\x02\x02ᅠᅡ\x05ѡȱ\x02ᅡᅢ')
buf.write('\x05уȢ\x02ᅢᅣ\x05нȟ\x02ᅣᅤ')
buf.write('\x05яȨ\x02ᅤᅥ\x05нȟ\x02ᅥᅦ')
buf.write('\x05џȰ\x02ᅦᅧ\x05нȟ\x02ᅧᅨ')
buf.write('\x05їȬ\x02ᅨ͖\x03\x02\x02\x02ᅩᅪ\x05ѡ')
buf.write('ȱ\x02ᅪᅫ\x05уȢ\x02ᅫᅬ\x05н')
buf.write('ȟ\x02ᅬᅭ\x05їȬ\x02ᅭᅮ\x05н')
buf.write('ȟ\x02ᅮ͘\x03\x02\x02\x02ᅯᅰ\x05ѡȱ')
buf.write('\x02ᅰᅱ\x05уȢ\x02ᅱᅲ\x05хȣ')
buf.write('\x02ᅲᅳ\x05ыȦ\x02ᅳᅴ\x05нȟ')
buf.write('\x02ᅴ͚\x03\x02\x02\x02ᅵᅶ\x05ѡȱ\x02ᅶ')
buf.write('ᅷ\x05хȣ\x02ᅷᅸ\x05ћȮ\x02ᅸ')
buf.write('ᅹ\x05уȢ\x02ᅹ͜\x03\x02\x02\x02ᅺᅻ')
buf.write('\x05ѡȱ\x02ᅻᅼ\x05хȣ\x02ᅼᅽ')
buf.write('\x05ћȮ\x02ᅽᅾ\x05уȢ\x02ᅾᅿ')
buf.write('\x05хȣ\x02ᅿᆀ\x05яȨ\x02ᆀ͞')
buf.write('\x03\x02\x02\x02ᆁᆂ\x05ѡȱ\x02ᆂᆃ\x05ё')
buf.write('ȩ\x02ᆃᆄ\x05їȬ\x02ᆄᆅ\x05щ')
buf.write('ȥ\x02ᆅ͠\x03\x02\x02\x02ᆆᆇ\x05ѡȱ')
buf.write('\x02ᆇᆈ\x05їȬ\x02ᆈᆉ\x05хȣ')
buf.write('\x02ᆉᆊ\x05ћȮ\x02ᆊᆋ\x05нȟ')
buf.write('\x02ᆋ͢\x03\x02\x02\x02ᆌᆍ\x05ѣȲ\x02ᆍ')
buf.write('ᆎ\x05эȧ\x02ᆎᆏ\x05ыȦ\x02ᆏ')
buf.write('ͤ\x03\x02\x02\x02ᆐᆑ\x05ѣȲ\x02ᆑᆒ')
buf.write('\x05эȧ\x02ᆒᆓ\x05ыȦ\x02ᆓᆔ')
buf.write('\x05еț\x02ᆔᆕ\x05сȡ\x02ᆕᆖ')
buf.write('\x05сȡ\x02ᆖͦ\x03\x02\x02\x02ᆗᆘ\x05ѣ')
buf.write('Ȳ\x02ᆘᆙ\x05эȧ\x02ᆙᆚ\x05ы')
buf.write('Ȧ\x02ᆚᆛ\x05еț\x02ᆛᆜ\x05ћ')
buf.write('Ȯ\x02ᆜᆝ\x05ћȮ\x02ᆝᆞ\x05ї')
buf.write('Ȭ\x02ᆞᆟ\x05хȣ\x02ᆟᆠ\x05з')
buf.write('Ȝ\x02ᆠᆡ\x05ѝȯ\x02ᆡᆢ\x05ћ')
buf.write('Ȯ\x02ᆢᆣ\x05нȟ\x02ᆣᆤ\x05љ')
buf.write('ȭ\x02ᆤͨ\x03\x02\x02\x02ᆥᆦ\x05ѣȲ')
buf.write('\x02ᆦᆧ\x05эȧ\x02ᆧᆨ\x05ыȦ')
buf.write('\x02ᆨᆩ\x05йȝ\x02ᆩᆪ\x05еț')
buf.write('\x02ᆪᆫ\x05љȭ\x02ᆫᆬ\x05ћȮ')
buf.write('\x02ᆬͪ\x03\x02\x02\x02ᆭᆮ\x05ѣȲ\x02ᆮ')
buf.write('ᆯ\x05эȧ\x02ᆯᆰ\x05ыȦ\x02ᆰ')
buf.write('ᆱ\x05йȝ\x02ᆱᆲ\x05ёȩ\x02ᆲ')
buf.write('ᆳ\x05ыȦ\x02ᆳᆴ\x05еț\x02ᆴ')
buf.write('ᆵ\x05ћȮ\x02ᆵᆶ\x05ћȮ\x02ᆶ')
buf.write('ᆷ\x05џȰ\x02ᆷᆸ\x05еț\x02ᆸ')
buf.write('ᆹ\x05ыȦ\x02ᆹͬ\x03\x02\x02\x02ᆺᆻ')
buf.write('\x05ѣȲ\x02ᆻᆼ\x05эȧ\x02ᆼᆽ')
buf.write('\x05ыȦ\x02ᆽᆾ\x05нȟ\x02ᆾᆿ')
buf.write('\x05ыȦ\x02ᆿᇀ\x05нȟ\x02ᇀᇁ')
buf.write('\x05эȧ\x02ᇁᇂ\x05нȟ\x02ᇂᇃ')
buf.write('\x05яȨ\x02ᇃᇄ\x05ћȮ\x02ᇄͮ')
buf.write('\x03\x02\x02\x02ᇅᇆ\x05ѣȲ\x02ᇆᇇ\x05э')
buf.write('ȧ\x02ᇇᇈ\x05ыȦ\x02ᇈᇉ\x05н')
buf.write('ȟ\x02ᇉᇊ\x05ѣȲ\x02ᇊᇋ\x05х')
buf.write('ȣ\x02ᇋᇌ\x05љȭ\x02ᇌᇍ\x05ћ')
buf.write('Ȯ\x02ᇍᇎ\x05љȭ\x02ᇎͰ\x03\x02\x02')
buf.write('\x02ᇏᇐ\x05ѣȲ\x02ᇐᇑ\x05эȧ')
buf.write('\x02ᇑᇒ\x05ыȦ\x02ᇒᇓ\x05пȠ')
buf.write('\x02ᇓᇔ\x05ёȩ\x02ᇔᇕ\x05їȬ')
buf.write('\x02ᇕᇖ\x05нȟ\x02ᇖᇗ\x05љȭ')
buf.write('\x02ᇗᇘ\x05ћȮ\x02ᇘͲ\x03\x02\x02\x02ᇙ')
buf.write('ᇚ\x05ѣȲ\x02ᇚᇛ\x05эȧ\x02ᇛ')
buf.write('ᇜ\x05ыȦ\x02ᇜᇝ\x05яȨ\x02ᇝ')
buf.write('ᇞ\x05еț\x02ᇞᇟ\x05эȧ\x02ᇟ')
buf.write('ᇠ\x05нȟ\x02ᇠᇡ\x05љȭ\x02ᇡ')
buf.write('ᇢ\x05ѓȪ\x02ᇢᇣ\x05еț\x02ᇣ')
buf.write('ᇤ\x05йȝ\x02ᇤᇥ\x05нȟ\x02ᇥ')
buf.write('ᇦ\x05љȭ\x02ᇦʹ\x03\x02\x02\x02ᇧᇨ')
buf.write('\x05ѣȲ\x02ᇨᇩ\x05эȧ\x02ᇩᇪ')
buf.write('\x05ыȦ\x02ᇪᇫ\x05ѓȪ\x02ᇫᇬ')
buf.write('\x05еț\x02ᇬᇭ\x05їȬ\x02ᇭᇮ')
buf.write('\x05љȭ\x02ᇮᇯ\x05нȟ\x02ᇯͶ')
buf.write('\x03\x02\x02\x02ᇰᇱ\x05ѣȲ\x02ᇱᇲ\x05э')
buf.write('ȧ\x02ᇲᇳ\x05ыȦ\x02ᇳᇴ\x05ѓ')
buf.write('Ȫ\x02ᇴᇵ\x05хȣ\x02ᇵ\u0378\x03\x02\x02')
buf.write('\x02ᇶᇷ\x05ѣȲ\x02ᇷᇸ\x05эȧ')
buf.write('\x02ᇸᇹ\x05ыȦ\x02ᇹᇺ\x05ѕȫ')
buf.write('\x02ᇺᇻ\x05ѝȯ\x02ᇻᇼ\x05нȟ')
buf.write('\x02ᇼᇽ\x05їȬ\x02ᇽᇾ\x05ѥȳ')
buf.write('\x02ᇾͺ\x03\x02\x02\x02ᇿሀ\x05ѣȲ\x02ሀ')
buf.write('ሁ\x05эȧ\x02ሁሂ\x05ыȦ\x02ሂ')
buf.write('ሃ\x05їȬ\x02ሃሄ\x05ёȩ\x02ሄ')
buf.write('ህ\x05ёȩ\x02ህሆ\x05ћȮ\x02ሆ')
buf.write('ͼ\x03\x02\x02\x02ሇለ\x05ѣȲ\x02ለሉ')
buf.write('\x05эȧ\x02ሉሊ\x05ыȦ\x02ሊላ')
buf.write('\x05љȭ\x02ላሌ\x05нȟ\x02ሌል')
buf.write('\x05їȬ\x02ልሎ\x05хȣ\x02ሎሏ')
buf.write('\x05еț\x02ሏሐ\x05ыȦ\x02ሐሑ')
buf.write('\x05хȣ\x02ሑሒ\x05ѧȴ\x02ሒሓ')
buf.write('\x05нȟ\x02ሓ;\x03\x02\x02\x02ሔሕ\x05ѣ')
buf.write('Ȳ\x02ሕሖ\x05эȧ\x02ሖሗ\x05ы')
buf.write('Ȧ\x02ሗመ\x05ћȮ\x02መሙ\x05е')
buf.write('ț\x02ሙሚ\x05зȜ\x02ሚማ\x05ы')
buf.write('Ȧ\x02ማሜ\x05нȟ\x02ሜ\u0380\x03\x02\x02')
buf.write('\x02ምሞ\x05ѥȳ\x02ሞሟ\x05нȟ')
buf.write('\x02ሟሠ\x05еț\x02ሠሡ\x05їȬ')
buf.write('\x02ሡ\u0382\x03\x02\x02\x02ሢሣ\x05ѥȳ\x02ሣ')
buf.write('ሤ\x05нȟ\x02ሤሥ\x05љȭ\x02ሥ')
buf.write('΄\x03\x02\x02\x02ሦሧ\x05ѥȳ\x02ሧረ')
buf.write('\x05эȧ\x02ረሩ\x05хȣ\x02ሩሪ')
buf.write('\x05яȨ\x02ሪራ\x05ћȮ\x02ራሬ')
buf.write('\x05нȟ\x02ሬር\x05їȬ\x02ርሮ')
buf.write('\x05џȰ\x02ሮሯ\x05еț\x02ሯሰ')
buf.write('\x05ыȦ\x02ሰሱ\x07a\x02\x02ሱሲ\x05ѝ')
buf.write('ȯ\x02ሲሳ\x05яȨ\x02ሳሴ\x05й')
buf.write('ȝ\x02ሴስ\x05ёȩ\x02ስሶ\x05я')
buf.write('Ȩ\x02ሶሷ\x05љȭ\x02ሷሸ\x05ћ')
buf.write('Ȯ\x02ሸሹ\x05їȬ\x02ሹሺ\x05е')
buf.write('ț\x02ሺሻ\x05хȣ\x02ሻሼ\x05я')
buf.write('Ȩ\x02ሼሽ\x05нȟ\x02ሽሾ\x05л')
buf.write('Ȟ\x02ሾΆ\x03\x02\x02\x02ሿቀ\x05ѧȴ')
buf.write('\x02ቀቁ\x05ёȩ\x02ቁቂ\x05яȨ')
buf.write('\x02ቂቃ\x05нȟ\x02ቃΈ\x03\x02\x02\x02ቄ')
buf.write('ቅ\x05ѓȪ\x02ቅቆ\x05їȬ\x02ቆ')
buf.write('ቇ\x05нȟ\x02ቇቈ\x05лȞ\x02ቈ')
buf.write('\u1249\x05хȣ\x02\u1249ቊ\x05йȝ\x02ቊ')
buf.write('ቋ\x05ћȮ\x02ቋቌ\x05хȣ\x02ቌ')
buf.write('ቍ\x05ёȩ\x02ቍ\u124e\x05яȨ\x02\u124e')
buf.write('Ί\x03\x02\x02\x02\u124fቐ\x05ѓȪ\x02ቐቑ')
buf.write('\x05їȬ\x02ቑቒ\x05нȟ\x02ቒቓ')
buf.write('\x05лȞ\x02ቓቔ\x05хȣ\x02ቔቕ')
buf.write('\x05йȝ\x02ቕቖ\x05ћȮ\x02ቖ\u1257')
buf.write('\x05хȣ\x02\u1257ቘ\x05ёȩ\x02ቘ\u1259')
buf.write('\x05яȨ\x02\u1259ቚ\x07a\x02\x02ቚቛ\x05з')
buf.write('Ȝ\x02ቛቜ\x05ёȩ\x02ቜቝ\x05ѝ')
buf.write('ȯ\x02ቝ\u125e\x05яȨ\x02\u125e\u125f\x05л')
buf.write('Ȟ\x02\u125fበ\x05љȭ\x02በΌ\x03\x02\x02')
buf.write('\x02ቡቢ\x05ѓȪ\x02ቢባ\x05їȬ')
buf.write('\x02ባቤ\x05нȟ\x02ቤብ\x05лȞ')
buf.write('\x02ብቦ\x05хȣ\x02ቦቧ\x05йȝ')
buf.write('\x02ቧቨ\x05ћȮ\x02ቨቩ\x05хȣ')
buf.write('\x02ቩቪ\x05ёȩ\x02ቪቫ\x05яȨ')
buf.write('\x02ቫቬ\x07a\x02\x02ቬቭ\x05йȝ\x02ቭ')
buf.write('ቮ\x05ёȩ\x02ቮቯ\x05љȭ\x02ቯ')
buf.write('ተ\x05ћȮ\x02ተΎ\x03\x02\x02\x02ቱቲ')
buf.write('\x05ѓȪ\x02ቲታ\x05їȬ\x02ታቴ')
buf.write('\x05нȟ\x02ቴት\x05лȞ\x02ትቶ')
buf.write('\x05хȣ\x02ቶቷ\x05йȝ\x02ቷቸ')
buf.write('\x05ћȮ\x02ቸቹ\x05хȣ\x02ቹቺ')
buf.write('\x05ёȩ\x02ቺቻ\x05яȨ\x02ቻቼ')
buf.write('\x07a\x02\x02ቼች\x05лȞ\x02ችቾ\x05н')
buf.write('ȟ\x02ቾቿ\x05ћȮ\x02ቿኀ\x05е')
buf.write('ț\x02ኀኁ\x05хȣ\x02ኁኂ\x05ы')
buf.write('Ȧ\x02ኂኃ\x05љȭ\x02ኃΐ\x03\x02\x02')
buf.write('\x02ኄኅ\x05ѓȪ\x02ኅኆ\x05їȬ')
buf.write('\x02ኆኇ\x05нȟ\x02ኇኈ\x05лȞ')
buf.write('\x02ኈ\u1289\x05хȣ\x02\u1289ኊ\x05йȝ')
buf.write('\x02ኊኋ\x05ћȮ\x02ኋኌ\x05хȣ')
buf.write('\x02ኌኍ\x05ёȩ\x02ኍ\u128e\x05яȨ')
buf.write('\x02\u128e\u128f\x07a\x02\x02\u128fነ\x05ѓȪ\x02ነ')
buf.write('ኑ\x05їȬ\x02ኑኒ\x05ёȩ\x02ኒ')
buf.write('ና\x05зȜ\x02ናኔ\x05еț\x02ኔ')
buf.write('ን\x05зȜ\x02ንኖ\x05хȣ\x02ኖ')
buf.write('ኗ\x05ыȦ\x02ኗኘ\x05хȣ\x02ኘ')
buf.write('ኙ\x05ћȮ\x02ኙኚ\x05ѥȳ\x02ኚ')
buf.write('Β\x03\x02\x02\x02ኛኜ\x05ѓȪ\x02ኜኝ')
buf.write('\x05їȬ\x02ኝኞ\x05нȟ\x02ኞኟ')
buf.write('\x05лȞ\x02ኟአ\x05хȣ\x02አኡ')
buf.write('\x05йȝ\x02ኡኢ\x05ћȮ\x02ኢኣ')
buf.write('\x05хȣ\x02ኣኤ\x05ёȩ\x02ኤእ')
buf.write('\x05яȨ\x02እኦ\x07a\x02\x02ኦኧ\x05љ')
buf.write('ȭ\x02ኧከ\x05нȟ\x02ከኩ\x05ћ')
buf.write('Ȯ\x02ኩΔ\x03\x02\x02\x02ኪካ\x05йȝ')
buf.write('\x02ካኬ\x05ѝȯ\x02ኬክ\x05эȧ')
buf.write('\x02ክኮ\x05нȟ\x02ኮኯ\x07a\x02\x02ኯ')
buf.write('ኰ\x05лȞ\x02ኰ\u12b1\x05хȣ\x02\u12b1')
buf.write('ኲ\x05љȭ\x02ኲኳ\x05ћȮ\x02ኳ')
buf.write('Ζ\x03\x02\x02\x02ኴኵ\x05лȞ\x02ኵ\u12b6')
buf.write('\x05нȟ\x02\u12b6\u12b7\x05яȨ\x02\u12b7ኸ')
buf.write('\x05љȭ\x02ኸኹ\x05нȟ\x02ኹኺ')
buf.write('\x07a\x02\x02ኺኻ\x05їȬ\x02ኻኼ\x05е')
buf.write('ț\x02ኼኽ\x05яȨ\x02ኽኾ\x05щ')
buf.write('ȥ\x02ኾΘ\x03\x02\x02\x02\u12bfዀ\x05ыȦ')
buf.write('\x02ዀ\u12c1\x05хȣ\x02\u12c1ዂ\x05љȭ')
buf.write('\x02ዂዃ\x05ћȮ\x02ዃዄ\x05еț')
buf.write('\x02ዄዅ\x05сȡ\x02ዅ\u12c6\x05сȡ')
buf.write('\x02\u12c6Κ\x03\x02\x02\x02\u12c7ወ\x05ѓȪ\x02ወ')
buf.write('ዉ\x05нȟ\x02ዉዊ\x05їȬ\x02ዊ')
buf.write('ዋ\x05йȝ\x02ዋዌ\x05нȟ\x02ዌ')
buf.write('ው\x05яȨ\x02ውዎ\x05ћȮ\x02ዎ')
buf.write('ዏ\x07a\x02\x02ዏዐ\x05їȬ\x02ዐዑ')
buf.write('\x05еț\x02ዑዒ\x05яȨ\x02ዒዓ')
buf.write('\x05щȥ\x02ዓΜ\x03\x02\x02\x02ዔዕ\x05ѓ')
buf.write('Ȫ\x02ዕዖ\x05нȟ\x02ዖ\u12d7\x05ї')
buf.write('Ȭ\x02\u12d7ዘ\x05йȝ\x02ዘዙ\x05н')
buf.write('ȟ\x02ዙዚ\x05яȨ\x02ዚዛ\x05ћ')
buf.write('Ȯ\x02ዛዜ\x05хȣ\x02ዜዝ\x05ы')
buf.write('Ȧ\x02ዝዞ\x05нȟ\x02ዞዟ\x07a\x02')
buf.write('\x02ዟዠ\x05йȝ\x02ዠዡ\x05ёȩ')
buf.write('\x02ዡዢ\x05яȨ\x02ዢዣ\x05ћȮ')
buf.write('\x02ዣΞ\x03\x02\x02\x02ዤዥ\x05ѓȪ\x02ዥ')
buf.write('ዦ\x05нȟ\x02ዦዧ\x05їȬ\x02ዧ')
buf.write('የ\x05йȝ\x02የዩ\x05нȟ\x02ዩ')
buf.write('ዪ\x05яȨ\x02ዪያ\x05ћȮ\x02ያ')
buf.write('ዬ\x05хȣ\x02ዬይ\x05ыȦ\x02ይ')
buf.write('ዮ\x05нȟ\x02ዮዯ\x07a\x02\x02ዯደ')
buf.write('\x05лȞ\x02ደዱ\x05хȣ\x02ዱዲ')
buf.write('\x05љȭ\x02ዲዳ\x05йȝ\x02ዳΠ')
buf.write('\x03\x02\x02\x02ዴድ\x05їȬ\x02ድዶ\x05е')
buf.write('ț\x02ዶዷ\x05яȨ\x02ዷዸ\x05щ')
buf.write('ȥ\x02ዸ\u03a2\x03\x02\x02\x02ዹዺ\x05еț')
buf.write('\x02ዺዻ\x05џȰ\x02ዻዼ\x05сȡ')
buf.write('\x02ዼΤ\x03\x02\x02\x02ዽዾ\x05йȝ\x02ዾ')
buf.write('ዿ\x05ёȩ\x02ዿጀ\x05їȬ\x02ጀ')
buf.write('ጁ\x05їȬ\x02ጁΦ\x03\x02\x02\x02ጂጃ')
buf.write('\x05ыȦ\x02ጃጄ\x05еț\x02ጄጅ')
buf.write('\x05сȡ\x02ጅΨ\x03\x02\x02\x02ጆጇ\x05ы')
buf.write('Ȧ\x02ጇገ\x05нȟ\x02ገጉ\x05е')
buf.write('ț\x02ጉጊ\x05лȞ\x02ጊΪ\x03\x02\x02')
buf.write('\x02ጋጌ\x05эȧ\x02ጌግ\x05еț')
buf.write('\x02ግጎ\x05ѣȲ\x02ጎά\x03\x02\x02\x02ጏ')
buf.write('ጐ\x05эȧ\x02ጐ\u1311\x05нȟ\x02\u1311')
buf.write('ጒ\x05лȞ\x02ጒጓ\x05хȣ\x02ጓ')
buf.write('ጔ\x05еț\x02ጔጕ\x05яȨ\x02ጕ')
buf.write('ή\x03\x02\x02\x02\u1316\u1317\x05эȧ\x02\u1317ጘ')
buf.write('\x05хȣ\x02ጘጙ\x05яȨ\x02ጙΰ')
buf.write('\x03\x02\x02\x02ጚጛ\x05яȨ\x02ጛጜ\x05ћ')
buf.write('Ȯ\x02ጜጝ\x05хȣ\x02ጝጞ\x05ы')
buf.write('Ȧ\x02ጞጟ\x05нȟ\x02ጟβ\x03\x02\x02')
buf.write('\x02ጠጡ\x05їȬ\x02ጡጢ\x05еț')
buf.write('\x02ጢጣ\x05ћȮ\x02ጣጤ\x05хȣ')
buf.write('\x02ጤጥ\x05ёȩ\x02ጥጦ\x07a\x02\x02ጦ')
buf.write('ጧ\x05ћȮ\x02ጧጨ\x05ёȩ\x02ጨ')
buf.write('ጩ\x07a\x02\x02ጩጪ\x05їȬ\x02ጪጫ')
buf.write('\x05нȟ\x02ጫጬ\x05ѓȪ\x02ጬጭ')
buf.write('\x05ёȩ\x02ጭጮ\x05їȬ\x02ጮጯ')
buf.write('\x05ћȮ\x02ጯδ\x03\x02\x02\x02ጰጱ\x05ї')
buf.write('Ȭ\x02ጱጲ\x05ёȩ\x02ጲጳ\x05ѡ')
buf.write('ȱ\x02ጳጴ\x07a\x02\x02ጴጵ\x05яȨ')
buf.write('\x02ጵጶ\x05ѝȯ\x02ጶጷ\x05эȧ')
buf.write('\x02ጷጸ\x05зȜ\x02ጸጹ\x05нȟ')
buf.write('\x02ጹጺ\x05їȬ\x02ጺζ\x03\x02\x02\x02ጻ')
buf.write('ጼ\x05љȭ\x02ጼጽ\x05ѝȯ\x02ጽ')
buf.write('ጾ\x05эȧ\x02ጾθ\x03\x02\x02\x02ጿፀ')
buf.write('\x05џȰ\x02ፀፁ\x05еț\x02ፁፂ')
buf.write('\x05їȬ\x02ፂፃ\x05хȣ\x02ፃፄ')
buf.write('\x05еț\x02ፄፅ\x05яȨ\x02ፅፆ')
buf.write('\x05йȝ\x02ፆፇ\x05нȟ\x02ፇκ')
buf.write('\x03\x02\x02\x02ፈፉ\x05їȬ\x02ፉፊ\x05н')
buf.write('ȟ\x02ፊፋ\x05сȡ\x02ፋፌ\x05ї')
buf.write('Ȭ\x02ፌፍ\x07a\x02\x02ፍμ\x03\x02\x02\x02ፎ')
buf.write('ፏ\x05љȭ\x02ፏፐ\x05ћȮ\x02ፐ')
buf.write('ፑ\x05лȞ\x02ፑፒ\x05лȞ\x02ፒ')
buf.write('ፓ\x05нȟ\x02ፓፔ\x05џȰ\x02ፔ')
buf.write('ξ\x03\x02\x02\x02ፕፖ\x05џȰ\x02ፖፗ')
buf.write('\x05еț\x02ፗፘ\x05їȬ\x02ፘፙ')
buf.write('\x07a\x02\x02ፙπ\x03\x02\x02\x02ፚ\u135b\x05йȝ')
buf.write('\x02\u135b\u135c\x05ёȩ\x02\u135c፝\x05џȰ')
buf.write('\x02፝፞\x05еț\x02፞፟\x05їȬ')
buf.write('\x02፟፠\x07a\x02\x02፠ς\x03\x02\x02\x02፡።')
buf.write('\x05яȨ\x02።፩\x07)\x02\x02፣፨\n\x02\x02')
buf.write('\x02፤፥\x07)\x02\x02፥፨\x07)\x02\x02፦፨\x05')
buf.write('Эȗ\x02፧፣\x03\x02\x02\x02፧፤\x03\x02\x02\x02')
buf.write('፧፦\x03\x02\x02\x02፨፫\x03\x02\x02\x02፩፧\x03')
buf.write('\x02\x02\x02፩፪\x03\x02\x02\x02፪፬\x03\x02\x02\x02፫፩')
buf.write('\x03\x02\x02\x02፬፭\x07)\x02\x02፭τ\x03\x02\x02\x02፮')
buf.write('፷\x05зȜ\x02፯፳\x07)\x02\x02፰፲')
buf.write('\x0423\x02፱፰\x03\x02\x02\x02፲፵\x03\x02\x02\x02፳')
buf.write('፱\x03\x02\x02\x02፳፴\x03\x02\x02\x02፴፶\x03\x02\x02\x02')
buf.write('፵፳\x03\x02\x02\x02፶፸\x07)\x02\x02፷፯\x03')
buf.write('\x02\x02\x02፸፹\x03\x02\x02\x02፹፷\x03\x02\x02\x02፹፺')
buf.write('\x03\x02\x02\x02፺φ\x03\x02\x02\x02፻ᎄ\x05ѣȲ')
buf.write('\x02፼ᎀ\x07)\x02\x02\u137d\u137f\t\x03\x02\x02\u137e\u137d')
buf.write(
'\x03\x02\x02\x02\u137fᎂ\x03\x02\x02\x02ᎀ\u137e\x03\x02\x02\x02ᎀ')
buf.write('ᎁ\x03\x02\x02\x02ᎁᎃ\x03\x02\x02\x02ᎂᎀ\x03\x02\x02\x02')
buf.write('ᎃᎅ\x07)\x02\x02ᎄ፼\x03\x02\x02\x02ᎅᎆ\x03')
buf.write('\x02\x02\x02ᎆᎄ\x03\x02\x02\x02ᎆᎇ\x03\x02\x02\x02ᎇψ')
buf.write('\x03\x02\x02\x02ᎈᎉ\x070\x02\x02ᎉᎊ\x070\x02\x02ᎊ')
buf.write('ϊ\x03\x02\x02\x02ᎋᎌ\x070\x02\x02ᎌό\x03\x02\x02')
buf.write('\x02ᎍᎎ\x05УȒ\x02ᎎώ\x03\x02\x02\x02ᎏ')
buf.write('᎘\x05Хȓ\x02᎐᎒\t\x04\x02\x02᎑᎓')
buf.write('\t\x05\x02\x02᎒᎑\x03\x02\x02\x02᎒᎓\x03\x02\x02\x02᎓')
buf.write('᎖\x03\x02\x02\x02᎔᎗\x05Хȓ\x02᎕᎗')
buf.write('\x05УȒ\x02᎖᎔\x03\x02\x02\x02᎖᎕\x03\x02\x02')
buf.write('\x02᎗᎙\x03\x02\x02\x02᎘᎐\x03\x02\x02\x02᎘᎙')
buf.write('\x03\x02\x02\x02᎙\u139c\x03\x02\x02\x02\u139a\u139d\x05лȞ')
buf.write(
'\x02\u139b\u139d\x05пȠ\x02\u139c\u139a\x03\x02\x02\x02\u139c')
buf.write(
'\u139b\x03\x02\x02\x02\u139c\u139d\x03\x02\x02\x02\u139dϐ\x03\x02\x02\x02'
)
buf.write('\u139eᎥ\x07)\x02\x02\u139fᎤ\n\x02\x02\x02ᎠᎡ\x07')
buf.write(')\x02\x02ᎡᎤ\x07)\x02\x02ᎢᎤ\x05Эȗ\x02Ꭳ')
buf.write('\u139f\x03\x02\x02\x02ᎣᎠ\x03\x02\x02\x02ᎣᎢ\x03\x02\x02\x02')
buf.write('ᎤᎧ\x03\x02\x02\x02ᎥᎣ\x03\x02\x02\x02ᎥᎦ\x03')
buf.write('\x02\x02\x02ᎦᎨ\x03\x02\x02\x02ᎧᎥ\x03\x02\x02\x02ᎨᎩ')
buf.write('\x07)\x02\x02Ꭹϒ\x03\x02\x02\x02ᎪᎯ\x05ѕȫ')
buf.write('\x02ᎫᎰ\x05ϗǬ\x02ᎬᎰ\x05ϙǭ')
buf.write('\x02ᎭᎰ\x05ϛǮ\x02ᎮᎰ\x05ϝǯ')
buf.write('\x02ᎯᎫ\x03\x02\x02\x02ᎯᎬ\x03\x02\x02\x02ᎯᎭ')
buf.write('\x03\x02\x02\x02ᎯᎮ\x03\x02\x02\x02ᎰᎱ\x03\x02\x02\x02Ꮁ')
buf.write('Ꮂ\x08Ǫ\x02\x02Ꮂϔ\x03\x02\x02\x02ᎳᎴ\x07)')
buf.write('\x02\x02Ꮄϖ\x03\x02\x02\x02ᎵᎶ\x05ϕǫ\x02Ꮆ')
buf.write('Ꮊ\x07>\x02\x02ᎷᎹ\x0b\x02\x02\x02ᎸᎷ\x03\x02\x02\x02')
buf.write('ᎹᎼ\x03\x02\x02\x02ᎺᎻ\x03\x02\x02\x02ᎺᎸ\x03')
buf.write('\x02\x02\x02ᎻᎽ\x03\x02\x02\x02ᎼᎺ\x03\x02\x02\x02ᎽᎾ')
buf.write('\x07@\x02\x02ᎾᎿ\x05ϕǫ\x02ᎿϘ\x03\x02\x02')
buf.write('\x02ᏀᏁ\x05ϕǫ\x02ᏁᏅ\x07}\x02\x02Ꮒ')
buf.write('Ꮔ\x0b\x02\x02\x02ᏃᏂ\x03\x02\x02\x02ᏄᏇ\x03\x02\x02')
buf.write('\x02ᏅᏆ\x03\x02\x02\x02ᏅᏃ\x03\x02\x02\x02ᏆᏈ')
buf.write('\x03\x02\x02\x02ᏇᏅ\x03\x02\x02\x02ᏈᏉ\x07\x7f\x02\x02Ꮙ')
buf.write('Ꮚ\x05ϕǫ\x02ᏊϚ\x03\x02\x02\x02ᏋᏌ')
buf.write('\x05ϕǫ\x02ᏌᏐ\x07]\x02\x02ᏍᏏ\x0b\x02\x02')
buf.write('\x02ᏎᏍ\x03\x02\x02\x02ᏏᏒ\x03\x02\x02\x02ᏐᏑ')
buf.write('\x03\x02\x02\x02ᏐᏎ\x03\x02\x02\x02ᏑᏓ\x03\x02\x02\x02Ꮢ')
buf.write('Ꮠ\x03\x02\x02\x02ᏓᏔ\x07_\x02\x02ᏔᏕ\x05ϕ')
buf.write('ǫ\x02ᏕϜ\x03\x02\x02\x02ᏖᏗ\x05ϕǫ')
buf.write('\x02ᏗᏛ\x07*\x02\x02ᏘᏚ\x0b\x02\x02\x02ᏙᏘ')
buf.write('\x03\x02\x02\x02ᏚᏝ\x03\x02\x02\x02ᏛᏜ\x03\x02\x02\x02Ꮫ')
buf.write('Ꮩ\x03\x02\x02\x02ᏜᏞ\x03\x02\x02\x02ᏝᏛ\x03\x02\x02\x02')
buf.write('ᏞᏟ\x07+\x02\x02ᏟᏠ\x05ϕǫ\x02Ꮰ')
buf.write('Ϟ\x03\x02\x02\x02ᏡᏢ\n\x06\x02\x02ᏢϠ\x03\x02\x02\x02')
buf.write('ᏣᏧ\x07$\x02\x02ᏤᏨ\n\x07\x02\x02ᏥᏦ\x07')
buf.write('$\x02\x02ᏦᏨ\x07$\x02\x02ᏧᏤ\x03\x02\x02\x02ᏧᏥ')
buf.write('\x03\x02\x02\x02ᏨᏩ\x03\x02\x02\x02ᏩᏧ\x03\x02\x02\x02Ꮹ')
buf.write('Ꮺ\x03\x02\x02\x02ᏪᏫ\x03\x02\x02\x02ᏫᏬ\x07$\x02\x02')
buf.write("ᏬϢ\x03\x02\x02\x02ᏭᏮ\x07'\x02\x02ᏮϤ\x03")
buf.write('\x02\x02\x02ᏯᏰ\x07(\x02\x02ᏰϦ\x03\x02\x02\x02ᏱᏲ')
buf.write('\x07*\x02\x02ᏲϨ\x03\x02\x02\x02ᏳᏴ\x07+\x02\x02ᏴϪ')
buf.write(
'\x03\x02\x02\x02Ᏽ\u13f6\x07,\x02\x02\u13f6\u13f7\x07,\x02\x02\u13f7Ϭ'
)
buf.write('\x03\x02\x02\x02ᏸᏹ\x07,\x02\x02ᏹϮ\x03\x02\x02\x02ᏺ')
buf.write('ᏻ\x07-\x02\x02ᏻϰ\x03\x02\x02\x02ᏼᏽ\x07/\x02\x02ᏽ')
buf.write(
'ϲ\x03\x02\x02\x02\u13fe\u13ff\x07.\x02\x02\u13ffϴ\x03\x02\x02\x02'
)
buf.write('᐀ᐁ\x071\x02\x02ᐁ϶\x03\x02\x02\x02ᐂᐃ')
buf.write('\x07B\x02\x02ᐃϸ\x03\x02\x02\x02ᐄᐅ\x07<\x02\x02ᐅᐆ')
buf.write('\x07?\x02\x02ᐆϺ\x03\x02\x02\x02ᐇᐈ\x07<\x02\x02ᐈᐍ')
buf.write('\x05Сȑ\x02ᐉᐌ\x05Сȑ\x02ᐊᐌ')
buf.write('\t\x08\x02\x02ᐋᐉ\x03\x02\x02\x02ᐋᐊ\x03\x02\x02\x02ᐌ')
buf.write('ᐏ\x03\x02\x02\x02ᐍᐋ\x03\x02\x02\x02ᐍᐎ\x03\x02\x02\x02')
buf.write('ᐎᐖ\x03\x02\x02\x02ᐏᐍ\x03\x02\x02\x02ᐐᐑ\x07')
buf.write('<\x02\x02ᐑᐖ\x05ϡDZ\x02ᐒᐓ\x07<\x02\x02ᐓ')
buf.write('ᐖ\x05ύǧ\x02ᐔᐖ\x05Бȉ\x02ᐕ')
buf.write('ᐇ\x03\x02\x02\x02ᐕᐐ\x03\x02\x02\x02ᐕᐒ\x03\x02\x02\x02')
buf.write('ᐕᐔ\x03\x02\x02\x02ᐖϼ\x03\x02\x02\x02ᐗᐘ\x07')
buf.write('<\x02\x02ᐘϾ\x03\x02\x02\x02ᐙᐚ\x07=\x02\x02ᐚЀ')
buf.write('\x03\x02\x02\x02ᐛᐜ\x07>\x02\x02ᐜᐝ\x07?\x02\x02ᐝЂ')
buf.write('\x03\x02\x02\x02ᐞᐟ\x07>\x02\x02ᐟЄ\x03\x02\x02\x02ᐠ')
buf.write('ᐡ\x07@\x02\x02ᐡᐢ\x07?\x02\x02ᐢІ\x03\x02\x02\x02ᐣ')
buf.write('ᐤ\x07#\x02\x02ᐤᐬ\x07?\x02\x02ᐥᐦ\x07>\x02\x02ᐦ')
buf.write('ᐬ\x07@\x02\x02ᐧᐨ\x07`\x02\x02ᐨᐬ\x07?\x02\x02ᐩ')
buf.write('ᐪ\x07\x80\x02\x02ᐪᐬ\x07?\x02\x02ᐫᐣ\x03\x02')
buf.write('\x02\x02ᐫᐥ\x03\x02\x02\x02ᐫᐧ\x03\x02\x02\x02ᐫᐩ')
buf.write('\x03\x02\x02\x02ᐬЈ\x03\x02\x02\x02ᐭᐮ\x07`\x02\x02ᐮ')
buf.write('Њ\x03\x02\x02\x02ᐯᐰ\x07\x80\x02\x02ᐰЌ\x03\x02')
buf.write('\x02\x02ᐱᐲ\x07#\x02\x02ᐲЎ\x03\x02\x02\x02ᐳᐴ')
buf.write('\x07@\x02\x02ᐴА\x03\x02\x02\x02ᐵᐶ\x07A\x02\x02ᐶВ')
buf.write('\x03\x02\x02\x02ᐷᐸ\x07~\x02\x02ᐸᐹ\x07~\x02\x02ᐹД')
buf.write('\x03\x02\x02\x02ᐺᐻ\x07~\x02\x02ᐻЖ\x03\x02\x02\x02ᐼ')
buf.write('ᐽ\x07?\x02\x02ᐽИ\x03\x02\x02\x02ᐾᐿ\x07]\x02\x02ᐿ')
buf.write('К\x03\x02\x02\x02ᑀᑁ\x07_\x02\x02ᑁМ\x03\x02\x02\x02')
buf.write('ᑂᑃ\x07a\x02\x02ᑃО\x03\x02\x02\x02ᑄᑆ\t')
buf.write('\t\x02\x02ᑅᑄ\x03\x02\x02\x02ᑆᑇ\x03\x02\x02\x02ᑇᑅ')
buf.write('\x03\x02\x02\x02ᑇᑈ\x03\x02\x02\x02ᑈᑉ\x03\x02\x02\x02ᑉ')
buf.write('ᑊ\x08Ȑ\x03\x02ᑊР\x03\x02\x02\x02ᑋᑌ\t\n')
buf.write('\x02\x02ᑌТ\x03\x02\x02\x02ᑍᑏ\x042;\x02ᑎᑍ')
buf.write('\x03\x02\x02\x02ᑏᑐ\x03\x02\x02\x02ᑐᑎ\x03\x02\x02\x02ᑐ')
buf.write('ᑑ\x03\x02\x02\x02ᑑФ\x03\x02\x02\x02ᑒᑔ\x05ύ')
buf.write('ǧ\x02ᑓᑒ\x03\x02\x02\x02ᑔᑗ\x03\x02\x02\x02ᑕ')
buf.write('ᑓ\x03\x02\x02\x02ᑕᑖ\x03\x02\x02\x02ᑖᑙ\x03\x02\x02\x02')
buf.write('ᑗᑕ\x03\x02\x02\x02ᑘᑚ\x070\x02\x02ᑙᑘ')
buf.write('\x03\x02\x02\x02ᑙᑚ\x03\x02\x02\x02ᑚᑜ\x03\x02\x02\x02ᑛ')
buf.write('ᑝ\x05ύǧ\x02ᑜᑛ\x03\x02\x02\x02ᑝᑞ')
buf.write('\x03\x02\x02\x02ᑞᑜ\x03\x02\x02\x02ᑞᑟ\x03\x02\x02\x02ᑟ')
buf.write('Ц\x03\x02\x02\x02ᑠᑡ\x07/\x02\x02ᑡᑢ\x07/\x02\x02ᑢ')
buf.write('ᑦ\x03\x02\x02\x02ᑣᑥ\n\x0b\x02\x02ᑤᑣ\x03\x02\x02')
buf.write('\x02ᑥᑨ\x03\x02\x02\x02ᑦᑤ\x03\x02\x02\x02ᑦᑧ')
buf.write('\x03\x02\x02\x02ᑧᑫ\x03\x02\x02\x02ᑨᑦ\x03\x02\x02\x02ᑩ')
buf.write('ᑬ\x05Эȗ\x02ᑪᑬ\x07\x02\x02\x03ᑫᑩ')
buf.write('\x03\x02\x02\x02ᑫᑪ\x03\x02\x02\x02ᑬᑭ\x03\x02\x02\x02ᑭ')
buf.write('ᑮ\x08Ȕ\x04\x02ᑮШ\x03\x02\x02\x02ᑯᑰ\x071')
buf.write('\x02\x02ᑰᑱ\x07,\x02\x02ᑱᑵ\x03\x02\x02\x02ᑲᑴ')
buf.write('\x0b\x02\x02\x02ᑳᑲ\x03\x02\x02\x02ᑴᑷ\x03\x02\x02\x02ᑵ')
buf.write('ᑶ\x03\x02\x02\x02ᑵᑳ\x03\x02\x02\x02ᑶᑸ\x03\x02\x02\x02')
buf.write('ᑷᑵ\x03\x02\x02\x02ᑸᑹ\x07,\x02\x02ᑹᑺ\x07')
buf.write('1\x02\x02ᑺᑻ\x03\x02\x02\x02ᑻᑼ\x08ȕ\x04\x02ᑼ')
buf.write('Ъ\x03\x02\x02\x02ᑽᑾ\x07r\x02\x02ᑾᑿ\x07t\x02\x02ᑿ')
buf.write('ᒀ\x07q\x02\x02ᒀᒁ\x07o\x02\x02ᒁᒂ\x07r\x02\x02ᒂ')
buf.write('ᒃ\x07v\x02\x02ᒃᒄ\x03\x02\x02\x02ᒄᒈ\x05Я')
buf.write('Ș\x02ᒅᒇ\n\x0b\x02\x02ᒆᒅ\x03\x02\x02\x02ᒇ')
buf.write('ᒊ\x03\x02\x02\x02ᒈᒆ\x03\x02\x02\x02ᒈᒉ\x03\x02\x02\x02')
buf.write('ᒉᒍ\x03\x02\x02\x02ᒊᒈ\x03\x02\x02\x02ᒋᒎ\x05')
buf.write('Эȗ\x02ᒌᒎ\x07\x02\x02\x03ᒍᒋ\x03\x02\x02\x02')
buf.write('ᒍᒌ\x03\x02\x02\x02ᒎЬ\x03\x02\x02\x02ᒏᒑ\x07')
buf.write('\x0f\x02\x02ᒐᒏ\x03\x02\x02\x02ᒐᒑ\x03\x02\x02\x02ᒑ')
buf.write('ᒒ\x03\x02\x02\x02ᒒᒓ\x07\x0c\x02\x02ᒓЮ\x03\x02\x02\x02')
buf.write('ᒔᒕ\t\x0c\x02\x02ᒕа\x03\x02\x02\x02ᒖᒛ\x05')
buf.write('Сȑ\x02ᒗᒚ\x05Сȑ\x02ᒘᒚ')
buf.write('\t\r\x02\x02ᒙᒗ\x03\x02\x02\x02ᒙᒘ\x03\x02\x02\x02ᒚ')
buf.write('ᒝ\x03\x02\x02\x02ᒛᒙ\x03\x02\x02\x02ᒛᒜ\x03\x02\x02\x02')
buf.write('ᒜв\x03\x02\x02\x02ᒝᒛ\x03\x02\x02\x02ᒞᒟ\x07')
buf.write('B\x02\x02ᒟᒠ\x07#\x02\x02ᒠᒡ\x03\x02\x02\x02ᒡᒢ')
buf.write('\x08Ț\x04\x02ᒢд\x03\x02\x02\x02ᒣᒤ\t\x0e\x02\x02')
buf.write('ᒤж\x03\x02\x02\x02ᒥᒦ\t\x0f\x02\x02ᒦи')
buf.write('\x03\x02\x02\x02ᒧᒨ\t\x10\x02\x02ᒨк\x03\x02\x02\x02ᒩ')
buf.write('ᒪ\t\x11\x02\x02ᒪм\x03\x02\x02\x02ᒫᒬ\t\x04\x02')
buf.write('\x02ᒬо\x03\x02\x02\x02ᒭᒮ\t\x12\x02\x02ᒮр')
buf.write('\x03\x02\x02\x02ᒯᒰ\t\x13\x02\x02ᒰт\x03\x02\x02\x02ᒱ')
buf.write('ᒲ\t\x14\x02\x02ᒲф\x03\x02\x02\x02ᒳᒴ\t\x15\x02')
buf.write('\x02ᒴц\x03\x02\x02\x02ᒵᒶ\t\x16\x02\x02ᒶш')
buf.write('\x03\x02\x02\x02ᒷᒸ\t\x17\x02\x02ᒸъ\x03\x02\x02\x02ᒹ')
buf.write('ᒺ\t\x18\x02\x02ᒺь\x03\x02\x02\x02ᒻᒼ\t\x19\x02')
buf.write('\x02ᒼю\x03\x02\x02\x02ᒽᒾ\t\x1a\x02\x02ᒾѐ')
buf.write('\x03\x02\x02\x02ᒿᓀ\t\x1b\x02\x02ᓀђ\x03\x02\x02\x02ᓁ')
buf.write('ᓂ\t\x1c\x02\x02ᓂє\x03\x02\x02\x02ᓃᓄ\t\x1d\x02')
buf.write('\x02ᓄі\x03\x02\x02\x02ᓅᓆ\t\x1e\x02\x02ᓆј')
buf.write('\x03\x02\x02\x02ᓇᓈ\t\x1f\x02\x02ᓈњ\x03\x02\x02\x02ᓉ')
buf.write('ᓊ\t \x02\x02ᓊќ\x03\x02\x02\x02ᓋᓌ\t!\x02\x02ᓌ')
buf.write('ў\x03\x02\x02\x02ᓍᓎ\t"\x02\x02ᓎѠ\x03\x02\x02\x02')
buf.write('ᓏᓐ\t#\x02\x02ᓐѢ\x03\x02\x02\x02ᓑᓒ\t')
buf.write('$\x02\x02ᓒѤ\x03\x02\x02\x02ᓓᓔ\t%\x02\x02ᓔѦ')
buf.write("\x03\x02\x02\x02ᓕᓖ\t&\x02\x02ᓖѨ\x03\x02\x02\x02'\x02፧")
buf.write('፩፳፹ᎀᎆ᎒᎖᎘\u139c')
buf.write('ᎣᎥᎯᎺᏅᏐᏛᏧᏩ')
buf.write('ᐋᐍᐕᐫᑇᑐᑕᑙᑞ')
buf.write('ᑦᑫᑵᒈᒍᒐᒙᒛ\x05\tǪ')
buf.write('\x02\x08\x02\x02\x02\x03\x02')
return buf.getvalue()
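
# The buf.write() calls above reassemble ANTLR's serialized ATN (augmented
# transition network): an opaque, runtime-version-specific encoding of the
# lexer's state machine. ATNDeserializer rebuilds the live ATN from this
# string below, so the data must stay byte-for-byte as generated.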
class PlSqlLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
T__0 = 1
A_LETTER = 2
ADD = 3
AFTER = 4
AGENT = 5
AGGREGATE = 6
ALL = 7
ALTER = 8
ANALYZE = 9
AND = 10
ANY = 11
ARRAY = 12
AS = 13
ASSUME = 14
ASSERT = 15
ASC = 16
ASSOCIATE = 17
AT = 18
ATTRIBUTE = 19
AUDIT = 20
AUTHID = 21
AUTO = 22
AUTOMATIC = 23
AUTONOMOUS_TRANSACTION = 24
BATCH = 25
BEFORE = 26
BEGIN = 27
BETWEEN = 28
BFILE = 29
BINARY_DOUBLE = 30
BINARY_FLOAT = 31
BINARY_INTEGER = 32
BLOB = 33
BLOCK = 34
BODY = 35
BOOLEAN = 36
BOTH = 37
BREADTH = 38
BULK = 39
BY = 40
BYTE = 41
C_LETTER = 42
CACHE = 43
CALL = 44
CANONICAL = 45
CASCADE = 46
CASE = 47
CAST = 48
CHAR = 49
CHAR_CS = 50
CHARACTER = 51
CHECK = 52
CHR = 53
CLOB = 54
CLOSE = 55
CLUSTER = 56
COLLECT = 57
COLUMNS = 58
COMMENT = 59
COMMIT = 60
COMMITTED = 61
COMPATIBILITY = 62
COMPILE = 63
COMPOUND = 64
CONNECT = 65
CONNECT_BY_ROOT = 66
CONSTANT = 67
CONSTRAINT = 68
CONSTRAINTS = 69
CONSTRUCTOR = 70
CONTENT = 71
CONTEXT = 72
CONTINUE = 73
CONVERT = 74
CORRUPT_XID = 75
CORRUPT_XID_ALL = 76
COST = 77
COUNT = 78
CREATE = 79
CROSS = 80
CUBE = 81
CURRENT = 82
CURRENT_USER = 83
CURSOR = 84
CUSTOMDATUM = 85
CYCLE = 86
DATA = 87
DATABASE = 88
DATE = 89
DAY = 90
DB_ROLE_CHANGE = 91
DBTIMEZONE = 92
DDL = 93
DEBUG = 94
DEC = 95
DECIMAL = 96
DECLARE = 97
DECOMPOSE = 98
DECREMENT = 99
DEFAULT = 100
DEFAULTS = 101
DEFERRED = 102
DEFINER = 103
DELETE = 104
DEPTH = 105
DESC = 106
DETERMINISTIC = 107
DIMENSION = 108
DISABLE = 109
DISASSOCIATE = 110
DISTINCT = 111
DOCUMENT = 112
DOUBLE = 113
DROP = 114
DSINTERVAL_UNCONSTRAINED = 115
EACH = 116
ELEMENT = 117
ELSE = 118
ELSIF = 119
EMPTY = 120
ENABLE = 121
ENCODING = 122
END = 123
ENTITYESCAPING = 124
ERR = 125
ERRORS = 126
ESCAPE = 127
EVALNAME = 128
EXCEPT = 129
EXCEPTION = 130
EXCEPTION_INIT = 131
EXCEPTIONS = 132
EXCLUDE = 133
EXCLUSIVE = 134
EXECUTE = 135
EXISTS = 136
EXIT = 137
EXPLAIN = 138
EXTERNAL = 139
EXTRACT = 140
FAILURE = 141
FALSE = 142
FETCH = 143
FINAL = 144
FIRST = 145
FIRST_VALUE = 146
FLOAT = 147
FOLLOWING = 148
FOLLOWS = 149
FOR = 150
FORALL = 151
FORCE = 152
FROM = 153
FULL = 154
FUNCTION = 155
GOTO = 156
GRANT = 157
GROUP = 158
GROUPING = 159
HASH = 160
HAVING = 161
HIDE = 162
HOUR = 163
IF = 164
IGNORE = 165
IMMEDIATE = 166
IN = 167
INCLUDE = 168
INCLUDING = 169
INCREMENT = 170
INDENT = 171
INDEX = 172
INDEXED = 173
INDICATOR = 174
INDICES = 175
INFINITE = 176
INLINE = 177
INNER = 178
INOUT = 179
INSERT = 180
INSTANTIABLE = 181
INSTEAD = 182
INT = 183
INTEGER = 184
INTERSECT = 185
INTERVAL = 186
INTO = 187
INVALIDATE = 188
IS = 189
ISOLATION = 190
ITERATE = 191
JAVA = 192
JOIN = 193
KEEP = 194
LANGUAGE = 195
LAST = 196
LAST_VALUE = 197
LEADING = 198
LEFT = 199
LEVEL = 200
LIBRARY = 201
LIKE = 202
LIKE2 = 203
LIKE4 = 204
LIKEC = 205
LIMIT = 206
LOCAL = 207
LOCK = 208
LOCKED = 209
LOG = 210
LOGOFF = 211
LOGON = 212
LONG = 213
LOOP = 214
MAIN = 215
MAP = 216
MATCHED = 217
MAXVALUE = 218
MEASURES = 219
MEMBER = 220
MERGE = 221
MINUS = 222
MINUTE = 223
MINVALUE = 224
MLSLABEL = 225
MODE = 226
MODEL = 227
MODIFY = 228
MONTH = 229
MULTISET = 230
NAME = 231
NAN = 232
NATURAL = 233
NATURALN = 234
NAV = 235
NCHAR = 236
NCHAR_CS = 237
NCLOB = 238
NESTED = 239
NEW = 240
NO = 241
NOAUDIT = 242
NOCACHE = 243
NOCOPY = 244
NOCYCLE = 245
NOENTITYESCAPING = 246
NOMAXVALUE = 247
NOMINVALUE = 248
NONE = 249
NOORDER = 250
NOSCHEMACHECK = 251
NOT = 252
NOWAIT = 253
NULL = 254
NULLS = 255
NUMBER = 256
NUMERIC = 257
NVARCHAR2 = 258
OBJECT = 259
OF = 260
OFF = 261
OID = 262
OLD = 263
ON = 264
ONLY = 265
OPEN = 266
OPTION = 267
OR = 268
ORADATA = 269
ORDER = 270
ORDINALITY = 271
OSERROR = 272
OUT = 273
OUTER = 274
OVER = 275
OVERRIDING = 276
PACKAGE = 277
PARALLEL_ENABLE = 278
PARAMETERS = 279
PARENT = 280
PARTITION = 281
PASSING = 282
PATH = 283
PERCENT_ROWTYPE = 284
PERCENT_TYPE = 285
PIPELINED = 286
PIVOT = 287
PLAN = 288
PLS_INTEGER = 289
POSITIVE = 290
POSITIVEN = 291
PRAGMA = 292
PRECEDING = 293
PRECISION = 294
PRESENT = 295
PRIOR = 296
PROCEDURE = 297
RAISE = 298
RANGE = 299
RAW = 300
READ = 301
REAL = 302
RECORD = 303
REF = 304
REFERENCE = 305
REFERENCING = 306
REJECT = 307
RELIES_ON = 308
RENAME = 309
REPLACE = 310
RESPECT = 311
RESTRICT_REFERENCES = 312
RESULT = 313
RESULT_CACHE = 314
RETURN = 315
RETURNING = 316
REUSE = 317
REVERSE = 318
REVOKE = 319
RIGHT = 320
ROLLBACK = 321
ROLLUP = 322
ROW = 323
ROWID = 324
ROWS = 325
RULES = 326
SAMPLE = 327
SAVE = 328
SAVEPOINT = 329
SCHEMA = 330
SCHEMACHECK = 331
SCN = 332
SEARCH = 333
SECOND = 334
SEED = 335
SEGMENT = 336
SELECT = 337
SELF = 338
SEQUENCE = 339
SEQUENTIAL = 340
SERIALIZABLE = 341
SERIALLY_REUSABLE = 342
SERVERERROR = 343
SESSIONTIMEZONE = 344
SET = 345
SETS = 346
SETTINGS = 347
SHARE = 348
SHOW = 349
SHUTDOWN = 350
SIBLINGS = 351
SIGNTYPE = 352
SIMPLE_INTEGER = 353
SINGLE = 354
SIZE = 355
SKIP_ = 356
SMALLINT = 357
SNAPSHOT = 358
SOME = 359
SPECIFICATION = 360
SQLDATA = 361
SQLERROR = 362
STANDALONE = 363
START = 364
STARTUP = 365
STATEMENT = 366
STATEMENT_ID = 367
STATIC = 368
STATISTICS = 369
STRING = 370
SUBMULTISET = 371
SUBPARTITION = 372
SUBSTITUTABLE = 373
SUBTYPE = 374
SUCCESS = 375
SUSPEND = 376
TABLE = 377
THE = 378
THEN = 379
TIME = 380
TIMESTAMP = 381
TIMESTAMP_LTZ_UNCONSTRAINED = 382
TIMESTAMP_TZ_UNCONSTRAINED = 383
TIMESTAMP_UNCONSTRAINED = 384
TIMEZONE_ABBR = 385
TIMEZONE_HOUR = 386
TIMEZONE_MINUTE = 387
TIMEZONE_REGION = 388
TO = 389
TRAILING = 390
TRANSACTION = 391
TRANSLATE = 392
TREAT = 393
TRIGGER = 394
TRIM = 395
TRUE = 396
TRUNCATE = 397
TYPE = 398
UNBOUNDED = 399
UNDER = 400
UNION = 401
UNIQUE = 402
UNLIMITED = 403
UNPIVOT = 404
UNTIL = 405
UPDATE = 406
UPDATED = 407
UPSERT = 408
UROWID = 409
USE = 410
USING = 411
VALIDATE = 412
VALUE = 413
VALUES = 414
VARCHAR = 415
VARCHAR2 = 416
VARIABLE = 417
VARRAY = 418
VARYING = 419
VERSION = 420
VERSIONS = 421
WAIT = 422
WARNING = 423
WELLFORMED = 424
WHEN = 425
WHENEVER = 426
WHERE = 427
WHILE = 428
WITH = 429
WITHIN = 430
WORK = 431
WRITE = 432
XML = 433
XMLAGG = 434
XMLATTRIBUTES = 435
XMLCAST = 436
XMLCOLATTVAL = 437
XMLELEMENT = 438
XMLEXISTS = 439
XMLFOREST = 440
XMLNAMESPACES = 441
XMLPARSE = 442
XMLPI = 443
XMLQUERY = 444
XMLROOT = 445
XMLSERIALIZE = 446
XMLTABLE = 447
YEAR = 448
YES = 449
YMINTERVAL_UNCONSTRAINED = 450
ZONE = 451
PREDICTION = 452
PREDICTION_BOUNDS = 453
PREDICTION_COST = 454
PREDICTION_DETAILS = 455
PREDICTION_PROBABILITY = 456
PREDICTION_SET = 457
CUME_DIST = 458
DENSE_RANK = 459
LISTAGG = 460
PERCENT_RANK = 461
PERCENTILE_CONT = 462
PERCENTILE_DISC = 463
RANK = 464
AVG = 465
CORR = 466
LAG = 467
LEAD = 468
MAX = 469
MEDIAN = 470
MIN = 471
NTILE = 472
RATIO_TO_REPORT = 473
ROW_NUMBER = 474
SUM = 475
VARIANCE = 476
REGR_ = 477
STDDEV = 478
VAR_ = 479
COVAR_ = 480
NATIONAL_CHAR_STRING_LIT = 481
BIT_STRING_LIT = 482
HEX_STRING_LIT = 483
DOUBLE_PERIOD = 484
PERIOD = 485
UNSIGNED_INTEGER = 486
APPROXIMATE_NUM_LIT = 487
CHAR_STRING = 488
DELIMITED_ID = 489
PERCENT = 490
AMPERSAND = 491
LEFT_PAREN = 492
RIGHT_PAREN = 493
DOUBLE_ASTERISK = 494
ASTERISK = 495
PLUS_SIGN = 496
MINUS_SIGN = 497
COMMA = 498
SOLIDUS = 499
AT_SIGN = 500
ASSIGN_OP = 501
BINDVAR = 502
COLON = 503
SEMICOLON = 504
LESS_THAN_OR_EQUALS_OP = 505
LESS_THAN_OP = 506
GREATER_THAN_OR_EQUALS_OP = 507
NOT_EQUAL_OP = 508
CARRET_OPERATOR_PART = 509
TILDE_OPERATOR_PART = 510
EXCLAMATION_OPERATOR_PART = 511
GREATER_THAN_OP = 512
CONCATENATION_OP = 513
VERTICAL_BAR = 514
EQUALS_OP = 515
LEFT_BRACKET = 516
RIGHT_BRACKET = 517
INTRODUCER = 518
SPACES = 519
SINGLE_LINE_COMMENT = 520
MULTI_LINE_COMMENT = 521
PROMPT = 522
REGULAR_ID = 523
ZV = 524
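
# Each constant above is a lexer token type. The channelNames, modeNames,
# literalNames, symbolicNames and ruleNames tables that follow are the
# parallel metadata the ANTLR runtime and tooling consult for those types.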
channelNames = [u'DEFAULT_TOKEN_CHANNEL', u'HIDDEN']
modeNames = ['DEFAULT_MODE']
literalNames = ['<INVALID>', "'..'", "'.'", "'%'", "'&'", "'('", "')'",
"'**'", "'*'", "'+'", "'-'", "','", "'/'", "'@'", "':='", "':'",
"';'", "'<='", "'<'", "'>='", "'^'", "'~'", "'!'", "'>'", "'||'",
"'|'", "'='", "'['", "']'", "'_'", "'@!'"]
symbolicNames = ['<INVALID>', 'A_LETTER', 'ADD', 'AFTER', 'AGENT',
'AGGREGATE', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS',
'ASSUME', 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT',
'AUTHID', 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH',
'BEFORE', 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE',
'BINARY_FLOAT', 'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY',
'BOOLEAN', 'BOTH', 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER',
'CACHE', 'CALL', 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR',
'CHAR_CS', 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER',
'COLLECT', 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED',
'COMPATIBILITY', 'COMPILE', 'COMPOUND', 'CONNECT',
'CONNECT_BY_ROOT', 'CONSTANT', 'CONSTRAINT', 'CONSTRAINTS',
'CONSTRUCTOR', 'CONTENT', 'CONTEXT', 'CONTINUE', 'CONVERT',
'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST', 'COUNT', 'CREATE',
'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER', 'CURSOR', 'CUSTOMDATUM',
'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY', 'DB_ROLE_CHANGE',
'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL', 'DECLARE',
'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS', 'DEFERRED',
'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC', 'DIMENSION',
'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT', 'DOUBLE', 'DROP',
'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT', 'ELSE', 'ELSIF',
'EMPTY', 'ENABLE', 'ENCODING', 'END', 'ENTITYESCAPING', 'ERR',
'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT', 'EXCEPTION',
'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE', 'EXECUTE',
'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FAILURE',
'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE', 'FLOAT',
'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM', 'FULL',
'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH', 'HAVING',
'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INCLUDE',
'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED', 'INDICATOR',
'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT', 'INSERT',
'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',
'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',
'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',
'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',
'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',
'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',
'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',
'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',
'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',
'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',
'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',
'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',
'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',
'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',
'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',
'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',
'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',
'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',
'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',
'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',
'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',
'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',
'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',
'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',
'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',
'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',
'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',
'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',
'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',
'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',
'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',
'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',
'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',
'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',
'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',
'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',
'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',
'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',
'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',
'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',
'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',
'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',
'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',
'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',
'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',
'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',
'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',
'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',
'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',
'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',
'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',
'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',
'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',
'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',
'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',
'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',
'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'DELIMITED_ID', 'PERCENT',
'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN', 'DOUBLE_ASTERISK',
'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA', 'SOLIDUS',
'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',
'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',
'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',
'GREATER_THAN_OP', 'CONCATENATION_OP', 'VERTICAL_BAR', 'EQUALS_OP',
'LEFT_BRACKET', 'RIGHT_BRACKET', 'INTRODUCER', 'SPACES',
'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'REGULAR_ID',
'ZV']
ruleNames = ['T__0', 'A_LETTER', 'ADD', 'AFTER', 'AGENT', 'AGGREGATE',
'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASSUME',
'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT', 'AUTHID',
'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH', 'BEFORE',
'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE', 'BINARY_FLOAT',
'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY', 'BOOLEAN', 'BOTH',
'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER', 'CACHE', 'CALL',
'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR', 'CHAR_CS',
'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER', 'COLLECT',
'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPATIBILITY',
'COMPILE', 'COMPOUND', 'CONNECT', 'CONNECT_BY_ROOT', 'CONSTANT',
'CONSTRAINT', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTENT', 'CONTEXT',
'CONTINUE', 'CONVERT', 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST',
'COUNT', 'CREATE', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER',
'CURSOR', 'CUSTOMDATUM', 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY',
'DB_ROLE_CHANGE', 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL',
'DECLARE', 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS',
'DEFERRED', 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC',
'DIMENSION', 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT',
'DOUBLE', 'DROP', 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT',
'ELSE', 'ELSIF', 'EMPTY', 'ENABLE', 'ENCODING', 'END',
'ENTITYESCAPING', 'ERR', 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT',
'EXCEPTION', 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE',
'EXECUTE', 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT',
'FAILURE', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE',
'FLOAT', 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM',
'FULL', 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH',
'HAVING', 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN',
'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED',
'INDICATOR', 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT',
'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',
'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',
'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',
'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',
'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',
'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',
'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',
'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',
'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',
'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',
'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',
'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',
'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',
'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',
'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',
'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',
'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',
'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',
'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',
'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',
'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',
'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',
'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',
'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',
'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',
'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',
'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',
'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',
'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',
'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',
'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',
'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',
'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',
'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',
'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',
'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',
'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',
'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',
'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',
'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',
'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',
'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',
'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',
'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',
'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',
'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',
'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',
'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',
'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',
'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',
'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',
'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',
'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',
'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',
'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',
'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',
'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'CHAR_STRING_PERL', 'QUOTE',
'QS_ANGLE', 'QS_BRACE', 'QS_BRACK', 'QS_PAREN', 'QS_OTHER_CH',
'DELIMITED_ID', 'PERCENT', 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN',
'DOUBLE_ASTERISK', 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA',
'SOLIDUS', 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',
'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',
'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',
'GREATER_THAN_OP', 'QUESTION_MARK', 'CONCATENATION_OP',
'VERTICAL_BAR', 'EQUALS_OP', 'LEFT_BRACKET', 'RIGHT_BRACKET',
'INTRODUCER', 'SPACES', 'SIMPLE_LETTER',
'UNSIGNED_INTEGER_FRAGMENT', 'FLOAT_FRAGMENT',
'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'NEWLINE',
'SPACE', 'REGULAR_ID', 'ZV', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z']
grammarFileName = 'PlSql.g4'
def __init__(self, input=None, output: TextIO=sys.stdout):
super().__init__(input, output)
self.checkVersion('4.7.2')
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
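
# A minimal usage sketch, assuming the antlr4 Python runtime matching
# version 4.7.2 is installed; the helper name tokenize_plsql and the sample
# statement are illustrative, not part of the generated module.
from antlr4 import CommonTokenStream, InputStream, Token

def tokenize_plsql(sql):
    lexer = PlSqlLexer(InputStream(sql))
    stream = CommonTokenStream(lexer)
    stream.fill()  # pull every token out of the lexer eagerly
    return [(tok.type, tok.text) for tok in stream.tokens
            if tok.type != Token.EOF]

# tokenize_plsql('SELECT dummy FROM dual;') yields pairs such as
# (PlSqlLexer.SELECT, 'SELECT') and (PlSqlLexer.SEMICOLON, ';').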
| # Generated from /home/mridul/PycharmProjects/BTP_2k18-19/PlSql.g4 by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing import TextIO  # typing.io is deprecated and removed in newer Pythons; typing.TextIO is equivalent
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u020e")
buf.write("\u14d7\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c")
buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f")
buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3")
buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6")
buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\4\u00aa")
buf.write("\t\u00aa\4\u00ab\t\u00ab\4\u00ac\t\u00ac\4\u00ad\t\u00ad")
buf.write("\4\u00ae\t\u00ae\4\u00af\t\u00af\4\u00b0\t\u00b0\4\u00b1")
buf.write("\t\u00b1\4\u00b2\t\u00b2\4\u00b3\t\u00b3\4\u00b4\t\u00b4")
buf.write("\4\u00b5\t\u00b5\4\u00b6\t\u00b6\4\u00b7\t\u00b7\4\u00b8")
buf.write("\t\u00b8\4\u00b9\t\u00b9\4\u00ba\t\u00ba\4\u00bb\t\u00bb")
buf.write("\4\u00bc\t\u00bc\4\u00bd\t\u00bd\4\u00be\t\u00be\4\u00bf")
buf.write("\t\u00bf\4\u00c0\t\u00c0\4\u00c1\t\u00c1\4\u00c2\t\u00c2")
buf.write("\4\u00c3\t\u00c3\4\u00c4\t\u00c4\4\u00c5\t\u00c5\4\u00c6")
buf.write("\t\u00c6\4\u00c7\t\u00c7\4\u00c8\t\u00c8\4\u00c9\t\u00c9")
buf.write("\4\u00ca\t\u00ca\4\u00cb\t\u00cb\4\u00cc\t\u00cc\4\u00cd")
buf.write("\t\u00cd\4\u00ce\t\u00ce\4\u00cf\t\u00cf\4\u00d0\t\u00d0")
buf.write("\4\u00d1\t\u00d1\4\u00d2\t\u00d2\4\u00d3\t\u00d3\4\u00d4")
buf.write("\t\u00d4\4\u00d5\t\u00d5\4\u00d6\t\u00d6\4\u00d7\t\u00d7")
buf.write("\4\u00d8\t\u00d8\4\u00d9\t\u00d9\4\u00da\t\u00da\4\u00db")
buf.write("\t\u00db\4\u00dc\t\u00dc\4\u00dd\t\u00dd\4\u00de\t\u00de")
buf.write("\4\u00df\t\u00df\4\u00e0\t\u00e0\4\u00e1\t\u00e1\4\u00e2")
buf.write("\t\u00e2\4\u00e3\t\u00e3\4\u00e4\t\u00e4\4\u00e5\t\u00e5")
buf.write("\4\u00e6\t\u00e6\4\u00e7\t\u00e7\4\u00e8\t\u00e8\4\u00e9")
buf.write("\t\u00e9\4\u00ea\t\u00ea\4\u00eb\t\u00eb\4\u00ec\t\u00ec")
buf.write("\4\u00ed\t\u00ed\4\u00ee\t\u00ee\4\u00ef\t\u00ef\4\u00f0")
buf.write("\t\u00f0\4\u00f1\t\u00f1\4\u00f2\t\u00f2\4\u00f3\t\u00f3")
buf.write("\4\u00f4\t\u00f4\4\u00f5\t\u00f5\4\u00f6\t\u00f6\4\u00f7")
buf.write("\t\u00f7\4\u00f8\t\u00f8\4\u00f9\t\u00f9\4\u00fa\t\u00fa")
buf.write("\4\u00fb\t\u00fb\4\u00fc\t\u00fc\4\u00fd\t\u00fd\4\u00fe")
buf.write("\t\u00fe\4\u00ff\t\u00ff\4\u0100\t\u0100\4\u0101\t\u0101")
buf.write("\4\u0102\t\u0102\4\u0103\t\u0103\4\u0104\t\u0104\4\u0105")
buf.write("\t\u0105\4\u0106\t\u0106\4\u0107\t\u0107\4\u0108\t\u0108")
buf.write("\4\u0109\t\u0109\4\u010a\t\u010a\4\u010b\t\u010b\4\u010c")
buf.write("\t\u010c\4\u010d\t\u010d\4\u010e\t\u010e\4\u010f\t\u010f")
buf.write("\4\u0110\t\u0110\4\u0111\t\u0111\4\u0112\t\u0112\4\u0113")
buf.write("\t\u0113\4\u0114\t\u0114\4\u0115\t\u0115\4\u0116\t\u0116")
buf.write("\4\u0117\t\u0117\4\u0118\t\u0118\4\u0119\t\u0119\4\u011a")
buf.write("\t\u011a\4\u011b\t\u011b\4\u011c\t\u011c\4\u011d\t\u011d")
buf.write("\4\u011e\t\u011e\4\u011f\t\u011f\4\u0120\t\u0120\4\u0121")
buf.write("\t\u0121\4\u0122\t\u0122\4\u0123\t\u0123\4\u0124\t\u0124")
buf.write("\4\u0125\t\u0125\4\u0126\t\u0126\4\u0127\t\u0127\4\u0128")
buf.write("\t\u0128\4\u0129\t\u0129\4\u012a\t\u012a\4\u012b\t\u012b")
buf.write("\4\u012c\t\u012c\4\u012d\t\u012d\4\u012e\t\u012e\4\u012f")
buf.write("\t\u012f\4\u0130\t\u0130\4\u0131\t\u0131\4\u0132\t\u0132")
buf.write("\4\u0133\t\u0133\4\u0134\t\u0134\4\u0135\t\u0135\4\u0136")
buf.write("\t\u0136\4\u0137\t\u0137\4\u0138\t\u0138\4\u0139\t\u0139")
buf.write("\4\u013a\t\u013a\4\u013b\t\u013b\4\u013c\t\u013c\4\u013d")
buf.write("\t\u013d\4\u013e\t\u013e\4\u013f\t\u013f\4\u0140\t\u0140")
buf.write("\4\u0141\t\u0141\4\u0142\t\u0142\4\u0143\t\u0143\4\u0144")
buf.write("\t\u0144\4\u0145\t\u0145\4\u0146\t\u0146\4\u0147\t\u0147")
buf.write("\4\u0148\t\u0148\4\u0149\t\u0149\4\u014a\t\u014a\4\u014b")
buf.write("\t\u014b\4\u014c\t\u014c\4\u014d\t\u014d\4\u014e\t\u014e")
buf.write("\4\u014f\t\u014f\4\u0150\t\u0150\4\u0151\t\u0151\4\u0152")
buf.write("\t\u0152\4\u0153\t\u0153\4\u0154\t\u0154\4\u0155\t\u0155")
buf.write("\4\u0156\t\u0156\4\u0157\t\u0157\4\u0158\t\u0158\4\u0159")
buf.write("\t\u0159\4\u015a\t\u015a\4\u015b\t\u015b\4\u015c\t\u015c")
buf.write("\4\u015d\t\u015d\4\u015e\t\u015e\4\u015f\t\u015f\4\u0160")
buf.write("\t\u0160\4\u0161\t\u0161\4\u0162\t\u0162\4\u0163\t\u0163")
buf.write("\4\u0164\t\u0164\4\u0165\t\u0165\4\u0166\t\u0166\4\u0167")
buf.write("\t\u0167\4\u0168\t\u0168\4\u0169\t\u0169\4\u016a\t\u016a")
buf.write("\4\u016b\t\u016b\4\u016c\t\u016c\4\u016d\t\u016d\4\u016e")
buf.write("\t\u016e\4\u016f\t\u016f\4\u0170\t\u0170\4\u0171\t\u0171")
buf.write("\4\u0172\t\u0172\4\u0173\t\u0173\4\u0174\t\u0174\4\u0175")
buf.write("\t\u0175\4\u0176\t\u0176\4\u0177\t\u0177\4\u0178\t\u0178")
buf.write("\4\u0179\t\u0179\4\u017a\t\u017a\4\u017b\t\u017b\4\u017c")
buf.write("\t\u017c\4\u017d\t\u017d\4\u017e\t\u017e\4\u017f\t\u017f")
buf.write("\4\u0180\t\u0180\4\u0181\t\u0181\4\u0182\t\u0182\4\u0183")
buf.write("\t\u0183\4\u0184\t\u0184\4\u0185\t\u0185\4\u0186\t\u0186")
buf.write("\4\u0187\t\u0187\4\u0188\t\u0188\4\u0189\t\u0189\4\u018a")
buf.write("\t\u018a\4\u018b\t\u018b\4\u018c\t\u018c\4\u018d\t\u018d")
buf.write("\4\u018e\t\u018e\4\u018f\t\u018f\4\u0190\t\u0190\4\u0191")
buf.write("\t\u0191\4\u0192\t\u0192\4\u0193\t\u0193\4\u0194\t\u0194")
buf.write("\4\u0195\t\u0195\4\u0196\t\u0196\4\u0197\t\u0197\4\u0198")
buf.write("\t\u0198\4\u0199\t\u0199\4\u019a\t\u019a\4\u019b\t\u019b")
buf.write("\4\u019c\t\u019c\4\u019d\t\u019d\4\u019e\t\u019e\4\u019f")
buf.write("\t\u019f\4\u01a0\t\u01a0\4\u01a1\t\u01a1\4\u01a2\t\u01a2")
buf.write("\4\u01a3\t\u01a3\4\u01a4\t\u01a4\4\u01a5\t\u01a5\4\u01a6")
buf.write("\t\u01a6\4\u01a7\t\u01a7\4\u01a8\t\u01a8\4\u01a9\t\u01a9")
buf.write("\4\u01aa\t\u01aa\4\u01ab\t\u01ab\4\u01ac\t\u01ac\4\u01ad")
buf.write("\t\u01ad\4\u01ae\t\u01ae\4\u01af\t\u01af\4\u01b0\t\u01b0")
buf.write("\4\u01b1\t\u01b1\4\u01b2\t\u01b2\4\u01b3\t\u01b3\4\u01b4")
buf.write("\t\u01b4\4\u01b5\t\u01b5\4\u01b6\t\u01b6\4\u01b7\t\u01b7")
buf.write("\4\u01b8\t\u01b8\4\u01b9\t\u01b9\4\u01ba\t\u01ba\4\u01bb")
buf.write("\t\u01bb\4\u01bc\t\u01bc\4\u01bd\t\u01bd\4\u01be\t\u01be")
buf.write("\4\u01bf\t\u01bf\4\u01c0\t\u01c0\4\u01c1\t\u01c1\4\u01c2")
buf.write("\t\u01c2\4\u01c3\t\u01c3\4\u01c4\t\u01c4\4\u01c5\t\u01c5")
buf.write("\4\u01c6\t\u01c6\4\u01c7\t\u01c7\4\u01c8\t\u01c8\4\u01c9")
buf.write("\t\u01c9\4\u01ca\t\u01ca\4\u01cb\t\u01cb\4\u01cc\t\u01cc")
buf.write("\4\u01cd\t\u01cd\4\u01ce\t\u01ce\4\u01cf\t\u01cf\4\u01d0")
buf.write("\t\u01d0\4\u01d1\t\u01d1\4\u01d2\t\u01d2\4\u01d3\t\u01d3")
buf.write("\4\u01d4\t\u01d4\4\u01d5\t\u01d5\4\u01d6\t\u01d6\4\u01d7")
buf.write("\t\u01d7\4\u01d8\t\u01d8\4\u01d9\t\u01d9\4\u01da\t\u01da")
buf.write("\4\u01db\t\u01db\4\u01dc\t\u01dc\4\u01dd\t\u01dd\4\u01de")
buf.write("\t\u01de\4\u01df\t\u01df\4\u01e0\t\u01e0\4\u01e1\t\u01e1")
buf.write("\4\u01e2\t\u01e2\4\u01e3\t\u01e3\4\u01e4\t\u01e4\4\u01e5")
buf.write("\t\u01e5\4\u01e6\t\u01e6\4\u01e7\t\u01e7\4\u01e8\t\u01e8")
buf.write("\4\u01e9\t\u01e9\4\u01ea\t\u01ea\4\u01eb\t\u01eb\4\u01ec")
buf.write("\t\u01ec\4\u01ed\t\u01ed\4\u01ee\t\u01ee\4\u01ef\t\u01ef")
buf.write("\4\u01f0\t\u01f0\4\u01f1\t\u01f1\4\u01f2\t\u01f2\4\u01f3")
buf.write("\t\u01f3\4\u01f4\t\u01f4\4\u01f5\t\u01f5\4\u01f6\t\u01f6")
buf.write("\4\u01f7\t\u01f7\4\u01f8\t\u01f8\4\u01f9\t\u01f9\4\u01fa")
buf.write("\t\u01fa\4\u01fb\t\u01fb\4\u01fc\t\u01fc\4\u01fd\t\u01fd")
buf.write("\4\u01fe\t\u01fe\4\u01ff\t\u01ff\4\u0200\t\u0200\4\u0201")
buf.write("\t\u0201\4\u0202\t\u0202\4\u0203\t\u0203\4\u0204\t\u0204")
buf.write("\4\u0205\t\u0205\4\u0206\t\u0206\4\u0207\t\u0207\4\u0208")
buf.write("\t\u0208\4\u0209\t\u0209\4\u020a\t\u020a\4\u020b\t\u020b")
buf.write("\4\u020c\t\u020c\4\u020d\t\u020d\4\u020e\t\u020e\4\u020f")
buf.write("\t\u020f\4\u0210\t\u0210\4\u0211\t\u0211\4\u0212\t\u0212")
buf.write("\4\u0213\t\u0213\4\u0214\t\u0214\4\u0215\t\u0215\4\u0216")
buf.write("\t\u0216\4\u0217\t\u0217\4\u0218\t\u0218\4\u0219\t\u0219")
buf.write("\4\u021a\t\u021a\4\u021b\t\u021b\4\u021c\t\u021c\4\u021d")
buf.write("\t\u021d\4\u021e\t\u021e\4\u021f\t\u021f\4\u0220\t\u0220")
buf.write("\4\u0221\t\u0221\4\u0222\t\u0222\4\u0223\t\u0223\4\u0224")
buf.write("\t\u0224\4\u0225\t\u0225\4\u0226\t\u0226\4\u0227\t\u0227")
buf.write("\4\u0228\t\u0228\4\u0229\t\u0229\4\u022a\t\u022a\4\u022b")
buf.write("\t\u022b\4\u022c\t\u022c\4\u022d\t\u022d\4\u022e\t\u022e")
buf.write("\4\u022f\t\u022f\4\u0230\t\u0230\4\u0231\t\u0231\4\u0232")
buf.write("\t\u0232\4\u0233\t\u0233\4\u0234\t\u0234\3\2\3\2\3\2\3")
buf.write("\3\3\3\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\t\3\n\3\n\3\n")
buf.write("\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3")
buf.write("\f\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3")
buf.write("\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\24\3\24\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25")
buf.write("\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27")
buf.write("\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30")
buf.write("\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\35")
buf.write("\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3 \3")
buf.write(" \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3")
buf.write("!\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3")
buf.write("$\3%\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3\'\3\'\3\'\3")
buf.write("\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3*\3*\3*\3")
buf.write("*\3*\3+\3+\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3.\3.\3.\3")
buf.write(".\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3/\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65")
buf.write("\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\67\3\67\3\67")
buf.write("\3\67\3\67\38\38\38\38\38\38\39\39\39\39\39\39\39\39\3")
buf.write(":\3:\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3")
buf.write("<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3")
buf.write(">\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3")
buf.write("?\3@\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3A\3A\3")
buf.write("B\3B\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3")
buf.write("C\3C\3C\3C\3C\3C\3D\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3")
buf.write("E\3E\3E\3E\3E\3E\3E\3E\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3")
buf.write("F\3F\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3H\3H\3H\3H\3")
buf.write("H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3")
buf.write("J\3J\3J\3K\3K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3L\3L\3")
buf.write("L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3")
buf.write("M\3M\3M\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3")
buf.write("P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3S\3S\3S\3S\3")
buf.write("S\3S\3S\3S\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3U\3")
buf.write("U\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3")
buf.write("W\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3")
buf.write("Y\3Y\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3")
buf.write("\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]")
buf.write("\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3`\3")
buf.write("`\3`\3`\3a\3a\3a\3a\3a\3a\3a\3a\3b\3b\3b\3b\3b\3b\3b\3")
buf.write("b\3c\3c\3c\3c\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3d\3d\3")
buf.write("d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3f\3f\3")
buf.write("f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3h\3h\3h\3h\3")
buf.write("h\3i\3i\3i\3i\3i\3i\3i\3j\3j\3j\3j\3j\3j\3k\3k\3k\3k\3")
buf.write("k\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3m\3m\3m\3")
buf.write("m\3m\3m\3m\3m\3m\3m\3n\3n\3n\3n\3n\3n\3n\3n\3o\3o\3o\3")
buf.write("o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3p\3p\3p\3p\3p\3p\3p\3p\3")
buf.write("p\3q\3q\3q\3q\3q\3q\3q\3q\3q\3r\3r\3r\3r\3r\3r\3r\3s\3")
buf.write("s\3s\3s\3s\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3")
buf.write("t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3u\3u\3u\3u\3u\3v\3v\3")
buf.write("v\3v\3v\3v\3v\3v\3w\3w\3w\3w\3w\3x\3x\3x\3x\3x\3x\3y\3")
buf.write("y\3y\3y\3y\3y\3z\3z\3z\3z\3z\3z\3z\3{\3{\3{\3{\3{\3{\3")
buf.write("{\3{\3{\3|\3|\3|\3|\3}\3}\3}\3}\3}\3}\3}\3}\3}\3}\3}\3")
buf.write("}\3}\3}\3}\3~\3~\3~\3~\3\177\3\177\3\177\3\177\3\177\3")
buf.write("\177\3\177\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write("\3\u0081\3\u0081\3\u0081\3\u0082\3\u0082\3\u0082\3\u0082")
buf.write("\3\u0082\3\u0082\3\u0082\3\u0083\3\u0083\3\u0083\3\u0083")
buf.write("\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0084")
buf.write("\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0085\3\u0086\3\u0086\3\u0086")
buf.write("\3\u0086\3\u0086\3\u0086\3\u0086\3\u0086\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089")
buf.write("\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a\3\u008a")
buf.write("\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b")
buf.write("\3\u008b\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c")
buf.write("\3\u008c\3\u008c\3\u008c\3\u008d\3\u008d\3\u008d\3\u008d")
buf.write("\3\u008d\3\u008d\3\u008d\3\u008d\3\u008e\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\3\u008e\3\u008e\3\u008e\3\u008f\3\u008f")
buf.write("\3\u008f\3\u008f\3\u008f\3\u008f\3\u0090\3\u0090\3\u0090")
buf.write("\3\u0090\3\u0090\3\u0090\3\u0091\3\u0091\3\u0091\3\u0091")
buf.write("\3\u0091\3\u0091\3\u0092\3\u0092\3\u0092\3\u0092\3\u0092")
buf.write("\3\u0092\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093")
buf.write("\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093\3\u0094")
buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0095\3\u0095")
buf.write("\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095")
buf.write("\3\u0095\3\u0096\3\u0096\3\u0096\3\u0096\3\u0096\3\u0096")
buf.write("\3\u0096\3\u0096\3\u0097\3\u0097\3\u0097\3\u0097\3\u0098")
buf.write("\3\u0098\3\u0098\3\u0098\3\u0098\3\u0098\3\u0098\3\u0099")
buf.write("\3\u0099\3\u0099\3\u0099\3\u0099\3\u0099\3\u009a\3\u009a")
buf.write("\3\u009a\3\u009a\3\u009a\3\u009b\3\u009b\3\u009b\3\u009b")
buf.write("\3\u009b\3\u009c\3\u009c\3\u009c\3\u009c\3\u009c\3\u009c")
buf.write("\3\u009c\3\u009c\3\u009c\3\u009d\3\u009d\3\u009d\3\u009d")
buf.write("\3\u009d\3\u009e\3\u009e\3\u009e\3\u009e\3\u009e\3\u009e")
buf.write("\3\u009f\3\u009f\3\u009f\3\u009f\3\u009f\3\u009f\3\u00a0")
buf.write("\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0")
buf.write("\3\u00a0\3\u00a1\3\u00a1\3\u00a1\3\u00a1\3\u00a1\3\u00a2")
buf.write("\3\u00a2\3\u00a2\3\u00a2\3\u00a2\3\u00a2\3\u00a2\3\u00a3")
buf.write("\3\u00a3\3\u00a3\3\u00a3\3\u00a3\3\u00a4\3\u00a4\3\u00a4")
buf.write("\3\u00a4\3\u00a4\3\u00a5\3\u00a5\3\u00a5\3\u00a6\3\u00a6")
buf.write("\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a7\3\u00a7")
buf.write("\3\u00a7\3\u00a7\3\u00a7\3\u00a7\3\u00a7\3\u00a7\3\u00a7")
buf.write("\3\u00a7\3\u00a8\3\u00a8\3\u00a8\3\u00a9\3\u00a9\3\u00a9")
buf.write("\3\u00a9\3\u00a9\3\u00a9\3\u00a9\3\u00a9\3\u00aa\3\u00aa")
buf.write("\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa")
buf.write("\3\u00aa\3\u00ab\3\u00ab\3\u00ab\3\u00ab\3\u00ab\3\u00ab")
buf.write("\3\u00ab\3\u00ab\3\u00ab\3\u00ab\3\u00ac\3\u00ac\3\u00ac")
buf.write("\3\u00ac\3\u00ac\3\u00ac\3\u00ac\3\u00ad\3\u00ad\3\u00ad")
buf.write("\3\u00ad\3\u00ad\3\u00ad\3\u00ae\3\u00ae\3\u00ae\3\u00ae")
buf.write("\3\u00ae\3\u00ae\3\u00ae\3\u00ae\3\u00af\3\u00af\3\u00af")
buf.write("\3\u00af\3\u00af\3\u00af\3\u00af\3\u00af\3\u00af\3\u00af")
buf.write("\3\u00b0\3\u00b0\3\u00b0\3\u00b0\3\u00b0\3\u00b0\3\u00b0")
buf.write("\3\u00b0\3\u00b1\3\u00b1\3\u00b1\3\u00b1\3\u00b1\3\u00b1")
buf.write("\3\u00b1\3\u00b1\3\u00b1\3\u00b2\3\u00b2\3\u00b2\3\u00b2")
buf.write("\3\u00b2\3\u00b2\3\u00b2\3\u00b3\3\u00b3\3\u00b3\3\u00b3")
buf.write("\3\u00b3\3\u00b3\3\u00b4\3\u00b4\3\u00b4\3\u00b4\3\u00b4")
buf.write("\3\u00b4\3\u00b5\3\u00b5\3\u00b5\3\u00b5\3\u00b5\3\u00b5")
buf.write("\3\u00b5\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6")
buf.write("\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6")
buf.write("\3\u00b7\3\u00b7\3\u00b7\3\u00b7\3\u00b7\3\u00b7\3\u00b7")
buf.write("\3\u00b7\3\u00b8\3\u00b8\3\u00b8\3\u00b8\3\u00b9\3\u00b9")
buf.write("\3\u00b9\3\u00b9\3\u00b9\3\u00b9\3\u00b9\3\u00b9\3\u00ba")
buf.write("\3\u00ba\3\u00ba\3\u00ba\3\u00ba\3\u00ba\3\u00ba\3\u00ba")
buf.write("\3\u00ba\3\u00ba\3\u00bb\3\u00bb\3\u00bb\3\u00bb\3\u00bb")
buf.write("\3\u00bb\3\u00bb\3\u00bb\3\u00bb\3\u00bc\3\u00bc\3\u00bc")
buf.write("\3\u00bc\3\u00bc\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00bd")
buf.write("\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00be")
buf.write("\3\u00be\3\u00be\3\u00bf\3\u00bf\3\u00bf\3\u00bf\3\u00bf")
buf.write("\3\u00bf\3\u00bf\3\u00bf\3\u00bf\3\u00bf\3\u00c0\3\u00c0")
buf.write("\3\u00c0\3\u00c0\3\u00c0\3\u00c0\3\u00c0\3\u00c0\3\u00c1")
buf.write("\3\u00c1\3\u00c1\3\u00c1\3\u00c1\3\u00c2\3\u00c2\3\u00c2")
buf.write("\3\u00c2\3\u00c2\3\u00c3\3\u00c3\3\u00c3\3\u00c3\3\u00c3")
buf.write("\3\u00c4\3\u00c4\3\u00c4\3\u00c4\3\u00c4\3\u00c4\3\u00c4")
buf.write("\3\u00c4\3\u00c4\3\u00c5\3\u00c5\3\u00c5\3\u00c5\3\u00c5")
buf.write("\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c6")
buf.write("\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c7\3\u00c7\3\u00c7")
buf.write("\3\u00c7\3\u00c7\3\u00c7\3\u00c7\3\u00c7\3\u00c8\3\u00c8")
buf.write("\3\u00c8\3\u00c8\3\u00c8\3\u00c9\3\u00c9\3\u00c9\3\u00c9")
buf.write("\3\u00c9\3\u00c9\3\u00ca\3\u00ca\3\u00ca\3\u00ca\3\u00ca")
buf.write("\3\u00ca\3\u00ca\3\u00ca\3\u00cb\3\u00cb\3\u00cb\3\u00cb")
buf.write("\3\u00cb\3\u00cc\3\u00cc\3\u00cc\3\u00cc\3\u00cc\3\u00cc")
buf.write("\3\u00cd\3\u00cd\3\u00cd\3\u00cd\3\u00cd\3\u00cd\3\u00ce")
buf.write("\3\u00ce\3\u00ce\3\u00ce\3\u00ce\3\u00ce\3\u00cf\3\u00cf")
buf.write("\3\u00cf\3\u00cf\3\u00cf\3\u00cf\3\u00d0\3\u00d0\3\u00d0")
buf.write("\3\u00d0\3\u00d0\3\u00d0\3\u00d1\3\u00d1\3\u00d1\3\u00d1")
buf.write("\3\u00d1\3\u00d2\3\u00d2\3\u00d2\3\u00d2\3\u00d2\3\u00d2")
buf.write("\3\u00d2\3\u00d3\3\u00d3\3\u00d3\3\u00d3\3\u00d4\3\u00d4")
buf.write("\3\u00d4\3\u00d4\3\u00d4\3\u00d4\3\u00d4\3\u00d5\3\u00d5")
buf.write("\3\u00d5\3\u00d5\3\u00d5\3\u00d5\3\u00d6\3\u00d6\3\u00d6")
buf.write("\3\u00d6\3\u00d6\3\u00d7\3\u00d7\3\u00d7\3\u00d7\3\u00d7")
buf.write("\3\u00d8\3\u00d8\3\u00d8\3\u00d8\3\u00d8\3\u00d9\3\u00d9")
buf.write("\3\u00d9\3\u00d9\3\u00da\3\u00da\3\u00da\3\u00da\3\u00da")
buf.write("\3\u00da\3\u00da\3\u00da\3\u00db\3\u00db\3\u00db\3\u00db")
buf.write("\3\u00db\3\u00db\3\u00db\3\u00db\3\u00db\3\u00dc\3\u00dc")
buf.write("\3\u00dc\3\u00dc\3\u00dc\3\u00dc\3\u00dc\3\u00dc\3\u00dc")
buf.write("\3\u00dd\3\u00dd\3\u00dd\3\u00dd\3\u00dd\3\u00dd\3\u00dd")
buf.write("\3\u00de\3\u00de\3\u00de\3\u00de\3\u00de\3\u00de\3\u00df")
buf.write("\3\u00df\3\u00df\3\u00df\3\u00df\3\u00df\3\u00e0\3\u00e0")
buf.write("\3\u00e0\3\u00e0\3\u00e0\3\u00e0\3\u00e0\3\u00e1\3\u00e1")
buf.write("\3\u00e1\3\u00e1\3\u00e1\3\u00e1\3\u00e1\3\u00e1\3\u00e1")
buf.write("\3\u00e2\3\u00e2\3\u00e2\3\u00e2\3\u00e2\3\u00e2\3\u00e2")
buf.write("\3\u00e2\3\u00e2\3\u00e3\3\u00e3\3\u00e3\3\u00e3\3\u00e3")
buf.write("\3\u00e4\3\u00e4\3\u00e4\3\u00e4\3\u00e4\3\u00e4\3\u00e5")
buf.write("\3\u00e5\3\u00e5\3\u00e5\3\u00e5\3\u00e5\3\u00e5\3\u00e6")
buf.write("\3\u00e6\3\u00e6\3\u00e6\3\u00e6\3\u00e6\3\u00e7\3\u00e7")
buf.write("\3\u00e7\3\u00e7\3\u00e7\3\u00e7\3\u00e7\3\u00e7\3\u00e7")
buf.write("\3\u00e8\3\u00e8\3\u00e8\3\u00e8\3\u00e8\3\u00e9\3\u00e9")
buf.write("\3\u00e9\3\u00e9\3\u00ea\3\u00ea\3\u00ea\3\u00ea\3\u00ea")
buf.write("\3\u00ea\3\u00ea\3\u00ea\3\u00eb\3\u00eb\3\u00eb\3\u00eb")
buf.write("\3\u00eb\3\u00eb\3\u00eb\3\u00eb\3\u00eb\3\u00ec\3\u00ec")
buf.write("\3\u00ec\3\u00ec\3\u00ed\3\u00ed\3\u00ed\3\u00ed\3\u00ed")
buf.write("\3\u00ed\3\u00ee\3\u00ee\3\u00ee\3\u00ee\3\u00ee\3\u00ee")
buf.write("\3\u00ee\3\u00ee\3\u00ee\3\u00ef\3\u00ef\3\u00ef\3\u00ef")
buf.write("\3\u00ef\3\u00ef\3\u00f0\3\u00f0\3\u00f0\3\u00f0\3\u00f0")
buf.write("\3\u00f0\3\u00f0\3\u00f1\3\u00f1\3\u00f1\3\u00f1\3\u00f2")
buf.write("\3\u00f2\3\u00f2\3\u00f3\3\u00f3\3\u00f3\3\u00f3\3\u00f3")
buf.write("\3\u00f3\3\u00f3\3\u00f3\3\u00f4\3\u00f4\3\u00f4\3\u00f4")
buf.write("\3\u00f4\3\u00f4\3\u00f4\3\u00f4\3\u00f5\3\u00f5\3\u00f5")
buf.write("\3\u00f5\3\u00f5\3\u00f5\3\u00f5\3\u00f6\3\u00f6\3\u00f6")
buf.write("\3\u00f6\3\u00f6\3\u00f6\3\u00f6\3\u00f6\3\u00f7\3\u00f7")
buf.write("\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7")
buf.write("\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7")
buf.write("\3\u00f7\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f8")
buf.write("\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f9\3\u00f9")
buf.write("\3\u00f9\3\u00f9\3\u00f9\3\u00f9\3\u00f9\3\u00f9\3\u00f9")
buf.write("\3\u00f9\3\u00f9\3\u00fa\3\u00fa\3\u00fa\3\u00fa\3\u00fa")
buf.write("\3\u00fb\3\u00fb\3\u00fb\3\u00fb\3\u00fb\3\u00fb\3\u00fb")
buf.write("\3\u00fb\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc")
buf.write("\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc")
buf.write("\3\u00fc\3\u00fd\3\u00fd\3\u00fd\3\u00fd\3\u00fe\3\u00fe")
buf.write("\3\u00fe\3\u00fe\3\u00fe\3\u00fe\3\u00fe\3\u00ff\3\u00ff")
buf.write("\3\u00ff\3\u00ff\3\u00ff\3\u0100\3\u0100\3\u0100\3\u0100")
buf.write("\3\u0100\3\u0100\3\u0101\3\u0101\3\u0101\3\u0101\3\u0101")
buf.write("\3\u0101\3\u0101\3\u0102\3\u0102\3\u0102\3\u0102\3\u0102")
buf.write("\3\u0102\3\u0102\3\u0102\3\u0103\3\u0103\3\u0103\3\u0103")
buf.write("\3\u0103\3\u0103\3\u0103\3\u0103\3\u0103\3\u0103\3\u0104")
buf.write("\3\u0104\3\u0104\3\u0104\3\u0104\3\u0104\3\u0104\3\u0105")
buf.write("\3\u0105\3\u0105\3\u0106\3\u0106\3\u0106\3\u0106\3\u0107")
buf.write("\3\u0107\3\u0107\3\u0107\3\u0108\3\u0108\3\u0108\3\u0108")
buf.write("\3\u0109\3\u0109\3\u0109\3\u010a\3\u010a\3\u010a\3\u010a")
buf.write("\3\u010a\3\u010b\3\u010b\3\u010b\3\u010b\3\u010b\3\u010c")
buf.write("\3\u010c\3\u010c\3\u010c\3\u010c\3\u010c\3\u010c\3\u010d")
buf.write("\3\u010d\3\u010d\3\u010e\3\u010e\3\u010e\3\u010e\3\u010e")
buf.write("\3\u010e\3\u010e\3\u010e\3\u010f\3\u010f\3\u010f\3\u010f")
buf.write("\3\u010f\3\u010f\3\u0110\3\u0110\3\u0110\3\u0110\3\u0110")
buf.write("\3\u0110\3\u0110\3\u0110\3\u0110\3\u0110\3\u0110\3\u0111")
buf.write("\3\u0111\3\u0111\3\u0111\3\u0111\3\u0111\3\u0111\3\u0111")
buf.write("\3\u0112\3\u0112\3\u0112\3\u0112\3\u0113\3\u0113\3\u0113")
buf.write("\3\u0113\3\u0113\3\u0113\3\u0114\3\u0114\3\u0114\3\u0114")
buf.write("\3\u0114\3\u0115\3\u0115\3\u0115\3\u0115\3\u0115\3\u0115")
buf.write("\3\u0115\3\u0115\3\u0115\3\u0115\3\u0115\3\u0116\3\u0116")
buf.write("\3\u0116\3\u0116\3\u0116\3\u0116\3\u0116\3\u0116\3\u0117")
buf.write("\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117")
buf.write("\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117")
buf.write("\3\u0117\3\u0118\3\u0118\3\u0118\3\u0118\3\u0118\3\u0118")
buf.write("\3\u0118\3\u0118\3\u0118\3\u0118\3\u0118\3\u0119\3\u0119")
buf.write("\3\u0119\3\u0119\3\u0119\3\u0119\3\u0119\3\u011a\3\u011a")
buf.write("\3\u011a\3\u011a\3\u011a\3\u011a\3\u011a\3\u011a\3\u011a")
buf.write("\3\u011a\3\u011b\3\u011b\3\u011b\3\u011b\3\u011b\3\u011b")
buf.write("\3\u011b\3\u011b\3\u011c\3\u011c\3\u011c\3\u011c\3\u011c")
buf.write("\3\u011d\3\u011d\3\u011d\3\u011d\3\u011d\3\u011d\3\u011d")
buf.write("\3\u011d\3\u011d\3\u011e\3\u011e\3\u011e\3\u011e\3\u011e")
buf.write("\3\u011e\3\u011f\3\u011f\3\u011f\3\u011f\3\u011f\3\u011f")
buf.write("\3\u011f\3\u011f\3\u011f\3\u011f\3\u0120\3\u0120\3\u0120")
buf.write("\3\u0120\3\u0120\3\u0120\3\u0121\3\u0121\3\u0121\3\u0121")
buf.write("\3\u0121\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122")
buf.write("\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122\3\u0123")
buf.write("\3\u0123\3\u0123\3\u0123\3\u0123\3\u0123\3\u0123\3\u0123")
buf.write("\3\u0123\3\u0124\3\u0124\3\u0124\3\u0124\3\u0124\3\u0124")
buf.write("\3\u0124\3\u0124\3\u0124\3\u0124\3\u0125\3\u0125\3\u0125")
buf.write("\3\u0125\3\u0125\3\u0125\3\u0125\3\u0126\3\u0126\3\u0126")
buf.write("\3\u0126\3\u0126\3\u0126\3\u0126\3\u0126\3\u0126\3\u0126")
buf.write("\3\u0127\3\u0127\3\u0127\3\u0127\3\u0127\3\u0127\3\u0127")
buf.write("\3\u0127\3\u0127\3\u0127\3\u0128\3\u0128\3\u0128\3\u0128")
buf.write("\3\u0128\3\u0128\3\u0128\3\u0128\3\u0129\3\u0129\3\u0129")
buf.write("\3\u0129\3\u0129\3\u0129\3\u012a\3\u012a\3\u012a\3\u012a")
buf.write("\3\u012a\3\u012a\3\u012a\3\u012a\3\u012a\3\u012a\3\u012b")
buf.write("\3\u012b\3\u012b\3\u012b\3\u012b\3\u012b\3\u012c\3\u012c")
buf.write("\3\u012c\3\u012c\3\u012c\3\u012c\3\u012d\3\u012d\3\u012d")
buf.write("\3\u012d\3\u012e\3\u012e\3\u012e\3\u012e\3\u012e\3\u012f")
buf.write("\3\u012f\3\u012f\3\u012f\3\u012f\3\u0130\3\u0130\3\u0130")
buf.write("\3\u0130\3\u0130\3\u0130\3\u0130\3\u0131\3\u0131\3\u0131")
buf.write("\3\u0131\3\u0132\3\u0132\3\u0132\3\u0132\3\u0132\3\u0132")
buf.write("\3\u0132\3\u0132\3\u0132\3\u0132\3\u0133\3\u0133\3\u0133")
buf.write("\3\u0133\3\u0133\3\u0133\3\u0133\3\u0133\3\u0133\3\u0133")
buf.write("\3\u0133\3\u0133\3\u0134\3\u0134\3\u0134\3\u0134\3\u0134")
buf.write("\3\u0134\3\u0134\3\u0135\3\u0135\3\u0135\3\u0135\3\u0135")
buf.write("\3\u0135\3\u0135\3\u0135\3\u0135\3\u0135\3\u0136\3\u0136")
buf.write("\3\u0136\3\u0136\3\u0136\3\u0136\3\u0136\3\u0137\3\u0137")
buf.write("\3\u0137\3\u0137\3\u0137\3\u0137\3\u0137\3\u0137\3\u0138")
buf.write("\3\u0138\3\u0138\3\u0138\3\u0138\3\u0138\3\u0138\3\u0138")
buf.write("\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139")
buf.write("\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139")
buf.write("\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u013a")
buf.write("\3\u013a\3\u013a\3\u013a\3\u013a\3\u013a\3\u013a\3\u013b")
buf.write("\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b")
buf.write("\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b\3\u013c\3\u013c")
buf.write("\3\u013c\3\u013c\3\u013c\3\u013c\3\u013c\3\u013d\3\u013d")
buf.write("\3\u013d\3\u013d\3\u013d\3\u013d\3\u013d\3\u013d\3\u013d")
buf.write("\3\u013d\3\u013e\3\u013e\3\u013e\3\u013e\3\u013e\3\u013e")
buf.write("\3\u013f\3\u013f\3\u013f\3\u013f\3\u013f\3\u013f\3\u013f")
buf.write("\3\u013f\3\u0140\3\u0140\3\u0140\3\u0140\3\u0140\3\u0140")
buf.write("\3\u0140\3\u0141\3\u0141\3\u0141\3\u0141\3\u0141\3\u0141")
buf.write("\3\u0142\3\u0142\3\u0142\3\u0142\3\u0142\3\u0142\3\u0142")
buf.write("\3\u0142\3\u0142\3\u0143\3\u0143\3\u0143\3\u0143\3\u0143")
buf.write("\3\u0143\3\u0143\3\u0144\3\u0144\3\u0144\3\u0144\3\u0145")
buf.write("\3\u0145\3\u0145\3\u0145\3\u0145\3\u0145\3\u0146\3\u0146")
buf.write("\3\u0146\3\u0146\3\u0146\3\u0147\3\u0147\3\u0147\3\u0147")
buf.write("\3\u0147\3\u0147\3\u0148\3\u0148\3\u0148\3\u0148\3\u0148")
buf.write("\3\u0148\3\u0148\3\u0149\3\u0149\3\u0149\3\u0149\3\u0149")
buf.write("\3\u014a\3\u014a\3\u014a\3\u014a\3\u014a\3\u014a\3\u014a")
buf.write("\3\u014a\3\u014a\3\u014a\3\u014b\3\u014b\3\u014b\3\u014b")
buf.write("\3\u014b\3\u014b\3\u014b\3\u014c\3\u014c\3\u014c\3\u014c")
buf.write("\3\u014c\3\u014c\3\u014c\3\u014c\3\u014c\3\u014c\3\u014c")
buf.write("\3\u014c\3\u014d\3\u014d\3\u014d\3\u014d\3\u014e\3\u014e")
buf.write("\3\u014e\3\u014e\3\u014e\3\u014e\3\u014e\3\u014f\3\u014f")
buf.write("\3\u014f\3\u014f\3\u014f\3\u014f\3\u014f\3\u0150\3\u0150")
buf.write("\3\u0150\3\u0150\3\u0150\3\u0151\3\u0151\3\u0151\3\u0151")
buf.write("\3\u0151\3\u0151\3\u0151\3\u0151\3\u0152\3\u0152\3\u0152")
buf.write("\3\u0152\3\u0152\3\u0152\3\u0152\3\u0153\3\u0153\3\u0153")
buf.write("\3\u0153\3\u0153\3\u0154\3\u0154\3\u0154\3\u0154\3\u0154")
buf.write("\3\u0154\3\u0154\3\u0154\3\u0154\3\u0155\3\u0155\3\u0155")
buf.write("\3\u0155\3\u0155\3\u0155\3\u0155\3\u0155\3\u0155\3\u0155")
buf.write("\3\u0155\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156")
buf.write("\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156")
buf.write("\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157")
buf.write("\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157")
buf.write("\3\u0157\3\u0157\3\u0157\3\u0157\3\u0158\3\u0158\3\u0158")
buf.write("\3\u0158\3\u0158\3\u0158\3\u0158\3\u0158\3\u0158\3\u0158")
buf.write("\3\u0158\3\u0158\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159")
buf.write("\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159")
buf.write("\3\u0159\3\u0159\3\u0159\3\u0159\3\u015a\3\u015a\3\u015a")
buf.write("\3\u015a\3\u015b\3\u015b\3\u015b\3\u015b\3\u015b\3\u015c")
buf.write("\3\u015c\3\u015c\3\u015c\3\u015c\3\u015c\3\u015c\3\u015c")
buf.write("\3\u015c\3\u015d\3\u015d\3\u015d\3\u015d\3\u015d\3\u015d")
buf.write("\3\u015e\3\u015e\3\u015e\3\u015e\3\u015e\3\u015f\3\u015f")
buf.write("\3\u015f\3\u015f\3\u015f\3\u015f\3\u015f\3\u015f\3\u015f")
buf.write("\3\u0160\3\u0160\3\u0160\3\u0160\3\u0160\3\u0160\3\u0160")
buf.write("\3\u0160\3\u0160\3\u0161\3\u0161\3\u0161\3\u0161\3\u0161")
buf.write("\3\u0161\3\u0161\3\u0161\3\u0161\3\u0162\3\u0162\3\u0162")
buf.write("\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162")
buf.write("\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162\3\u0163\3\u0163")
buf.write("\3\u0163\3\u0163\3\u0163\3\u0163\3\u0163\3\u0164\3\u0164")
buf.write("\3\u0164\3\u0164\3\u0164\3\u0165\3\u0165\3\u0165\3\u0165")
buf.write("\3\u0165\3\u0166\3\u0166\3\u0166\3\u0166\3\u0166\3\u0166")
buf.write("\3\u0166\3\u0166\3\u0166\3\u0167\3\u0167\3\u0167\3\u0167")
buf.write("\3\u0167\3\u0167\3\u0167\3\u0167\3\u0167\3\u0168\3\u0168")
buf.write("\3\u0168\3\u0168\3\u0168\3\u0169\3\u0169\3\u0169\3\u0169")
buf.write("\3\u0169\3\u0169\3\u0169\3\u0169\3\u0169\3\u0169\3\u0169")
buf.write("\3\u0169\3\u0169\3\u0169\3\u016a\3\u016a\3\u016a\3\u016a")
buf.write("\3\u016a\3\u016a\3\u016a\3\u016a\3\u016b\3\u016b\3\u016b")
buf.write("\3\u016b\3\u016b\3\u016b\3\u016b\3\u016b\3\u016b\3\u016c")
buf.write("\3\u016c\3\u016c\3\u016c\3\u016c\3\u016c\3\u016c\3\u016c")
buf.write("\3\u016c\3\u016c\3\u016c\3\u016d\3\u016d\3\u016d\3\u016d")
buf.write("\3\u016d\3\u016d\3\u016e\3\u016e\3\u016e\3\u016e\3\u016e")
buf.write("\3\u016e\3\u016e\3\u016e\3\u016f\3\u016f\3\u016f\3\u016f")
buf.write("\3\u016f\3\u016f\3\u016f\3\u016f\3\u016f\3\u016f\3\u0170")
buf.write("\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170")
buf.write("\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170\3\u0171\3\u0171")
buf.write("\3\u0171\3\u0171\3\u0171\3\u0171\3\u0171\3\u0172\3\u0172")
buf.write("\3\u0172\3\u0172\3\u0172\3\u0172\3\u0172\3\u0172\3\u0172")
buf.write("\3\u0172\3\u0172\3\u0173\3\u0173\3\u0173\3\u0173\3\u0173")
buf.write("\3\u0173\3\u0173\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174")
buf.write("\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174")
buf.write("\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175")
buf.write("\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0176")
buf.write("\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176")
buf.write("\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0177")
buf.write("\3\u0177\3\u0177\3\u0177\3\u0177\3\u0177\3\u0177\3\u0177")
buf.write("\3\u0178\3\u0178\3\u0178\3\u0178\3\u0178\3\u0178\3\u0178")
buf.write("\3\u0178\3\u0179\3\u0179\3\u0179\3\u0179\3\u0179\3\u0179")
buf.write("\3\u0179\3\u0179\3\u017a\3\u017a\3\u017a\3\u017a\3\u017a")
buf.write("\3\u017a\3\u017b\3\u017b\3\u017b\3\u017b\3\u017c\3\u017c")
buf.write("\3\u017c\3\u017c\3\u017c\3\u017d\3\u017d\3\u017d\3\u017d")
buf.write("\3\u017d\3\u017e\3\u017e\3\u017e\3\u017e\3\u017e\3\u017e")
buf.write("\3\u017e\3\u017e\3\u017e\3\u017e\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0181\3\u0181\3\u0181\3\u0181")
buf.write("\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181")
buf.write("\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181")
buf.write("\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0182")
buf.write("\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182")
buf.write("\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0183")
buf.write("\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183")
buf.write("\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0184")
buf.write("\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184")
buf.write("\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184")
buf.write("\3\u0184\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185")
buf.write("\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185")
buf.write("\3\u0185\3\u0185\3\u0185\3\u0186\3\u0186\3\u0186\3\u0187")
buf.write("\3\u0187\3\u0187\3\u0187\3\u0187\3\u0187\3\u0187\3\u0187")
buf.write("\3\u0187\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188")
buf.write("\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188\3\u0189")
buf.write("\3\u0189\3\u0189\3\u0189\3\u0189\3\u0189\3\u0189\3\u0189")
buf.write("\3\u0189\3\u0189\3\u018a\3\u018a\3\u018a\3\u018a\3\u018a")
buf.write("\3\u018a\3\u018b\3\u018b\3\u018b\3\u018b\3\u018b\3\u018b")
buf.write("\3\u018b\3\u018b\3\u018c\3\u018c\3\u018c\3\u018c\3\u018c")
buf.write("\3\u018d\3\u018d\3\u018d\3\u018d\3\u018d\3\u018e\3\u018e")
buf.write("\3\u018e\3\u018e\3\u018e\3\u018e\3\u018e\3\u018e\3\u018e")
buf.write("\3\u018f\3\u018f\3\u018f\3\u018f\3\u018f\3\u0190\3\u0190")
buf.write("\3\u0190\3\u0190\3\u0190\3\u0190\3\u0190\3\u0190\3\u0190")
buf.write("\3\u0190\3\u0191\3\u0191\3\u0191\3\u0191\3\u0191\3\u0191")
buf.write("\3\u0192\3\u0192\3\u0192\3\u0192\3\u0192\3\u0192\3\u0193")
buf.write("\3\u0193\3\u0193\3\u0193\3\u0193\3\u0193\3\u0193\3\u0194")
buf.write("\3\u0194\3\u0194\3\u0194\3\u0194\3\u0194\3\u0194\3\u0194")
buf.write("\3\u0194\3\u0194\3\u0195\3\u0195\3\u0195\3\u0195\3\u0195")
buf.write("\3\u0195\3\u0195\3\u0195\3\u0196\3\u0196\3\u0196\3\u0196")
buf.write("\3\u0196\3\u0196\3\u0197\3\u0197\3\u0197\3\u0197\3\u0197")
buf.write("\3\u0197\3\u0197\3\u0198\3\u0198\3\u0198\3\u0198\3\u0198")
buf.write("\3\u0198\3\u0198\3\u0198\3\u0199\3\u0199\3\u0199\3\u0199")
buf.write("\3\u0199\3\u0199\3\u0199\3\u019a\3\u019a\3\u019a\3\u019a")
buf.write("\3\u019a\3\u019a\3\u019a\3\u019b\3\u019b\3\u019b\3\u019b")
buf.write("\3\u019c\3\u019c\3\u019c\3\u019c\3\u019c\3\u019c\3\u019d")
buf.write("\3\u019d\3\u019d\3\u019d\3\u019d\3\u019d\3\u019d\3\u019d")
buf.write("\3\u019d\3\u019e\3\u019e\3\u019e\3\u019e\3\u019e\3\u019e")
buf.write("\3\u019f\3\u019f\3\u019f\3\u019f\3\u019f\3\u019f\3\u019f")
buf.write("\3\u01a0\3\u01a0\3\u01a0\3\u01a0\3\u01a0\3\u01a0\3\u01a0")
buf.write("\3\u01a0\3\u01a1\3\u01a1\3\u01a1\3\u01a1\3\u01a1\3\u01a1")
buf.write("\3\u01a1\3\u01a1\3\u01a1\3\u01a2\3\u01a2\3\u01a2\3\u01a2")
buf.write("\3\u01a2\3\u01a2\3\u01a2\3\u01a2\3\u01a2\3\u01a3\3\u01a3")
buf.write("\3\u01a3\3\u01a3\3\u01a3\3\u01a3\3\u01a3\3\u01a4\3\u01a4")
buf.write("\3\u01a4\3\u01a4\3\u01a4\3\u01a4\3\u01a4\3\u01a4\3\u01a5")
buf.write("\3\u01a5\3\u01a5\3\u01a5\3\u01a5\3\u01a5\3\u01a5\3\u01a5")
buf.write("\3\u01a6\3\u01a6\3\u01a6\3\u01a6\3\u01a6\3\u01a6\3\u01a6")
buf.write("\3\u01a6\3\u01a6\3\u01a7\3\u01a7\3\u01a7\3\u01a7\3\u01a7")
buf.write("\3\u01a8\3\u01a8\3\u01a8\3\u01a8\3\u01a8\3\u01a8\3\u01a8")
buf.write("\3\u01a8\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01a9")
buf.write("\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01aa\3\u01aa")
buf.write("\3\u01aa\3\u01aa\3\u01aa\3\u01ab\3\u01ab\3\u01ab\3\u01ab")
buf.write("\3\u01ab\3\u01ab\3\u01ab\3\u01ab\3\u01ab\3\u01ac\3\u01ac")
buf.write("\3\u01ac\3\u01ac\3\u01ac\3\u01ac\3\u01ad\3\u01ad\3\u01ad")
buf.write("\3\u01ad\3\u01ad\3\u01ad\3\u01ae\3\u01ae\3\u01ae\3\u01ae")
buf.write("\3\u01ae\3\u01af\3\u01af\3\u01af\3\u01af\3\u01af\3\u01af")
buf.write("\3\u01af\3\u01b0\3\u01b0\3\u01b0\3\u01b0\3\u01b0\3\u01b1")
buf.write("\3\u01b1\3\u01b1\3\u01b1\3\u01b1\3\u01b1\3\u01b2\3\u01b2")
buf.write("\3\u01b2\3\u01b2\3\u01b3\3\u01b3\3\u01b3\3\u01b3\3\u01b3")
buf.write("\3\u01b3\3\u01b3\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4")
buf.write("\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4")
buf.write("\3\u01b4\3\u01b4\3\u01b5\3\u01b5\3\u01b5\3\u01b5\3\u01b5")
buf.write("\3\u01b5\3\u01b5\3\u01b5\3\u01b6\3\u01b6\3\u01b6\3\u01b6")
buf.write("\3\u01b6\3\u01b6\3\u01b6\3\u01b6\3\u01b6\3\u01b6\3\u01b6")
buf.write("\3\u01b6\3\u01b6\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b7")
buf.write("\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b8")
buf.write("\3\u01b8\3\u01b8\3\u01b8\3\u01b8\3\u01b8\3\u01b8\3\u01b8")
buf.write("\3\u01b8\3\u01b8\3\u01b9\3\u01b9\3\u01b9\3\u01b9\3\u01b9")
buf.write("\3\u01b9\3\u01b9\3\u01b9\3\u01b9\3\u01b9\3\u01ba\3\u01ba")
buf.write("\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba")
buf.write("\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01bb\3\u01bb")
buf.write("\3\u01bb\3\u01bb\3\u01bb\3\u01bb\3\u01bb\3\u01bb\3\u01bb")
buf.write("\3\u01bc\3\u01bc\3\u01bc\3\u01bc\3\u01bc\3\u01bc\3\u01bd")
buf.write("\3\u01bd\3\u01bd\3\u01bd\3\u01bd\3\u01bd\3\u01bd\3\u01bd")
buf.write("\3\u01bd\3\u01be\3\u01be\3\u01be\3\u01be\3\u01be\3\u01be")
buf.write("\3\u01be\3\u01be\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf")
buf.write("\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf")
buf.write("\3\u01bf\3\u01c0\3\u01c0\3\u01c0\3\u01c0\3\u01c0\3\u01c0")
buf.write("\3\u01c0\3\u01c0\3\u01c0\3\u01c1\3\u01c1\3\u01c1\3\u01c1")
buf.write("\3\u01c1\3\u01c2\3\u01c2\3\u01c2\3\u01c2\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c4\3\u01c4\3\u01c4\3\u01c4\3\u01c4")
buf.write("\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c5")
buf.write("\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c6\3\u01c6\3\u01c6")
buf.write("\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6")
buf.write("\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6")
buf.write("\3\u01c6\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7")
buf.write("\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7")
buf.write("\3\u01c7\3\u01c7\3\u01c7\3\u01c8\3\u01c8\3\u01c8\3\u01c8")
buf.write("\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8")
buf.write("\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8")
buf.write("\3\u01c8\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9")
buf.write("\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9")
buf.write("\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9")
buf.write("\3\u01c9\3\u01c9\3\u01c9\3\u01ca\3\u01ca\3\u01ca\3\u01ca")
buf.write("\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01ca")
buf.write("\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01cb\3\u01cb\3\u01cb")
buf.write("\3\u01cb\3\u01cb\3\u01cb\3\u01cb\3\u01cb\3\u01cb\3\u01cb")
buf.write("\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cc")
buf.write("\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cd\3\u01cd\3\u01cd")
buf.write("\3\u01cd\3\u01cd\3\u01cd\3\u01cd\3\u01cd\3\u01ce\3\u01ce")
buf.write("\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01ce")
buf.write("\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01cf\3\u01cf\3\u01cf")
buf.write("\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf")
buf.write("\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01d0")
buf.write("\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0")
buf.write("\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0")
buf.write("\3\u01d0\3\u01d1\3\u01d1\3\u01d1\3\u01d1\3\u01d1\3\u01d2")
buf.write("\3\u01d2\3\u01d2\3\u01d2\3\u01d3\3\u01d3\3\u01d3\3\u01d3")
buf.write("\3\u01d3\3\u01d4\3\u01d4\3\u01d4\3\u01d4\3\u01d5\3\u01d5")
buf.write("\3\u01d5\3\u01d5\3\u01d5\3\u01d6\3\u01d6\3\u01d6\3\u01d6")
buf.write("\3\u01d7\3\u01d7\3\u01d7\3\u01d7\3\u01d7\3\u01d7\3\u01d7")
buf.write("\3\u01d8\3\u01d8\3\u01d8\3\u01d8\3\u01d9\3\u01d9\3\u01d9")
buf.write("\3\u01d9\3\u01d9\3\u01d9\3\u01da\3\u01da\3\u01da\3\u01da")
buf.write("\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da")
buf.write("\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da\3\u01db\3\u01db")
buf.write("\3\u01db\3\u01db\3\u01db\3\u01db\3\u01db\3\u01db\3\u01db")
buf.write("\3\u01db\3\u01db\3\u01dc\3\u01dc\3\u01dc\3\u01dc\3\u01dd")
buf.write("\3\u01dd\3\u01dd\3\u01dd\3\u01dd\3\u01dd\3\u01dd\3\u01dd")
buf.write("\3\u01dd\3\u01de\3\u01de\3\u01de\3\u01de\3\u01de\3\u01de")
buf.write("\3\u01df\3\u01df\3\u01df\3\u01df\3\u01df\3\u01df\3\u01df")
buf.write("\3\u01e0\3\u01e0\3\u01e0\3\u01e0\3\u01e0\3\u01e1\3\u01e1")
buf.write("\3\u01e1\3\u01e1\3\u01e1\3\u01e1\3\u01e1\3\u01e2\3\u01e2")
buf.write("\3\u01e2\3\u01e2\3\u01e2\3\u01e2\7\u01e2\u1368\n\u01e2")
buf.write("\f\u01e2\16\u01e2\u136b\13\u01e2\3\u01e2\3\u01e2\3\u01e3")
buf.write("\3\u01e3\3\u01e3\7\u01e3\u1372\n\u01e3\f\u01e3\16\u01e3")
buf.write("\u1375\13\u01e3\3\u01e3\6\u01e3\u1378\n\u01e3\r\u01e3")
buf.write("\16\u01e3\u1379\3\u01e4\3\u01e4\3\u01e4\7\u01e4\u137f")
buf.write("\n\u01e4\f\u01e4\16\u01e4\u1382\13\u01e4\3\u01e4\6\u01e4")
buf.write("\u1385\n\u01e4\r\u01e4\16\u01e4\u1386\3\u01e5\3\u01e5")
buf.write("\3\u01e5\3\u01e6\3\u01e6\3\u01e7\3\u01e7\3\u01e8\3\u01e8")
buf.write("\3\u01e8\5\u01e8\u1393\n\u01e8\3\u01e8\3\u01e8\5\u01e8")
buf.write("\u1397\n\u01e8\5\u01e8\u1399\n\u01e8\3\u01e8\3\u01e8\5")
buf.write("\u01e8\u139d\n\u01e8\3\u01e9\3\u01e9\3\u01e9\3\u01e9\3")
buf.write("\u01e9\7\u01e9\u13a4\n\u01e9\f\u01e9\16\u01e9\u13a7\13")
buf.write("\u01e9\3\u01e9\3\u01e9\3\u01ea\3\u01ea\3\u01ea\3\u01ea")
buf.write("\3\u01ea\5\u01ea\u13b0\n\u01ea\3\u01ea\3\u01ea\3\u01eb")
buf.write("\3\u01eb\3\u01ec\3\u01ec\3\u01ec\7\u01ec\u13b9\n\u01ec")
buf.write("\f\u01ec\16\u01ec\u13bc\13\u01ec\3\u01ec\3\u01ec\3\u01ec")
buf.write("\3\u01ed\3\u01ed\3\u01ed\7\u01ed\u13c4\n\u01ed\f\u01ed")
buf.write("\16\u01ed\u13c7\13\u01ed\3\u01ed\3\u01ed\3\u01ed\3\u01ee")
buf.write("\3\u01ee\3\u01ee\7\u01ee\u13cf\n\u01ee\f\u01ee\16\u01ee")
buf.write("\u13d2\13\u01ee\3\u01ee\3\u01ee\3\u01ee\3\u01ef\3\u01ef")
buf.write("\3\u01ef\7\u01ef\u13da\n\u01ef\f\u01ef\16\u01ef\u13dd")
buf.write("\13\u01ef\3\u01ef\3\u01ef\3\u01ef\3\u01f0\3\u01f0\3\u01f1")
buf.write("\3\u01f1\3\u01f1\3\u01f1\6\u01f1\u13e8\n\u01f1\r\u01f1")
buf.write("\16\u01f1\u13e9\3\u01f1\3\u01f1\3\u01f2\3\u01f2\3\u01f3")
buf.write("\3\u01f3\3\u01f4\3\u01f4\3\u01f5\3\u01f5\3\u01f6\3\u01f6")
buf.write("\3\u01f6\3\u01f7\3\u01f7\3\u01f8\3\u01f8\3\u01f9\3\u01f9")
buf.write("\3\u01fa\3\u01fa\3\u01fb\3\u01fb\3\u01fc\3\u01fc\3\u01fd")
buf.write("\3\u01fd\3\u01fd\3\u01fe\3\u01fe\3\u01fe\3\u01fe\7\u01fe")
buf.write("\u140c\n\u01fe\f\u01fe\16\u01fe\u140f\13\u01fe\3\u01fe")
buf.write("\3\u01fe\3\u01fe\3\u01fe\3\u01fe\5\u01fe\u1416\n\u01fe")
buf.write("\3\u01ff\3\u01ff\3\u0200\3\u0200\3\u0201\3\u0201\3\u0201")
buf.write("\3\u0202\3\u0202\3\u0203\3\u0203\3\u0203\3\u0204\3\u0204")
buf.write("\3\u0204\3\u0204\3\u0204\3\u0204\3\u0204\3\u0204\5\u0204")
buf.write("\u142c\n\u0204\3\u0205\3\u0205\3\u0206\3\u0206\3\u0207")
buf.write("\3\u0207\3\u0208\3\u0208\3\u0209\3\u0209\3\u020a\3\u020a")
buf.write("\3\u020a\3\u020b\3\u020b\3\u020c\3\u020c\3\u020d\3\u020d")
buf.write("\3\u020e\3\u020e\3\u020f\3\u020f\3\u0210\6\u0210\u1446")
buf.write("\n\u0210\r\u0210\16\u0210\u1447\3\u0210\3\u0210\3\u0211")
buf.write("\3\u0211\3\u0212\6\u0212\u144f\n\u0212\r\u0212\16\u0212")
buf.write("\u1450\3\u0213\7\u0213\u1454\n\u0213\f\u0213\16\u0213")
buf.write("\u1457\13\u0213\3\u0213\5\u0213\u145a\n\u0213\3\u0213")
buf.write("\6\u0213\u145d\n\u0213\r\u0213\16\u0213\u145e\3\u0214")
buf.write("\3\u0214\3\u0214\3\u0214\7\u0214\u1465\n\u0214\f\u0214")
buf.write("\16\u0214\u1468\13\u0214\3\u0214\3\u0214\5\u0214\u146c")
buf.write("\n\u0214\3\u0214\3\u0214\3\u0215\3\u0215\3\u0215\3\u0215")
buf.write("\7\u0215\u1474\n\u0215\f\u0215\16\u0215\u1477\13\u0215")
buf.write("\3\u0215\3\u0215\3\u0215\3\u0215\3\u0215\3\u0216\3\u0216")
buf.write("\3\u0216\3\u0216\3\u0216\3\u0216\3\u0216\3\u0216\3\u0216")
buf.write("\7\u0216\u1487\n\u0216\f\u0216\16\u0216\u148a\13\u0216")
buf.write("\3\u0216\3\u0216\5\u0216\u148e\n\u0216\3\u0217\5\u0217")
buf.write("\u1491\n\u0217\3\u0217\3\u0217\3\u0218\3\u0218\3\u0219")
buf.write("\3\u0219\3\u0219\7\u0219\u149a\n\u0219\f\u0219\16\u0219")
buf.write("\u149d\13\u0219\3\u021a\3\u021a\3\u021a\3\u021a\3\u021a")
buf.write("\3\u021b\3\u021b\3\u021c\3\u021c\3\u021d\3\u021d\3\u021e")
buf.write("\3\u021e\3\u021f\3\u021f\3\u0220\3\u0220\3\u0221\3\u0221")
buf.write("\3\u0222\3\u0222\3\u0223\3\u0223\3\u0224\3\u0224\3\u0225")
buf.write("\3\u0225\3\u0226\3\u0226\3\u0227\3\u0227\3\u0228\3\u0228")
buf.write("\3\u0229\3\u0229\3\u022a\3\u022a\3\u022b\3\u022b\3\u022c")
buf.write("\3\u022c\3\u022d\3\u022d\3\u022e\3\u022e\3\u022f\3\u022f")
buf.write("\3\u0230\3\u0230\3\u0231\3\u0231\3\u0232\3\u0232\3\u0233")
buf.write("\3\u0233\3\u0234\3\u0234\7\u13ba\u13c5\u13d0\u13db\u1475")
buf.write("\2\u0235\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f")
buf.write("\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27")
buf.write("-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%")
buf.write("I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67")
buf.write("m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089")
buf.write("F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099")
buf.write("N\u009bO\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9")
buf.write("V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9")
buf.write("^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9")
buf.write("f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9")
buf.write("n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9")
buf.write("v\u00ebw\u00edx\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9")
buf.write("~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101\u0082\u0103")
buf.write("\u0083\u0105\u0084\u0107\u0085\u0109\u0086\u010b\u0087")
buf.write("\u010d\u0088\u010f\u0089\u0111\u008a\u0113\u008b\u0115")
buf.write("\u008c\u0117\u008d\u0119\u008e\u011b\u008f\u011d\u0090")
buf.write("\u011f\u0091\u0121\u0092\u0123\u0093\u0125\u0094\u0127")
buf.write("\u0095\u0129\u0096\u012b\u0097\u012d\u0098\u012f\u0099")
buf.write("\u0131\u009a\u0133\u009b\u0135\u009c\u0137\u009d\u0139")
buf.write("\u009e\u013b\u009f\u013d\u00a0\u013f\u00a1\u0141\u00a2")
buf.write("\u0143\u00a3\u0145\u00a4\u0147\u00a5\u0149\u00a6\u014b")
buf.write("\u00a7\u014d\u00a8\u014f\u00a9\u0151\u00aa\u0153\u00ab")
buf.write("\u0155\u00ac\u0157\u00ad\u0159\u00ae\u015b\u00af\u015d")
buf.write("\u00b0\u015f\u00b1\u0161\u00b2\u0163\u00b3\u0165\u00b4")
buf.write("\u0167\u00b5\u0169\u00b6\u016b\u00b7\u016d\u00b8\u016f")
buf.write("\u00b9\u0171\u00ba\u0173\u00bb\u0175\u00bc\u0177\u00bd")
buf.write("\u0179\u00be\u017b\u00bf\u017d\u00c0\u017f\u00c1\u0181")
buf.write("\u00c2\u0183\u00c3\u0185\u00c4\u0187\u00c5\u0189\u00c6")
buf.write("\u018b\u00c7\u018d\u00c8\u018f\u00c9\u0191\u00ca\u0193")
buf.write("\u00cb\u0195\u00cc\u0197\u00cd\u0199\u00ce\u019b\u00cf")
buf.write("\u019d\u00d0\u019f\u00d1\u01a1\u00d2\u01a3\u00d3\u01a5")
buf.write("\u00d4\u01a7\u00d5\u01a9\u00d6\u01ab\u00d7\u01ad\u00d8")
buf.write("\u01af\u00d9\u01b1\u00da\u01b3\u00db\u01b5\u00dc\u01b7")
buf.write("\u00dd\u01b9\u00de\u01bb\u00df\u01bd\u00e0\u01bf\u00e1")
buf.write("\u01c1\u00e2\u01c3\u00e3\u01c5\u00e4\u01c7\u00e5\u01c9")
buf.write("\u00e6\u01cb\u00e7\u01cd\u00e8\u01cf\u00e9\u01d1\u00ea")
buf.write("\u01d3\u00eb\u01d5\u00ec\u01d7\u00ed\u01d9\u00ee\u01db")
buf.write("\u00ef\u01dd\u00f0\u01df\u00f1\u01e1\u00f2\u01e3\u00f3")
buf.write("\u01e5\u00f4\u01e7\u00f5\u01e9\u00f6\u01eb\u00f7\u01ed")
buf.write("\u00f8\u01ef\u00f9\u01f1\u00fa\u01f3\u00fb\u01f5\u00fc")
buf.write("\u01f7\u00fd\u01f9\u00fe\u01fb\u00ff\u01fd\u0100\u01ff")
buf.write("\u0101\u0201\u0102\u0203\u0103\u0205\u0104\u0207\u0105")
buf.write("\u0209\u0106\u020b\u0107\u020d\u0108\u020f\u0109\u0211")
buf.write("\u010a\u0213\u010b\u0215\u010c\u0217\u010d\u0219\u010e")
buf.write("\u021b\u010f\u021d\u0110\u021f\u0111\u0221\u0112\u0223")
buf.write("\u0113\u0225\u0114\u0227\u0115\u0229\u0116\u022b\u0117")
buf.write("\u022d\u0118\u022f\u0119\u0231\u011a\u0233\u011b\u0235")
buf.write("\u011c\u0237\u011d\u0239\u011e\u023b\u011f\u023d\u0120")
buf.write("\u023f\u0121\u0241\u0122\u0243\u0123\u0245\u0124\u0247")
buf.write("\u0125\u0249\u0126\u024b\u0127\u024d\u0128\u024f\u0129")
buf.write("\u0251\u012a\u0253\u012b\u0255\u012c\u0257\u012d\u0259")
buf.write("\u012e\u025b\u012f\u025d\u0130\u025f\u0131\u0261\u0132")
buf.write("\u0263\u0133\u0265\u0134\u0267\u0135\u0269\u0136\u026b")
buf.write("\u0137\u026d\u0138\u026f\u0139\u0271\u013a\u0273\u013b")
buf.write("\u0275\u013c\u0277\u013d\u0279\u013e\u027b\u013f\u027d")
buf.write("\u0140\u027f\u0141\u0281\u0142\u0283\u0143\u0285\u0144")
buf.write("\u0287\u0145\u0289\u0146\u028b\u0147\u028d\u0148\u028f")
buf.write("\u0149\u0291\u014a\u0293\u014b\u0295\u014c\u0297\u014d")
buf.write("\u0299\u014e\u029b\u014f\u029d\u0150\u029f\u0151\u02a1")
buf.write("\u0152\u02a3\u0153\u02a5\u0154\u02a7\u0155\u02a9\u0156")
buf.write("\u02ab\u0157\u02ad\u0158\u02af\u0159\u02b1\u015a\u02b3")
buf.write("\u015b\u02b5\u015c\u02b7\u015d\u02b9\u015e\u02bb\u015f")
buf.write("\u02bd\u0160\u02bf\u0161\u02c1\u0162\u02c3\u0163\u02c5")
buf.write("\u0164\u02c7\u0165\u02c9\u0166\u02cb\u0167\u02cd\u0168")
buf.write("\u02cf\u0169\u02d1\u016a\u02d3\u016b\u02d5\u016c\u02d7")
buf.write("\u016d\u02d9\u016e\u02db\u016f\u02dd\u0170\u02df\u0171")
buf.write("\u02e1\u0172\u02e3\u0173\u02e5\u0174\u02e7\u0175\u02e9")
buf.write("\u0176\u02eb\u0177\u02ed\u0178\u02ef\u0179\u02f1\u017a")
buf.write("\u02f3\u017b\u02f5\u017c\u02f7\u017d\u02f9\u017e\u02fb")
buf.write("\u017f\u02fd\u0180\u02ff\u0181\u0301\u0182\u0303\u0183")
buf.write("\u0305\u0184\u0307\u0185\u0309\u0186\u030b\u0187\u030d")
buf.write("\u0188\u030f\u0189\u0311\u018a\u0313\u018b\u0315\u018c")
buf.write("\u0317\u018d\u0319\u018e\u031b\u018f\u031d\u0190\u031f")
buf.write("\u0191\u0321\u0192\u0323\u0193\u0325\u0194\u0327\u0195")
buf.write("\u0329\u0196\u032b\u0197\u032d\u0198\u032f\u0199\u0331")
buf.write("\u019a\u0333\u019b\u0335\u019c\u0337\u019d\u0339\u019e")
buf.write("\u033b\u019f\u033d\u01a0\u033f\u01a1\u0341\u01a2\u0343")
buf.write("\u01a3\u0345\u01a4\u0347\u01a5\u0349\u01a6\u034b\u01a7")
buf.write("\u034d\u01a8\u034f\u01a9\u0351\u01aa\u0353\u01ab\u0355")
buf.write("\u01ac\u0357\u01ad\u0359\u01ae\u035b\u01af\u035d\u01b0")
buf.write("\u035f\u01b1\u0361\u01b2\u0363\u01b3\u0365\u01b4\u0367")
buf.write("\u01b5\u0369\u01b6\u036b\u01b7\u036d\u01b8\u036f\u01b9")
buf.write("\u0371\u01ba\u0373\u01bb\u0375\u01bc\u0377\u01bd\u0379")
buf.write("\u01be\u037b\u01bf\u037d\u01c0\u037f\u01c1\u0381\u01c2")
buf.write("\u0383\u01c3\u0385\u01c4\u0387\u01c5\u0389\u01c6\u038b")
buf.write("\u01c7\u038d\u01c8\u038f\u01c9\u0391\u01ca\u0393\u01cb")
buf.write("\u0395\u01cc\u0397\u01cd\u0399\u01ce\u039b\u01cf\u039d")
buf.write("\u01d0\u039f\u01d1\u03a1\u01d2\u03a3\u01d3\u03a5\u01d4")
buf.write("\u03a7\u01d5\u03a9\u01d6\u03ab\u01d7\u03ad\u01d8\u03af")
buf.write("\u01d9\u03b1\u01da\u03b3\u01db\u03b5\u01dc\u03b7\u01dd")
buf.write("\u03b9\u01de\u03bb\u01df\u03bd\u01e0\u03bf\u01e1\u03c1")
buf.write("\u01e2\u03c3\u01e3\u03c5\u01e4\u03c7\u01e5\u03c9\u01e6")
buf.write("\u03cb\u01e7\u03cd\u01e8\u03cf\u01e9\u03d1\u01ea\u03d3")
buf.write("\2\u03d5\2\u03d7\2\u03d9\2\u03db\2\u03dd\2\u03df\2\u03e1")
buf.write("\u01eb\u03e3\u01ec\u03e5\u01ed\u03e7\u01ee\u03e9\u01ef")
buf.write("\u03eb\u01f0\u03ed\u01f1\u03ef\u01f2\u03f1\u01f3\u03f3")
buf.write("\u01f4\u03f5\u01f5\u03f7\u01f6\u03f9\u01f7\u03fb\u01f8")
buf.write("\u03fd\u01f9\u03ff\u01fa\u0401\u01fb\u0403\u01fc\u0405")
buf.write("\u01fd\u0407\u01fe\u0409\u01ff\u040b\u0200\u040d\u0201")
buf.write("\u040f\u0202\u0411\2\u0413\u0203\u0415\u0204\u0417\u0205")
buf.write("\u0419\u0206\u041b\u0207\u041d\u0208\u041f\u0209\u0421")
buf.write("\2\u0423\2\u0425\2\u0427\u020a\u0429\u020b\u042b\u020c")
buf.write("\u042d\2\u042f\2\u0431\u020d\u0433\u020e\u0435\2\u0437")
buf.write("\2\u0439\2\u043b\2\u043d\2\u043f\2\u0441\2\u0443\2\u0445")
buf.write("\2\u0447\2\u0449\2\u044b\2\u044d\2\u044f\2\u0451\2\u0453")
buf.write("\2\u0455\2\u0457\2\u0459\2\u045b\2\u045d\2\u045f\2\u0461")
buf.write("\2\u0463\2\u0465\2\u0467\2\3\2\'\5\2\f\f\17\17))\5\2\62")
buf.write(";CHch\4\2GGgg\4\2--//\t\2\13\f\17\17\"\"**>>]]}}\5\2\f")
buf.write("\f\17\17$$\4\2\62;aa\5\2\13\f\17\17\"\"\4\2C\\c|\4\2\f")
buf.write("\f\17\17\4\2\13\13\"\"\5\2%&\62;aa\4\2CCcc\4\2DDdd\4\2")
buf.write("EEee\4\2FFff\4\2HHhh\4\2IIii\4\2JJjj\4\2KKkk\4\2LLll\4")
buf.write("\2MMmm\4\2NNnn\4\2OOoo\4\2PPpp\4\2QQqq\4\2RRrr\4\2SSs")
buf.write("s\4\2TTtt\4\2UUuu\4\2VVvv\4\2WWww\4\2XXxx\4\2YYyy\4\2")
buf.write("ZZzz\4\2[[{{\4\2\\\\||\2\u14dd\2\3\3\2\2\2\2\5\3\2\2\2")
buf.write("\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17")
buf.write("\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3")
buf.write("\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2")
buf.write("\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3")
buf.write("\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2")
buf.write("\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3")
buf.write("\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E")
buf.write("\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2")
buf.write("O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2")
buf.write("\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2")
buf.write("\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2")
buf.write("\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3")
buf.write("\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177")
buf.write("\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2")
buf.write("\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d")
buf.write("\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2")
buf.write("\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b")
buf.write("\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2")
buf.write("\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9")
buf.write("\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2")
buf.write("\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7")
buf.write("\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2")
buf.write("\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5")
buf.write("\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2")
buf.write("\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3")
buf.write("\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2")
buf.write("\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1")
buf.write("\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2")
buf.write("\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef")
buf.write("\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2")
buf.write("\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd")
buf.write("\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2")
buf.write("\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010b")
buf.write("\3\2\2\2\2\u010d\3\2\2\2\2\u010f\3\2\2\2\2\u0111\3\2\2")
buf.write("\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\2\u0117\3\2\2\2\2\u0119")
buf.write("\3\2\2\2\2\u011b\3\2\2\2\2\u011d\3\2\2\2\2\u011f\3\2\2")
buf.write("\2\2\u0121\3\2\2\2\2\u0123\3\2\2\2\2\u0125\3\2\2\2\2\u0127")
buf.write("\3\2\2\2\2\u0129\3\2\2\2\2\u012b\3\2\2\2\2\u012d\3\2\2")
buf.write("\2\2\u012f\3\2\2\2\2\u0131\3\2\2\2\2\u0133\3\2\2\2\2\u0135")
buf.write("\3\2\2\2\2\u0137\3\2\2\2\2\u0139\3\2\2\2\2\u013b\3\2\2")
buf.write("\2\2\u013d\3\2\2\2\2\u013f\3\2\2\2\2\u0141\3\2\2\2\2\u0143")
buf.write("\3\2\2\2\2\u0145\3\2\2\2\2\u0147\3\2\2\2\2\u0149\3\2\2")
buf.write("\2\2\u014b\3\2\2\2\2\u014d\3\2\2\2\2\u014f\3\2\2\2\2\u0151")
buf.write("\3\2\2\2\2\u0153\3\2\2\2\2\u0155\3\2\2\2\2\u0157\3\2\2")
buf.write("\2\2\u0159\3\2\2\2\2\u015b\3\2\2\2\2\u015d\3\2\2\2\2\u015f")
buf.write("\3\2\2\2\2\u0161\3\2\2\2\2\u0163\3\2\2\2\2\u0165\3\2\2")
buf.write("\2\2\u0167\3\2\2\2\2\u0169\3\2\2\2\2\u016b\3\2\2\2\2\u016d")
buf.write("\3\2\2\2\2\u016f\3\2\2\2\2\u0171\3\2\2\2\2\u0173\3\2\2")
buf.write("\2\2\u0175\3\2\2\2\2\u0177\3\2\2\2\2\u0179\3\2\2\2\2\u017b")
buf.write("\3\2\2\2\2\u017d\3\2\2\2\2\u017f\3\2\2\2\2\u0181\3\2\2")
buf.write("\2\2\u0183\3\2\2\2\2\u0185\3\2\2\2\2\u0187\3\2\2\2\2\u0189")
buf.write("\3\2\2\2\2\u018b\3\2\2\2\2\u018d\3\2\2\2\2\u018f\3\2\2")
buf.write("\2\2\u0191\3\2\2\2\2\u0193\3\2\2\2\2\u0195\3\2\2\2\2\u0197")
buf.write("\3\2\2\2\2\u0199\3\2\2\2\2\u019b\3\2\2\2\2\u019d\3\2\2")
buf.write("\2\2\u019f\3\2\2\2\2\u01a1\3\2\2\2\2\u01a3\3\2\2\2\2\u01a5")
buf.write("\3\2\2\2\2\u01a7\3\2\2\2\2\u01a9\3\2\2\2\2\u01ab\3\2\2")
buf.write("\2\2\u01ad\3\2\2\2\2\u01af\3\2\2\2\2\u01b1\3\2\2\2\2\u01b3")
buf.write("\3\2\2\2\2\u01b5\3\2\2\2\2\u01b7\3\2\2\2\2\u01b9\3\2\2")
buf.write("\2\2\u01bb\3\2\2\2\2\u01bd\3\2\2\2\2\u01bf\3\2\2\2\2\u01c1")
buf.write("\3\2\2\2\2\u01c3\3\2\2\2\2\u01c5\3\2\2\2\2\u01c7\3\2\2")
buf.write("\2\2\u01c9\3\2\2\2\2\u01cb\3\2\2\2\2\u01cd\3\2\2\2\2\u01cf")
buf.write("\3\2\2\2\2\u01d1\3\2\2\2\2\u01d3\3\2\2\2\2\u01d5\3\2\2")
buf.write("\2\2\u01d7\3\2\2\2\2\u01d9\3\2\2\2\2\u01db\3\2\2\2\2\u01dd")
buf.write("\3\2\2\2\2\u01df\3\2\2\2\2\u01e1\3\2\2\2\2\u01e3\3\2\2")
buf.write("\2\2\u01e5\3\2\2\2\2\u01e7\3\2\2\2\2\u01e9\3\2\2\2\2\u01eb")
buf.write("\3\2\2\2\2\u01ed\3\2\2\2\2\u01ef\3\2\2\2\2\u01f1\3\2\2")
buf.write("\2\2\u01f3\3\2\2\2\2\u01f5\3\2\2\2\2\u01f7\3\2\2\2\2\u01f9")
buf.write("\3\2\2\2\2\u01fb\3\2\2\2\2\u01fd\3\2\2\2\2\u01ff\3\2\2")
buf.write("\2\2\u0201\3\2\2\2\2\u0203\3\2\2\2\2\u0205\3\2\2\2\2\u0207")
buf.write("\3\2\2\2\2\u0209\3\2\2\2\2\u020b\3\2\2\2\2\u020d\3\2\2")
buf.write("\2\2\u020f\3\2\2\2\2\u0211\3\2\2\2\2\u0213\3\2\2\2\2\u0215")
buf.write("\3\2\2\2\2\u0217\3\2\2\2\2\u0219\3\2\2\2\2\u021b\3\2\2")
buf.write("\2\2\u021d\3\2\2\2\2\u021f\3\2\2\2\2\u0221\3\2\2\2\2\u0223")
buf.write("\3\2\2\2\2\u0225\3\2\2\2\2\u0227\3\2\2\2\2\u0229\3\2\2")
buf.write("\2\2\u022b\3\2\2\2\2\u022d\3\2\2\2\2\u022f\3\2\2\2\2\u0231")
buf.write("\3\2\2\2\2\u0233\3\2\2\2\2\u0235\3\2\2\2\2\u0237\3\2\2")
buf.write("\2\2\u0239\3\2\2\2\2\u023b\3\2\2\2\2\u023d\3\2\2\2\2\u023f")
buf.write("\3\2\2\2\2\u0241\3\2\2\2\2\u0243\3\2\2\2\2\u0245\3\2\2")
buf.write("\2\2\u0247\3\2\2\2\2\u0249\3\2\2\2\2\u024b\3\2\2\2\2\u024d")
buf.write("\3\2\2\2\2\u024f\3\2\2\2\2\u0251\3\2\2\2\2\u0253\3\2\2")
buf.write("\2\2\u0255\3\2\2\2\2\u0257\3\2\2\2\2\u0259\3\2\2\2\2\u025b")
buf.write("\3\2\2\2\2\u025d\3\2\2\2\2\u025f\3\2\2\2\2\u0261\3\2\2")
buf.write("\2\2\u0263\3\2\2\2\2\u0265\3\2\2\2\2\u0267\3\2\2\2\2\u0269")
buf.write("\3\2\2\2\2\u026b\3\2\2\2\2\u026d\3\2\2\2\2\u026f\3\2\2")
buf.write("\2\2\u0271\3\2\2\2\2\u0273\3\2\2\2\2\u0275\3\2\2\2\2\u0277")
buf.write("\3\2\2\2\2\u0279\3\2\2\2\2\u027b\3\2\2\2\2\u027d\3\2\2")
buf.write("\2\2\u027f\3\2\2\2\2\u0281\3\2\2\2\2\u0283\3\2\2\2\2\u0285")
buf.write("\3\2\2\2\2\u0287\3\2\2\2\2\u0289\3\2\2\2\2\u028b\3\2\2")
buf.write("\2\2\u028d\3\2\2\2\2\u028f\3\2\2\2\2\u0291\3\2\2\2\2\u0293")
buf.write("\3\2\2\2\2\u0295\3\2\2\2\2\u0297\3\2\2\2\2\u0299\3\2\2")
buf.write("\2\2\u029b\3\2\2\2\2\u029d\3\2\2\2\2\u029f\3\2\2\2\2\u02a1")
buf.write("\3\2\2\2\2\u02a3\3\2\2\2\2\u02a5\3\2\2\2\2\u02a7\3\2\2")
buf.write("\2\2\u02a9\3\2\2\2\2\u02ab\3\2\2\2\2\u02ad\3\2\2\2\2\u02af")
buf.write("\3\2\2\2\2\u02b1\3\2\2\2\2\u02b3\3\2\2\2\2\u02b5\3\2\2")
buf.write("\2\2\u02b7\3\2\2\2\2\u02b9\3\2\2\2\2\u02bb\3\2\2\2\2\u02bd")
buf.write("\3\2\2\2\2\u02bf\3\2\2\2\2\u02c1\3\2\2\2\2\u02c3\3\2\2")
buf.write("\2\2\u02c5\3\2\2\2\2\u02c7\3\2\2\2\2\u02c9\3\2\2\2\2\u02cb")
buf.write("\3\2\2\2\2\u02cd\3\2\2\2\2\u02cf\3\2\2\2\2\u02d1\3\2\2")
buf.write("\2\2\u02d3\3\2\2\2\2\u02d5\3\2\2\2\2\u02d7\3\2\2\2\2\u02d9")
buf.write("\3\2\2\2\2\u02db\3\2\2\2\2\u02dd\3\2\2\2\2\u02df\3\2\2")
buf.write("\2\2\u02e1\3\2\2\2\2\u02e3\3\2\2\2\2\u02e5\3\2\2\2\2\u02e7")
buf.write("\3\2\2\2\2\u02e9\3\2\2\2\2\u02eb\3\2\2\2\2\u02ed\3\2\2")
buf.write("\2\2\u02ef\3\2\2\2\2\u02f1\3\2\2\2\2\u02f3\3\2\2\2\2\u02f5")
buf.write("\3\2\2\2\2\u02f7\3\2\2\2\2\u02f9\3\2\2\2\2\u02fb\3\2\2")
buf.write("\2\2\u02fd\3\2\2\2\2\u02ff\3\2\2\2\2\u0301\3\2\2\2\2\u0303")
buf.write("\3\2\2\2\2\u0305\3\2\2\2\2\u0307\3\2\2\2\2\u0309\3\2\2")
buf.write("\2\2\u030b\3\2\2\2\2\u030d\3\2\2\2\2\u030f\3\2\2\2\2\u0311")
buf.write("\3\2\2\2\2\u0313\3\2\2\2\2\u0315\3\2\2\2\2\u0317\3\2\2")
buf.write("\2\2\u0319\3\2\2\2\2\u031b\3\2\2\2\2\u031d\3\2\2\2\2\u031f")
buf.write("\3\2\2\2\2\u0321\3\2\2\2\2\u0323\3\2\2\2\2\u0325\3\2\2")
buf.write("\2\2\u0327\3\2\2\2\2\u0329\3\2\2\2\2\u032b\3\2\2\2\2\u032d")
buf.write("\3\2\2\2\2\u032f\3\2\2\2\2\u0331\3\2\2\2\2\u0333\3\2\2")
buf.write("\2\2\u0335\3\2\2\2\2\u0337\3\2\2\2\2\u0339\3\2\2\2\2\u033b")
buf.write("\3\2\2\2\2\u033d\3\2\2\2\2\u033f\3\2\2\2\2\u0341\3\2\2")
buf.write("\2\2\u0343\3\2\2\2\2\u0345\3\2\2\2\2\u0347\3\2\2\2\2\u0349")
buf.write("\3\2\2\2\2\u034b\3\2\2\2\2\u034d\3\2\2\2\2\u034f\3\2\2")
buf.write("\2\2\u0351\3\2\2\2\2\u0353\3\2\2\2\2\u0355\3\2\2\2\2\u0357")
buf.write("\3\2\2\2\2\u0359\3\2\2\2\2\u035b\3\2\2\2\2\u035d\3\2\2")
buf.write("\2\2\u035f\3\2\2\2\2\u0361\3\2\2\2\2\u0363\3\2\2\2\2\u0365")
buf.write("\3\2\2\2\2\u0367\3\2\2\2\2\u0369\3\2\2\2\2\u036b\3\2\2")
buf.write("\2\2\u036d\3\2\2\2\2\u036f\3\2\2\2\2\u0371\3\2\2\2\2\u0373")
buf.write("\3\2\2\2\2\u0375\3\2\2\2\2\u0377\3\2\2\2\2\u0379\3\2\2")
buf.write("\2\2\u037b\3\2\2\2\2\u037d\3\2\2\2\2\u037f\3\2\2\2\2\u0381")
buf.write("\3\2\2\2\2\u0383\3\2\2\2\2\u0385\3\2\2\2\2\u0387\3\2\2")
buf.write("\2\2\u0389\3\2\2\2\2\u038b\3\2\2\2\2\u038d\3\2\2\2\2\u038f")
buf.write("\3\2\2\2\2\u0391\3\2\2\2\2\u0393\3\2\2\2\2\u0395\3\2\2")
buf.write("\2\2\u0397\3\2\2\2\2\u0399\3\2\2\2\2\u039b\3\2\2\2\2\u039d")
buf.write("\3\2\2\2\2\u039f\3\2\2\2\2\u03a1\3\2\2\2\2\u03a3\3\2\2")
buf.write("\2\2\u03a5\3\2\2\2\2\u03a7\3\2\2\2\2\u03a9\3\2\2\2\2\u03ab")
buf.write("\3\2\2\2\2\u03ad\3\2\2\2\2\u03af\3\2\2\2\2\u03b1\3\2\2")
buf.write("\2\2\u03b3\3\2\2\2\2\u03b5\3\2\2\2\2\u03b7\3\2\2\2\2\u03b9")
buf.write("\3\2\2\2\2\u03bb\3\2\2\2\2\u03bd\3\2\2\2\2\u03bf\3\2\2")
buf.write("\2\2\u03c1\3\2\2\2\2\u03c3\3\2\2\2\2\u03c5\3\2\2\2\2\u03c7")
buf.write("\3\2\2\2\2\u03c9\3\2\2\2\2\u03cb\3\2\2\2\2\u03cd\3\2\2")
buf.write("\2\2\u03cf\3\2\2\2\2\u03d1\3\2\2\2\2\u03d3\3\2\2\2\2\u03e1")
buf.write("\3\2\2\2\2\u03e3\3\2\2\2\2\u03e5\3\2\2\2\2\u03e7\3\2\2")
buf.write("\2\2\u03e9\3\2\2\2\2\u03eb\3\2\2\2\2\u03ed\3\2\2\2\2\u03ef")
buf.write("\3\2\2\2\2\u03f1\3\2\2\2\2\u03f3\3\2\2\2\2\u03f5\3\2\2")
buf.write("\2\2\u03f7\3\2\2\2\2\u03f9\3\2\2\2\2\u03fb\3\2\2\2\2\u03fd")
buf.write("\3\2\2\2\2\u03ff\3\2\2\2\2\u0401\3\2\2\2\2\u0403\3\2\2")
buf.write("\2\2\u0405\3\2\2\2\2\u0407\3\2\2\2\2\u0409\3\2\2\2\2\u040b")
buf.write("\3\2\2\2\2\u040d\3\2\2\2\2\u040f\3\2\2\2\2\u0413\3\2\2")
buf.write("\2\2\u0415\3\2\2\2\2\u0417\3\2\2\2\2\u0419\3\2\2\2\2\u041b")
buf.write("\3\2\2\2\2\u041d\3\2\2\2\2\u041f\3\2\2\2\2\u0427\3\2\2")
buf.write("\2\2\u0429\3\2\2\2\2\u042b\3\2\2\2\2\u0431\3\2\2\2\2\u0433")
buf.write("\3\2\2\2\3\u0469\3\2\2\2\5\u046c\3\2\2\2\7\u046e\3\2\2")
buf.write("\2\t\u0472\3\2\2\2\13\u0478\3\2\2\2\r\u047e\3\2\2\2\17")
buf.write("\u0488\3\2\2\2\21\u048c\3\2\2\2\23\u0492\3\2\2\2\25\u049a")
buf.write("\3\2\2\2\27\u049e\3\2\2\2\31\u04a2\3\2\2\2\33\u04a8\3")
buf.write("\2\2\2\35\u04ab\3\2\2\2\37\u04b2\3\2\2\2!\u04b9\3\2\2")
buf.write("\2#\u04bd\3\2\2\2%\u04c7\3\2\2\2\'\u04ca\3\2\2\2)\u04d4")
buf.write("\3\2\2\2+\u04da\3\2\2\2-\u04e1\3\2\2\2/\u04e6\3\2\2\2")
buf.write("\61\u04f0\3\2\2\2\63\u0507\3\2\2\2\65\u050d\3\2\2\2\67")
buf.write("\u0514\3\2\2\29\u051a\3\2\2\2;\u0522\3\2\2\2=\u0528\3")
buf.write("\2\2\2?\u0536\3\2\2\2A\u0543\3\2\2\2C\u0552\3\2\2\2E\u0557")
buf.write("\3\2\2\2G\u055d\3\2\2\2I\u0562\3\2\2\2K\u056a\3\2\2\2")
buf.write("M\u056f\3\2\2\2O\u0577\3\2\2\2Q\u057c\3\2\2\2S\u057f\3")
buf.write("\2\2\2U\u0584\3\2\2\2W\u0586\3\2\2\2Y\u058c\3\2\2\2[\u0591")
buf.write("\3\2\2\2]\u059b\3\2\2\2_\u05a3\3\2\2\2a\u05a8\3\2\2\2")
buf.write("c\u05ad\3\2\2\2e\u05b2\3\2\2\2g\u05ba\3\2\2\2i\u05c4\3")
buf.write("\2\2\2k\u05ca\3\2\2\2m\u05ce\3\2\2\2o\u05d3\3\2\2\2q\u05d9")
buf.write("\3\2\2\2s\u05e1\3\2\2\2u\u05e9\3\2\2\2w\u05f1\3\2\2\2")
buf.write("y\u05f9\3\2\2\2{\u0600\3\2\2\2}\u060a\3\2\2\2\177\u0618")
buf.write("\3\2\2\2\u0081\u0620\3\2\2\2\u0083\u0629\3\2\2\2\u0085")
buf.write("\u0631\3\2\2\2\u0087\u0641\3\2\2\2\u0089\u064a\3\2\2\2")
buf.write("\u008b\u0655\3\2\2\2\u008d\u0661\3\2\2\2\u008f\u066d\3")
buf.write("\2\2\2\u0091\u0675\3\2\2\2\u0093\u067d\3\2\2\2\u0095\u0686")
buf.write("\3\2\2\2\u0097\u068e\3\2\2\2\u0099\u069a\3\2\2\2\u009b")
buf.write("\u06aa\3\2\2\2\u009d\u06af\3\2\2\2\u009f\u06b5\3\2\2\2")
buf.write("\u00a1\u06bc\3\2\2\2\u00a3\u06c2\3\2\2\2\u00a5\u06c7\3")
buf.write("\2\2\2\u00a7\u06cf\3\2\2\2\u00a9\u06dc\3\2\2\2\u00ab\u06e3")
buf.write("\3\2\2\2\u00ad\u06ef\3\2\2\2\u00af\u06f5\3\2\2\2\u00b1")
buf.write("\u06fa\3\2\2\2\u00b3\u0703\3\2\2\2\u00b5\u0708\3\2\2\2")
buf.write("\u00b7\u070c\3\2\2\2\u00b9\u071b\3\2\2\2\u00bb\u0726\3")
buf.write("\2\2\2\u00bd\u072a\3\2\2\2\u00bf\u0730\3\2\2\2\u00c1\u0734")
buf.write("\3\2\2\2\u00c3\u073c\3\2\2\2\u00c5\u0744\3\2\2\2\u00c7")
buf.write("\u074e\3\2\2\2\u00c9\u0758\3\2\2\2\u00cb\u0760\3\2\2\2")
buf.write("\u00cd\u0769\3\2\2\2\u00cf\u0772\3\2\2\2\u00d1\u077a\3")
buf.write("\2\2\2\u00d3\u0781\3\2\2\2\u00d5\u0787\3\2\2\2\u00d7\u078c")
buf.write("\3\2\2\2\u00d9\u079a\3\2\2\2\u00db\u07a4\3\2\2\2\u00dd")
buf.write("\u07ac\3\2\2\2\u00df\u07b9\3\2\2\2\u00e1\u07c2\3\2\2\2")
buf.write("\u00e3\u07cb\3\2\2\2\u00e5\u07d2\3\2\2\2\u00e7\u07d7\3")
buf.write("\2\2\2\u00e9\u07f0\3\2\2\2\u00eb\u07f5\3\2\2\2\u00ed\u07fd")
buf.write("\3\2\2\2\u00ef\u0802\3\2\2\2\u00f1\u0808\3\2\2\2\u00f3")
buf.write("\u080e\3\2\2\2\u00f5\u0815\3\2\2\2\u00f7\u081e\3\2\2\2")
buf.write("\u00f9\u0822\3\2\2\2\u00fb\u0831\3\2\2\2\u00fd\u0835\3")
buf.write("\2\2\2\u00ff\u083c\3\2\2\2\u0101\u0843\3\2\2\2\u0103\u084c")
buf.write("\3\2\2\2\u0105\u0853\3\2\2\2\u0107\u085d\3\2\2\2\u0109")
buf.write("\u086c\3\2\2\2\u010b\u0877\3\2\2\2\u010d\u087f\3\2\2\2")
buf.write("\u010f\u0889\3\2\2\2\u0111\u0891\3\2\2\2\u0113\u0898\3")
buf.write("\2\2\2\u0115\u089d\3\2\2\2\u0117\u08a5\3\2\2\2\u0119\u08ae")
buf.write("\3\2\2\2\u011b\u08b6\3\2\2\2\u011d\u08be\3\2\2\2\u011f")
buf.write("\u08c4\3\2\2\2\u0121\u08ca\3\2\2\2\u0123\u08d0\3\2\2\2")
buf.write("\u0125\u08d6\3\2\2\2\u0127\u08e2\3\2\2\2\u0129\u08e8\3")
buf.write("\2\2\2\u012b\u08f2\3\2\2\2\u012d\u08fa\3\2\2\2\u012f\u08fe")
buf.write("\3\2\2\2\u0131\u0905\3\2\2\2\u0133\u090b\3\2\2\2\u0135")
buf.write("\u0910\3\2\2\2\u0137\u0915\3\2\2\2\u0139\u091e\3\2\2\2")
buf.write("\u013b\u0923\3\2\2\2\u013d\u0929\3\2\2\2\u013f\u092f\3")
buf.write("\2\2\2\u0141\u0938\3\2\2\2\u0143\u093d\3\2\2\2\u0145\u0944")
buf.write("\3\2\2\2\u0147\u0949\3\2\2\2\u0149\u094e\3\2\2\2\u014b")
buf.write("\u0951\3\2\2\2\u014d\u0958\3\2\2\2\u014f\u0962\3\2\2\2")
buf.write("\u0151\u0965\3\2\2\2\u0153\u096d\3\2\2\2\u0155\u0977\3")
buf.write("\2\2\2\u0157\u0981\3\2\2\2\u0159\u0988\3\2\2\2\u015b\u098e")
buf.write("\3\2\2\2\u015d\u0996\3\2\2\2\u015f\u09a0\3\2\2\2\u0161")
buf.write("\u09a8\3\2\2\2\u0163\u09b1\3\2\2\2\u0165\u09b8\3\2\2\2")
buf.write("\u0167\u09be\3\2\2\2\u0169\u09c4\3\2\2\2\u016b\u09cb\3")
buf.write("\2\2\2\u016d\u09d8\3\2\2\2\u016f\u09e0\3\2\2\2\u0171\u09e4")
buf.write("\3\2\2\2\u0173\u09ec\3\2\2\2\u0175\u09f6\3\2\2\2\u0177")
buf.write("\u09ff\3\2\2\2\u0179\u0a04\3\2\2\2\u017b\u0a0f\3\2\2\2")
buf.write("\u017d\u0a12\3\2\2\2\u017f\u0a1c\3\2\2\2\u0181\u0a24\3")
buf.write("\2\2\2\u0183\u0a29\3\2\2\2\u0185\u0a2e\3\2\2\2\u0187\u0a33")
buf.write("\3\2\2\2\u0189\u0a3c\3\2\2\2\u018b\u0a41\3\2\2\2\u018d")
buf.write("\u0a4c\3\2\2\2\u018f\u0a54\3\2\2\2\u0191\u0a59\3\2\2\2")
buf.write("\u0193\u0a5f\3\2\2\2\u0195\u0a67\3\2\2\2\u0197\u0a6c\3")
buf.write("\2\2\2\u0199\u0a72\3\2\2\2\u019b\u0a78\3\2\2\2\u019d\u0a7e")
buf.write("\3\2\2\2\u019f\u0a84\3\2\2\2\u01a1\u0a8a\3\2\2\2\u01a3")
buf.write("\u0a8f\3\2\2\2\u01a5\u0a96\3\2\2\2\u01a7\u0a9a\3\2\2\2")
buf.write("\u01a9\u0aa1\3\2\2\2\u01ab\u0aa7\3\2\2\2\u01ad\u0aac\3")
buf.write("\2\2\2\u01af\u0ab1\3\2\2\2\u01b1\u0ab6\3\2\2\2\u01b3\u0aba")
buf.write("\3\2\2\2\u01b5\u0ac2\3\2\2\2\u01b7\u0acb\3\2\2\2\u01b9")
buf.write("\u0ad4\3\2\2\2\u01bb\u0adb\3\2\2\2\u01bd\u0ae1\3\2\2\2")
buf.write("\u01bf\u0ae7\3\2\2\2\u01c1\u0aee\3\2\2\2\u01c3\u0af7\3")
buf.write("\2\2\2\u01c5\u0b00\3\2\2\2\u01c7\u0b05\3\2\2\2\u01c9\u0b0b")
buf.write("\3\2\2\2\u01cb\u0b12\3\2\2\2\u01cd\u0b18\3\2\2\2\u01cf")
buf.write("\u0b21\3\2\2\2\u01d1\u0b26\3\2\2\2\u01d3\u0b2a\3\2\2\2")
buf.write("\u01d5\u0b32\3\2\2\2\u01d7\u0b3b\3\2\2\2\u01d9\u0b3f\3")
buf.write("\2\2\2\u01db\u0b45\3\2\2\2\u01dd\u0b4e\3\2\2\2\u01df\u0b54")
buf.write("\3\2\2\2\u01e1\u0b5b\3\2\2\2\u01e3\u0b5f\3\2\2\2\u01e5")
buf.write("\u0b62\3\2\2\2\u01e7\u0b6a\3\2\2\2\u01e9\u0b72\3\2\2\2")
buf.write("\u01eb\u0b79\3\2\2\2\u01ed\u0b81\3\2\2\2\u01ef\u0b92\3")
buf.write("\2\2\2\u01f1\u0b9d\3\2\2\2\u01f3\u0ba8\3\2\2\2\u01f5\u0bad")
buf.write("\3\2\2\2\u01f7\u0bb5\3\2\2\2\u01f9\u0bc3\3\2\2\2\u01fb")
buf.write("\u0bc7\3\2\2\2\u01fd\u0bce\3\2\2\2\u01ff\u0bd3\3\2\2\2")
buf.write("\u0201\u0bd9\3\2\2\2\u0203\u0be0\3\2\2\2\u0205\u0be8\3")
buf.write("\2\2\2\u0207\u0bf2\3\2\2\2\u0209\u0bf9\3\2\2\2\u020b\u0bfc")
buf.write("\3\2\2\2\u020d\u0c00\3\2\2\2\u020f\u0c04\3\2\2\2\u0211")
buf.write("\u0c08\3\2\2\2\u0213\u0c0b\3\2\2\2\u0215\u0c10\3\2\2\2")
buf.write("\u0217\u0c15\3\2\2\2\u0219\u0c1c\3\2\2\2\u021b\u0c1f\3")
buf.write("\2\2\2\u021d\u0c27\3\2\2\2\u021f\u0c2d\3\2\2\2\u0221\u0c38")
buf.write("\3\2\2\2\u0223\u0c40\3\2\2\2\u0225\u0c44\3\2\2\2\u0227")
buf.write("\u0c4a\3\2\2\2\u0229\u0c4f\3\2\2\2\u022b\u0c5a\3\2\2\2")
buf.write("\u022d\u0c62\3\2\2\2\u022f\u0c72\3\2\2\2\u0231\u0c7d\3")
buf.write("\2\2\2\u0233\u0c84\3\2\2\2\u0235\u0c8e\3\2\2\2\u0237\u0c96")
buf.write("\3\2\2\2\u0239\u0c9b\3\2\2\2\u023b\u0ca4\3\2\2\2\u023d")
buf.write("\u0caa\3\2\2\2\u023f\u0cb4\3\2\2\2\u0241\u0cba\3\2\2\2")
buf.write("\u0243\u0cbf\3\2\2\2\u0245\u0ccb\3\2\2\2\u0247\u0cd4\3")
buf.write("\2\2\2\u0249\u0cde\3\2\2\2\u024b\u0ce5\3\2\2\2\u024d\u0cef")
buf.write("\3\2\2\2\u024f\u0cf9\3\2\2\2\u0251\u0d01\3\2\2\2\u0253")
buf.write("\u0d07\3\2\2\2\u0255\u0d11\3\2\2\2\u0257\u0d17\3\2\2\2")
buf.write("\u0259\u0d1d\3\2\2\2\u025b\u0d21\3\2\2\2\u025d\u0d26\3")
buf.write("\2\2\2\u025f\u0d2b\3\2\2\2\u0261\u0d32\3\2\2\2\u0263\u0d36")
buf.write("\3\2\2\2\u0265\u0d40\3\2\2\2\u0267\u0d4c\3\2\2\2\u0269")
buf.write("\u0d53\3\2\2\2\u026b\u0d5d\3\2\2\2\u026d\u0d64\3\2\2\2")
buf.write("\u026f\u0d6c\3\2\2\2\u0271\u0d74\3\2\2\2\u0273\u0d88\3")
buf.write("\2\2\2\u0275\u0d8f\3\2\2\2\u0277\u0d9c\3\2\2\2\u0279\u0da3")
buf.write("\3\2\2\2\u027b\u0dad\3\2\2\2\u027d\u0db3\3\2\2\2\u027f")
buf.write("\u0dbb\3\2\2\2\u0281\u0dc2\3\2\2\2\u0283\u0dc8\3\2\2\2")
buf.write("\u0285\u0dd1\3\2\2\2\u0287\u0dd8\3\2\2\2\u0289\u0ddc\3")
buf.write("\2\2\2\u028b\u0de2\3\2\2\2\u028d\u0de7\3\2\2\2\u028f\u0ded")
buf.write("\3\2\2\2\u0291\u0df4\3\2\2\2\u0293\u0df9\3\2\2\2\u0295")
buf.write("\u0e03\3\2\2\2\u0297\u0e0a\3\2\2\2\u0299\u0e16\3\2\2\2")
buf.write("\u029b\u0e1a\3\2\2\2\u029d\u0e21\3\2\2\2\u029f\u0e28\3")
buf.write("\2\2\2\u02a1\u0e2d\3\2\2\2\u02a3\u0e35\3\2\2\2\u02a5\u0e3c")
buf.write("\3\2\2\2\u02a7\u0e41\3\2\2\2\u02a9\u0e4a\3\2\2\2\u02ab")
buf.write("\u0e55\3\2\2\2\u02ad\u0e62\3\2\2\2\u02af\u0e74\3\2\2\2")
buf.write("\u02b1\u0e80\3\2\2\2\u02b3\u0e90\3\2\2\2\u02b5\u0e94\3")
buf.write("\2\2\2\u02b7\u0e99\3\2\2\2\u02b9\u0ea2\3\2\2\2\u02bb\u0ea8")
buf.write("\3\2\2\2\u02bd\u0ead\3\2\2\2\u02bf\u0eb6\3\2\2\2\u02c1")
buf.write("\u0ebf\3\2\2\2\u02c3\u0ec8\3\2\2\2\u02c5\u0ed7\3\2\2\2")
buf.write("\u02c7\u0ede\3\2\2\2\u02c9\u0ee3\3\2\2\2\u02cb\u0ee8\3")
buf.write("\2\2\2\u02cd\u0ef1\3\2\2\2\u02cf\u0efa\3\2\2\2\u02d1\u0eff")
buf.write("\3\2\2\2\u02d3\u0f0d\3\2\2\2\u02d5\u0f15\3\2\2\2\u02d7")
buf.write("\u0f1e\3\2\2\2\u02d9\u0f29\3\2\2\2\u02db\u0f2f\3\2\2\2")
buf.write("\u02dd\u0f37\3\2\2\2\u02df\u0f41\3\2\2\2\u02e1\u0f4e\3")
buf.write("\2\2\2\u02e3\u0f55\3\2\2\2\u02e5\u0f60\3\2\2\2\u02e7\u0f67")
buf.write("\3\2\2\2\u02e9\u0f73\3\2\2\2\u02eb\u0f80\3\2\2\2\u02ed")
buf.write("\u0f8e\3\2\2\2\u02ef\u0f96\3\2\2\2\u02f1\u0f9e\3\2\2\2")
buf.write("\u02f3\u0fa6\3\2\2\2\u02f5\u0fac\3\2\2\2\u02f7\u0fb0\3")
buf.write("\2\2\2\u02f9\u0fb5\3\2\2\2\u02fb\u0fba\3\2\2\2\u02fd\u0fc4")
buf.write("\3\2\2\2\u02ff\u0fe0\3\2\2\2\u0301\u0ffb\3\2\2\2\u0303")
buf.write("\u1013\3\2\2\2\u0305\u1021\3\2\2\2\u0307\u102f\3\2\2\2")
buf.write("\u0309\u103f\3\2\2\2\u030b\u104f\3\2\2\2\u030d\u1052\3")
buf.write("\2\2\2\u030f\u105b\3\2\2\2\u0311\u1067\3\2\2\2\u0313\u1071")
buf.write("\3\2\2\2\u0315\u1077\3\2\2\2\u0317\u107f\3\2\2\2\u0319")
buf.write("\u1084\3\2\2\2\u031b\u1089\3\2\2\2\u031d\u1092\3\2\2\2")
buf.write("\u031f\u1097\3\2\2\2\u0321\u10a1\3\2\2\2\u0323\u10a7\3")
buf.write("\2\2\2\u0325\u10ad\3\2\2\2\u0327\u10b4\3\2\2\2\u0329\u10be")
buf.write("\3\2\2\2\u032b\u10c6\3\2\2\2\u032d\u10cc\3\2\2\2\u032f")
buf.write("\u10d3\3\2\2\2\u0331\u10db\3\2\2\2\u0333\u10e2\3\2\2\2")
buf.write("\u0335\u10e9\3\2\2\2\u0337\u10ed\3\2\2\2\u0339\u10f3\3")
buf.write("\2\2\2\u033b\u10fc\3\2\2\2\u033d\u1102\3\2\2\2\u033f\u1109")
buf.write("\3\2\2\2\u0341\u1111\3\2\2\2\u0343\u111a\3\2\2\2\u0345")
buf.write("\u1123\3\2\2\2\u0347\u112a\3\2\2\2\u0349\u1132\3\2\2\2")
buf.write("\u034b\u113a\3\2\2\2\u034d\u1143\3\2\2\2\u034f\u1148\3")
buf.write("\2\2\2\u0351\u1150\3\2\2\2\u0353\u115b\3\2\2\2\u0355\u1160")
buf.write("\3\2\2\2\u0357\u1169\3\2\2\2\u0359\u116f\3\2\2\2\u035b")
buf.write("\u1175\3\2\2\2\u035d\u117a\3\2\2\2\u035f\u1181\3\2\2\2")
buf.write("\u0361\u1186\3\2\2\2\u0363\u118c\3\2\2\2\u0365\u1190\3")
buf.write("\2\2\2\u0367\u1197\3\2\2\2\u0369\u11a5\3\2\2\2\u036b\u11ad")
buf.write("\3\2\2\2\u036d\u11ba\3\2\2\2\u036f\u11c5\3\2\2\2\u0371")
buf.write("\u11cf\3\2\2\2\u0373\u11d9\3\2\2\2\u0375\u11e7\3\2\2\2")
buf.write("\u0377\u11f0\3\2\2\2\u0379\u11f6\3\2\2\2\u037b\u11ff\3")
buf.write("\2\2\2\u037d\u1207\3\2\2\2\u037f\u1214\3\2\2\2\u0381\u121d")
buf.write("\3\2\2\2\u0383\u1222\3\2\2\2\u0385\u1226\3\2\2\2\u0387")
buf.write("\u123f\3\2\2\2\u0389\u1244\3\2\2\2\u038b\u124f\3\2\2\2")
buf.write("\u038d\u1261\3\2\2\2\u038f\u1271\3\2\2\2\u0391\u1284\3")
buf.write("\2\2\2\u0393\u129b\3\2\2\2\u0395\u12aa\3\2\2\2\u0397\u12b4")
buf.write("\3\2\2\2\u0399\u12bf\3\2\2\2\u039b\u12c7\3\2\2\2\u039d")
buf.write("\u12d4\3\2\2\2\u039f\u12e4\3\2\2\2\u03a1\u12f4\3\2\2\2")
buf.write("\u03a3\u12f9\3\2\2\2\u03a5\u12fd\3\2\2\2\u03a7\u1302\3")
buf.write("\2\2\2\u03a9\u1306\3\2\2\2\u03ab\u130b\3\2\2\2\u03ad\u130f")
buf.write("\3\2\2\2\u03af\u1316\3\2\2\2\u03b1\u131a\3\2\2\2\u03b3")
buf.write("\u1320\3\2\2\2\u03b5\u1330\3\2\2\2\u03b7\u133b\3\2\2\2")
buf.write("\u03b9\u133f\3\2\2\2\u03bb\u1348\3\2\2\2\u03bd\u134e\3")
buf.write("\2\2\2\u03bf\u1355\3\2\2\2\u03c1\u135a\3\2\2\2\u03c3\u1361")
buf.write("\3\2\2\2\u03c5\u136e\3\2\2\2\u03c7\u137b\3\2\2\2\u03c9")
buf.write("\u1388\3\2\2\2\u03cb\u138b\3\2\2\2\u03cd\u138d\3\2\2\2")
buf.write("\u03cf\u138f\3\2\2\2\u03d1\u139e\3\2\2\2\u03d3\u13aa\3")
buf.write("\2\2\2\u03d5\u13b3\3\2\2\2\u03d7\u13b5\3\2\2\2\u03d9\u13c0")
buf.write("\3\2\2\2\u03db\u13cb\3\2\2\2\u03dd\u13d6\3\2\2\2\u03df")
buf.write("\u13e1\3\2\2\2\u03e1\u13e3\3\2\2\2\u03e3\u13ed\3\2\2\2")
buf.write("\u03e5\u13ef\3\2\2\2\u03e7\u13f1\3\2\2\2\u03e9\u13f3\3")
buf.write("\2\2\2\u03eb\u13f5\3\2\2\2\u03ed\u13f8\3\2\2\2\u03ef\u13fa")
buf.write("\3\2\2\2\u03f1\u13fc\3\2\2\2\u03f3\u13fe\3\2\2\2\u03f5")
buf.write("\u1400\3\2\2\2\u03f7\u1402\3\2\2\2\u03f9\u1404\3\2\2\2")
buf.write("\u03fb\u1415\3\2\2\2\u03fd\u1417\3\2\2\2\u03ff\u1419\3")
buf.write("\2\2\2\u0401\u141b\3\2\2\2\u0403\u141e\3\2\2\2\u0405\u1420")
buf.write("\3\2\2\2\u0407\u142b\3\2\2\2\u0409\u142d\3\2\2\2\u040b")
buf.write("\u142f\3\2\2\2\u040d\u1431\3\2\2\2\u040f\u1433\3\2\2\2")
buf.write("\u0411\u1435\3\2\2\2\u0413\u1437\3\2\2\2\u0415\u143a\3")
buf.write("\2\2\2\u0417\u143c\3\2\2\2\u0419\u143e\3\2\2\2\u041b\u1440")
buf.write("\3\2\2\2\u041d\u1442\3\2\2\2\u041f\u1445\3\2\2\2\u0421")
buf.write("\u144b\3\2\2\2\u0423\u144e\3\2\2\2\u0425\u1455\3\2\2\2")
buf.write("\u0427\u1460\3\2\2\2\u0429\u146f\3\2\2\2\u042b\u147d\3")
buf.write("\2\2\2\u042d\u1490\3\2\2\2\u042f\u1494\3\2\2\2\u0431\u1496")
buf.write("\3\2\2\2\u0433\u149e\3\2\2\2\u0435\u14a3\3\2\2\2\u0437")
buf.write("\u14a5\3\2\2\2\u0439\u14a7\3\2\2\2\u043b\u14a9\3\2\2\2")
buf.write("\u043d\u14ab\3\2\2\2\u043f\u14ad\3\2\2\2\u0441\u14af\3")
buf.write("\2\2\2\u0443\u14b1\3\2\2\2\u0445\u14b3\3\2\2\2\u0447\u14b5")
buf.write("\3\2\2\2\u0449\u14b7\3\2\2\2\u044b\u14b9\3\2\2\2\u044d")
buf.write("\u14bb\3\2\2\2\u044f\u14bd\3\2\2\2\u0451\u14bf\3\2\2\2")
buf.write("\u0453\u14c1\3\2\2\2\u0455\u14c3\3\2\2\2\u0457\u14c5\3")
buf.write("\2\2\2\u0459\u14c7\3\2\2\2\u045b\u14c9\3\2\2\2\u045d\u14cb")
buf.write("\3\2\2\2\u045f\u14cd\3\2\2\2\u0461\u14cf\3\2\2\2\u0463")
buf.write("\u14d1\3\2\2\2\u0465\u14d3\3\2\2\2\u0467\u14d5\3\2\2\2")
buf.write("\u0469\u046a\7\60\2\2\u046a\u046b\7\60\2\2\u046b\4\3\2")
buf.write("\2\2\u046c\u046d\5\u0435\u021b\2\u046d\6\3\2\2\2\u046e")
buf.write("\u046f\5\u0435\u021b\2\u046f\u0470\5\u043b\u021e\2\u0470")
buf.write("\u0471\5\u043b\u021e\2\u0471\b\3\2\2\2\u0472\u0473\5\u0435")
buf.write("\u021b\2\u0473\u0474\5\u043f\u0220\2\u0474\u0475\5\u045b")
buf.write("\u022e\2\u0475\u0476\5\u043d\u021f\2\u0476\u0477\5\u0457")
buf.write("\u022c\2\u0477\n\3\2\2\2\u0478\u0479\5\u0435\u021b\2\u0479")
buf.write("\u047a\5\u0441\u0221\2\u047a\u047b\5\u043d\u021f\2\u047b")
buf.write("\u047c\5\u044f\u0228\2\u047c\u047d\5\u045b\u022e\2\u047d")
buf.write("\f\3\2\2\2\u047e\u047f\5\u0435\u021b\2\u047f\u0480\5\u0441")
buf.write("\u0221\2\u0480\u0481\5\u0441\u0221\2\u0481\u0482\5\u0457")
buf.write("\u022c\2\u0482\u0483\5\u043d\u021f\2\u0483\u0484\5\u0441")
buf.write("\u0221\2\u0484\u0485\5\u0435\u021b\2\u0485\u0486\5\u045b")
buf.write("\u022e\2\u0486\u0487\5\u043d\u021f\2\u0487\16\3\2\2\2")
buf.write("\u0488\u0489\5\u0435\u021b\2\u0489\u048a\5\u044b\u0226")
buf.write("\2\u048a\u048b\5\u044b\u0226\2\u048b\20\3\2\2\2\u048c")
buf.write("\u048d\5\u0435\u021b\2\u048d\u048e\5\u044b\u0226\2\u048e")
buf.write("\u048f\5\u045b\u022e\2\u048f\u0490\5\u043d\u021f\2\u0490")
buf.write("\u0491\5\u0457\u022c\2\u0491\22\3\2\2\2\u0492\u0493\5")
buf.write("\u0435\u021b\2\u0493\u0494\5\u044f\u0228\2\u0494\u0495")
buf.write("\5\u0435\u021b\2\u0495\u0496\5\u044b\u0226\2\u0496\u0497")
buf.write("\5\u0465\u0233\2\u0497\u0498\5\u0467\u0234\2\u0498\u0499")
buf.write("\5\u043d\u021f\2\u0499\24\3\2\2\2\u049a\u049b\5\u0435")
buf.write("\u021b\2\u049b\u049c\5\u044f\u0228\2\u049c\u049d\5\u043b")
buf.write("\u021e\2\u049d\26\3\2\2\2\u049e\u049f\5\u0435\u021b\2")
buf.write("\u049f\u04a0\5\u044f\u0228\2\u04a0\u04a1\5\u0465\u0233")
buf.write("\2\u04a1\30\3\2\2\2\u04a2\u04a3\5\u0435\u021b\2\u04a3")
buf.write("\u04a4\5\u0457\u022c\2\u04a4\u04a5\5\u0457\u022c\2\u04a5")
buf.write("\u04a6\5\u0435\u021b\2\u04a6\u04a7\5\u0465\u0233\2\u04a7")
buf.write("\32\3\2\2\2\u04a8\u04a9\5\u0435\u021b\2\u04a9\u04aa\5")
buf.write("\u0459\u022d\2\u04aa\34\3\2\2\2\u04ab\u04ac\5\u0435\u021b")
buf.write("\2\u04ac\u04ad\5\u0459\u022d\2\u04ad\u04ae\5\u0459\u022d")
buf.write("\2\u04ae\u04af\5\u045d\u022f\2\u04af\u04b0\5\u044d\u0227")
buf.write("\2\u04b0\u04b1\5\u043d\u021f\2\u04b1\36\3\2\2\2\u04b2")
buf.write("\u04b3\5\u0435\u021b\2\u04b3\u04b4\5\u0459\u022d\2\u04b4")
buf.write("\u04b5\5\u0459\u022d\2\u04b5\u04b6\5\u043d\u021f\2\u04b6")
buf.write("\u04b7\5\u0457\u022c\2\u04b7\u04b8\5\u045b\u022e\2\u04b8")
buf.write(" \3\2\2\2\u04b9\u04ba\5\u0435\u021b\2\u04ba\u04bb\5\u0459")
buf.write("\u022d\2\u04bb\u04bc\5\u0439\u021d\2\u04bc\"\3\2\2\2\u04bd")
buf.write("\u04be\5\u0435\u021b\2\u04be\u04bf\5\u0459\u022d\2\u04bf")
buf.write("\u04c0\5\u0459\u022d\2\u04c0\u04c1\5\u0451\u0229\2\u04c1")
buf.write("\u04c2\5\u0439\u021d\2\u04c2\u04c3\5\u0445\u0223\2\u04c3")
buf.write("\u04c4\5\u0435\u021b\2\u04c4\u04c5\5\u045b\u022e\2\u04c5")
buf.write("\u04c6\5\u043d\u021f\2\u04c6$\3\2\2\2\u04c7\u04c8\5\u0435")
buf.write("\u021b\2\u04c8\u04c9\5\u045b\u022e\2\u04c9&\3\2\2\2\u04ca")
buf.write("\u04cb\5\u0435\u021b\2\u04cb\u04cc\5\u045b\u022e\2\u04cc")
buf.write("\u04cd\5\u045b\u022e\2\u04cd\u04ce\5\u0457\u022c\2\u04ce")
buf.write("\u04cf\5\u0445\u0223\2\u04cf\u04d0\5\u0437\u021c\2\u04d0")
buf.write("\u04d1\5\u045d\u022f\2\u04d1\u04d2\5\u045b\u022e\2\u04d2")
buf.write("\u04d3\5\u043d\u021f\2\u04d3(\3\2\2\2\u04d4\u04d5\5\u0435")
buf.write("\u021b\2\u04d5\u04d6\5\u045d\u022f\2\u04d6\u04d7\5\u043b")
buf.write("\u021e\2\u04d7\u04d8\5\u0445\u0223\2\u04d8\u04d9\5\u045b")
buf.write("\u022e\2\u04d9*\3\2\2\2\u04da\u04db\5\u0435\u021b\2\u04db")
buf.write("\u04dc\5\u045d\u022f\2\u04dc\u04dd\5\u045b\u022e\2\u04dd")
buf.write("\u04de\5\u0443\u0222\2\u04de\u04df\5\u0445\u0223\2\u04df")
buf.write("\u04e0\5\u043b\u021e\2\u04e0,\3\2\2\2\u04e1\u04e2\5\u0435")
buf.write("\u021b\2\u04e2\u04e3\5\u045d\u022f\2\u04e3\u04e4\5\u045b")
buf.write("\u022e\2\u04e4\u04e5\5\u0451\u0229\2\u04e5.\3\2\2\2\u04e6")
buf.write("\u04e7\5\u0435\u021b\2\u04e7\u04e8\5\u045d\u022f\2\u04e8")
buf.write("\u04e9\5\u045b\u022e\2\u04e9\u04ea\5\u0451\u0229\2\u04ea")
buf.write("\u04eb\5\u044d\u0227\2\u04eb\u04ec\5\u0435\u021b\2\u04ec")
buf.write("\u04ed\5\u045b\u022e\2\u04ed\u04ee\5\u0445\u0223\2\u04ee")
buf.write("\u04ef\5\u0439\u021d\2\u04ef\60\3\2\2\2\u04f0\u04f1\5")
buf.write("\u0435\u021b\2\u04f1\u04f2\5\u045d\u022f\2\u04f2\u04f3")
buf.write("\5\u045b\u022e\2\u04f3\u04f4\5\u0451\u0229\2\u04f4\u04f5")
buf.write("\5\u044f\u0228\2\u04f5\u04f6\5\u0451\u0229\2\u04f6\u04f7")
buf.write("\5\u044d\u0227\2\u04f7\u04f8\5\u0451\u0229\2\u04f8\u04f9")
buf.write("\5\u045d\u022f\2\u04f9\u04fa\5\u0459\u022d\2\u04fa\u04fb")
buf.write("\7a\2\2\u04fb\u04fc\5\u045b\u022e\2\u04fc\u04fd\5\u0457")
buf.write("\u022c\2\u04fd\u04fe\5\u0435\u021b\2\u04fe\u04ff\5\u044f")
buf.write("\u0228\2\u04ff\u0500\5\u0459\u022d\2\u0500\u0501\5\u0435")
buf.write("\u021b\2\u0501\u0502\5\u0439\u021d\2\u0502\u0503\5\u045b")
buf.write("\u022e\2\u0503\u0504\5\u0445\u0223\2\u0504\u0505\5\u0451")
buf.write("\u0229\2\u0505\u0506\5\u044f\u0228\2\u0506\62\3\2\2\2")
buf.write("\u0507\u0508\5\u0437\u021c\2\u0508\u0509\5\u0435\u021b")
buf.write("\2\u0509\u050a\5\u045b\u022e\2\u050a\u050b\5\u0439\u021d")
buf.write("\2\u050b\u050c\5\u0443\u0222\2\u050c\64\3\2\2\2\u050d")
buf.write("\u050e\5\u0437\u021c\2\u050e\u050f\5\u043d\u021f\2\u050f")
buf.write("\u0510\5\u043f\u0220\2\u0510\u0511\5\u0451\u0229\2\u0511")
buf.write("\u0512\5\u0457\u022c\2\u0512\u0513\5\u043d\u021f\2\u0513")
buf.write("\66\3\2\2\2\u0514\u0515\5\u0437\u021c\2\u0515\u0516\5")
buf.write("\u043d\u021f\2\u0516\u0517\5\u0441\u0221\2\u0517\u0518")
buf.write("\5\u0445\u0223\2\u0518\u0519\5\u044f\u0228\2\u05198\3")
buf.write("\2\2\2\u051a\u051b\5\u0437\u021c\2\u051b\u051c\5\u043d")
buf.write("\u021f\2\u051c\u051d\5\u045b\u022e\2\u051d\u051e\5\u0461")
buf.write("\u0231\2\u051e\u051f\5\u043d\u021f\2\u051f\u0520\5\u043d")
buf.write("\u021f\2\u0520\u0521\5\u044f\u0228\2\u0521:\3\2\2\2\u0522")
buf.write("\u0523\5\u0437\u021c\2\u0523\u0524\5\u043f\u0220\2\u0524")
buf.write("\u0525\5\u0445\u0223\2\u0525\u0526\5\u044b\u0226\2\u0526")
buf.write("\u0527\5\u043d\u021f\2\u0527<\3\2\2\2\u0528\u0529\5\u0437")
buf.write("\u021c\2\u0529\u052a\5\u0445\u0223\2\u052a\u052b\5\u044f")
buf.write("\u0228\2\u052b\u052c\5\u0435\u021b\2\u052c\u052d\5\u0457")
buf.write("\u022c\2\u052d\u052e\5\u0465\u0233\2\u052e\u052f\7a\2")
buf.write("\2\u052f\u0530\5\u043b\u021e\2\u0530\u0531\5\u0451\u0229")
buf.write("\2\u0531\u0532\5\u045d\u022f\2\u0532\u0533\5\u0437\u021c")
buf.write("\2\u0533\u0534\5\u044b\u0226\2\u0534\u0535\5\u043d\u021f")
buf.write("\2\u0535>\3\2\2\2\u0536\u0537\5\u0437\u021c\2\u0537\u0538")
buf.write("\5\u0445\u0223\2\u0538\u0539\5\u044f\u0228\2\u0539\u053a")
buf.write("\5\u0435\u021b\2\u053a\u053b\5\u0457\u022c\2\u053b\u053c")
buf.write("\5\u0465\u0233\2\u053c\u053d\7a\2\2\u053d\u053e\5\u043f")
buf.write("\u0220\2\u053e\u053f\5\u044b\u0226\2\u053f\u0540\5\u0451")
buf.write("\u0229\2\u0540\u0541\5\u0435\u021b\2\u0541\u0542\5\u045b")
buf.write("\u022e\2\u0542@\3\2\2\2\u0543\u0544\5\u0437\u021c\2\u0544")
buf.write("\u0545\5\u0445\u0223\2\u0545\u0546\5\u044f\u0228\2\u0546")
buf.write("\u0547\5\u0435\u021b\2\u0547\u0548\5\u0457\u022c\2\u0548")
buf.write("\u0549\5\u0465\u0233\2\u0549\u054a\7a\2\2\u054a\u054b")
buf.write("\5\u0445\u0223\2\u054b\u054c\5\u044f\u0228\2\u054c\u054d")
buf.write("\5\u045b\u022e\2\u054d\u054e\5\u043d\u021f\2\u054e\u054f")
buf.write("\5\u0441\u0221\2\u054f\u0550\5\u043d\u021f\2\u0550\u0551")
buf.write("\5\u0457\u022c\2\u0551B\3\2\2\2\u0552\u0553\5\u0437\u021c")
buf.write("\2\u0553\u0554\5\u044b\u0226\2\u0554\u0555\5\u0451\u0229")
buf.write("\2\u0555\u0556\5\u0437\u021c\2\u0556D\3\2\2\2\u0557\u0558")
buf.write("\5\u0437\u021c\2\u0558\u0559\5\u044b\u0226\2\u0559\u055a")
buf.write("\5\u0451\u0229\2\u055a\u055b\5\u0439\u021d\2\u055b\u055c")
buf.write("\5\u0449\u0225\2\u055cF\3\2\2\2\u055d\u055e\5\u0437\u021c")
buf.write("\2\u055e\u055f\5\u0451\u0229\2\u055f\u0560\5\u043b\u021e")
buf.write("\2\u0560\u0561\5\u0465\u0233\2\u0561H\3\2\2\2\u0562\u0563")
buf.write("\5\u0437\u021c\2\u0563\u0564\5\u0451\u0229\2\u0564\u0565")
buf.write("\5\u0451\u0229\2\u0565\u0566\5\u044b\u0226\2\u0566\u0567")
buf.write("\5\u043d\u021f\2\u0567\u0568\5\u0435\u021b\2\u0568\u0569")
buf.write("\5\u044f\u0228\2\u0569J\3\2\2\2\u056a\u056b\5\u0437\u021c")
buf.write("\2\u056b\u056c\5\u0451\u0229\2\u056c\u056d\5\u045b\u022e")
buf.write("\2\u056d\u056e\5\u0443\u0222\2\u056eL\3\2\2\2\u056f\u0570")
buf.write("\5\u0437\u021c\2\u0570\u0571\5\u0457\u022c\2\u0571\u0572")
buf.write("\5\u043d\u021f\2\u0572\u0573\5\u0435\u021b\2\u0573\u0574")
buf.write("\5\u043b\u021e\2\u0574\u0575\5\u045b\u022e\2\u0575\u0576")
buf.write("\5\u0443\u0222\2\u0576N\3\2\2\2\u0577\u0578\5\u0437\u021c")
buf.write("\2\u0578\u0579\5\u045d\u022f\2\u0579\u057a\5\u044b\u0226")
buf.write("\2\u057a\u057b\5\u0449\u0225\2\u057bP\3\2\2\2\u057c\u057d")
buf.write("\5\u0437\u021c\2\u057d\u057e\5\u0465\u0233\2\u057eR\3")
buf.write("\2\2\2\u057f\u0580\5\u0437\u021c\2\u0580\u0581\5\u0465")
buf.write("\u0233\2\u0581\u0582\5\u045b\u022e\2\u0582\u0583\5\u043d")
buf.write("\u021f\2\u0583T\3\2\2\2\u0584\u0585\5\u0439\u021d\2\u0585")
buf.write("V\3\2\2\2\u0586\u0587\5\u0439\u021d\2\u0587\u0588\5\u0435")
buf.write("\u021b\2\u0588\u0589\5\u0439\u021d\2\u0589\u058a\5\u0443")
buf.write("\u0222\2\u058a\u058b\5\u043d\u021f\2\u058bX\3\2\2\2\u058c")
buf.write("\u058d\5\u0439\u021d\2\u058d\u058e\5\u0435\u021b\2\u058e")
buf.write("\u058f\5\u044b\u0226\2\u058f\u0590\5\u044b\u0226\2\u0590")
buf.write("Z\3\2\2\2\u0591\u0592\5\u0439\u021d\2\u0592\u0593\5\u0435")
buf.write("\u021b\2\u0593\u0594\5\u044f\u0228\2\u0594\u0595\5\u0451")
buf.write("\u0229\2\u0595\u0596\5\u044f\u0228\2\u0596\u0597\5\u0445")
buf.write("\u0223\2\u0597\u0598\5\u0439\u021d\2\u0598\u0599\5\u0435")
buf.write("\u021b\2\u0599\u059a\5\u044b\u0226\2\u059a\\\3\2\2\2\u059b")
buf.write("\u059c\5\u0439\u021d\2\u059c\u059d\5\u0435\u021b\2\u059d")
buf.write("\u059e\5\u0459\u022d\2\u059e\u059f\5\u0439\u021d\2\u059f")
buf.write("\u05a0\5\u0435\u021b\2\u05a0\u05a1\5\u043b\u021e\2\u05a1")
buf.write("\u05a2\5\u043d\u021f\2\u05a2^\3\2\2\2\u05a3\u05a4\5\u0439")
buf.write("\u021d\2\u05a4\u05a5\5\u0435\u021b\2\u05a5\u05a6\5\u0459")
buf.write("\u022d\2\u05a6\u05a7\5\u043d\u021f\2\u05a7`\3\2\2\2\u05a8")
buf.write("\u05a9\5\u0439\u021d\2\u05a9\u05aa\5\u0435\u021b\2\u05aa")
buf.write("\u05ab\5\u0459\u022d\2\u05ab\u05ac\5\u045b\u022e\2\u05ac")
buf.write("b\3\2\2\2\u05ad\u05ae\5\u0439\u021d\2\u05ae\u05af\5\u0443")
buf.write("\u0222\2\u05af\u05b0\5\u0435\u021b\2\u05b0\u05b1\5\u0457")
buf.write("\u022c\2\u05b1d\3\2\2\2\u05b2\u05b3\5\u0439\u021d\2\u05b3")
buf.write("\u05b4\5\u0443\u0222\2\u05b4\u05b5\5\u0435\u021b\2\u05b5")
buf.write("\u05b6\5\u0457\u022c\2\u05b6\u05b7\7a\2\2\u05b7\u05b8")
buf.write("\5\u0439\u021d\2\u05b8\u05b9\5\u0459\u022d\2\u05b9f\3")
buf.write("\2\2\2\u05ba\u05bb\5\u0439\u021d\2\u05bb\u05bc\5\u0443")
buf.write("\u0222\2\u05bc\u05bd\5\u0435\u021b\2\u05bd\u05be\5\u0457")
buf.write("\u022c\2\u05be\u05bf\5\u0435\u021b\2\u05bf\u05c0\5\u0439")
buf.write("\u021d\2\u05c0\u05c1\5\u045b\u022e\2\u05c1\u05c2\5\u043d")
buf.write("\u021f\2\u05c2\u05c3\5\u0457\u022c\2\u05c3h\3\2\2\2\u05c4")
buf.write("\u05c5\5\u0439\u021d\2\u05c5\u05c6\5\u0443\u0222\2\u05c6")
buf.write("\u05c7\5\u043d\u021f\2\u05c7\u05c8\5\u0439\u021d\2\u05c8")
buf.write("\u05c9\5\u0449\u0225\2\u05c9j\3\2\2\2\u05ca\u05cb\5\u0439")
buf.write("\u021d\2\u05cb\u05cc\5\u0443\u0222\2\u05cc\u05cd\5\u0457")
buf.write("\u022c\2\u05cdl\3\2\2\2\u05ce\u05cf\5\u0439\u021d\2\u05cf")
buf.write("\u05d0\5\u044b\u0226\2\u05d0\u05d1\5\u0451\u0229\2\u05d1")
buf.write("\u05d2\5\u0437\u021c\2\u05d2n\3\2\2\2\u05d3\u05d4\5\u0439")
buf.write("\u021d\2\u05d4\u05d5\5\u044b\u0226\2\u05d5\u05d6\5\u0451")
buf.write("\u0229\2\u05d6\u05d7\5\u0459\u022d\2\u05d7\u05d8\5\u043d")
buf.write("\u021f\2\u05d8p\3\2\2\2\u05d9\u05da\5\u0439\u021d\2\u05da")
buf.write("\u05db\5\u044b\u0226\2\u05db\u05dc\5\u045d\u022f\2\u05dc")
buf.write("\u05dd\5\u0459\u022d\2\u05dd\u05de\5\u045b\u022e\2\u05de")
buf.write("\u05df\5\u043d\u021f\2\u05df\u05e0\5\u0457\u022c\2\u05e0")
buf.write("r\3\2\2\2\u05e1\u05e2\5\u0439\u021d\2\u05e2\u05e3\5\u0451")
buf.write("\u0229\2\u05e3\u05e4\5\u044b\u0226\2\u05e4\u05e5\5\u044b")
buf.write("\u0226\2\u05e5\u05e6\5\u043d\u021f\2\u05e6\u05e7\5\u0439")
buf.write("\u021d\2\u05e7\u05e8\5\u045b\u022e\2\u05e8t\3\2\2\2\u05e9")
buf.write("\u05ea\5\u0439\u021d\2\u05ea\u05eb\5\u0451\u0229\2\u05eb")
buf.write("\u05ec\5\u044b\u0226\2\u05ec\u05ed\5\u045d\u022f\2\u05ed")
buf.write("\u05ee\5\u044d\u0227\2\u05ee\u05ef\5\u044f\u0228\2\u05ef")
buf.write("\u05f0\5\u0459\u022d\2\u05f0v\3\2\2\2\u05f1\u05f2\5\u0439")
buf.write("\u021d\2\u05f2\u05f3\5\u0451\u0229\2\u05f3\u05f4\5\u044d")
buf.write("\u0227\2\u05f4\u05f5\5\u044d\u0227\2\u05f5\u05f6\5\u043d")
buf.write("\u021f\2\u05f6\u05f7\5\u044f\u0228\2\u05f7\u05f8\5\u045b")
buf.write("\u022e\2\u05f8x\3\2\2\2\u05f9\u05fa\5\u0439\u021d\2\u05fa")
buf.write("\u05fb\5\u0451\u0229\2\u05fb\u05fc\5\u044d\u0227\2\u05fc")
buf.write("\u05fd\5\u044d\u0227\2\u05fd\u05fe\5\u0445\u0223\2\u05fe")
buf.write("\u05ff\5\u045b\u022e\2\u05ffz\3\2\2\2\u0600\u0601\5\u0439")
buf.write("\u021d\2\u0601\u0602\5\u0451\u0229\2\u0602\u0603\5\u044d")
buf.write("\u0227\2\u0603\u0604\5\u044d\u0227\2\u0604\u0605\5\u0445")
buf.write("\u0223\2\u0605\u0606\5\u045b\u022e\2\u0606\u0607\5\u045b")
buf.write("\u022e\2\u0607\u0608\5\u043d\u021f\2\u0608\u0609\5\u043b")
buf.write("\u021e\2\u0609|\3\2\2\2\u060a\u060b\5\u0439\u021d\2\u060b")
buf.write("\u060c\5\u0451\u0229\2\u060c\u060d\5\u044d\u0227\2\u060d")
buf.write("\u060e\5\u0453\u022a\2\u060e\u060f\5\u0435\u021b\2\u060f")
buf.write("\u0610\5\u045b\u022e\2\u0610\u0611\5\u0445\u0223\2\u0611")
buf.write("\u0612\5\u0437\u021c\2\u0612\u0613\5\u0445\u0223\2\u0613")
buf.write("\u0614\5\u044b\u0226\2\u0614\u0615\5\u0445\u0223\2\u0615")
buf.write("\u0616\5\u045b\u022e\2\u0616\u0617\5\u0465\u0233\2\u0617")
buf.write("~\3\2\2\2\u0618\u0619\5\u0439\u021d\2\u0619\u061a\5\u0451")
buf.write("\u0229\2\u061a\u061b\5\u044d\u0227\2\u061b\u061c\5\u0453")
buf.write("\u022a\2\u061c\u061d\5\u0445\u0223\2\u061d\u061e\5\u044b")
buf.write("\u0226\2\u061e\u061f\5\u043d\u021f\2\u061f\u0080\3\2\2")
buf.write("\2\u0620\u0621\5\u0439\u021d\2\u0621\u0622\5\u0451\u0229")
buf.write("\2\u0622\u0623\5\u044d\u0227\2\u0623\u0624\5\u0453\u022a")
buf.write("\2\u0624\u0625\5\u0451\u0229\2\u0625\u0626\5\u045d\u022f")
buf.write("\2\u0626\u0627\5\u044f\u0228\2\u0627\u0628\5\u043b\u021e")
buf.write("\2\u0628\u0082\3\2\2\2\u0629\u062a\5\u0439\u021d\2\u062a")
buf.write("\u062b\5\u0451\u0229\2\u062b\u062c\5\u044f\u0228\2\u062c")
buf.write("\u062d\5\u044f\u0228\2\u062d\u062e\5\u043d\u021f\2\u062e")
buf.write("\u062f\5\u0439\u021d\2\u062f\u0630\5\u045b\u022e\2\u0630")
buf.write("\u0084\3\2\2\2\u0631\u0632\5\u0439\u021d\2\u0632\u0633")
buf.write("\5\u0451\u0229\2\u0633\u0634\5\u044f\u0228\2\u0634\u0635")
buf.write("\5\u044f\u0228\2\u0635\u0636\5\u043d\u021f\2\u0636\u0637")
buf.write("\5\u0439\u021d\2\u0637\u0638\5\u045b\u022e\2\u0638\u0639")
buf.write("\7a\2\2\u0639\u063a\5\u0437\u021c\2\u063a\u063b\5\u0465")
buf.write("\u0233\2\u063b\u063c\7a\2\2\u063c\u063d\5\u0457\u022c")
buf.write("\2\u063d\u063e\5\u0451\u0229\2\u063e\u063f\5\u0451\u0229")
buf.write("\2\u063f\u0640\5\u045b\u022e\2\u0640\u0086\3\2\2\2\u0641")
buf.write("\u0642\5\u0439\u021d\2\u0642\u0643\5\u0451\u0229\2\u0643")
buf.write("\u0644\5\u044f\u0228\2\u0644\u0645\5\u0459\u022d\2\u0645")
buf.write("\u0646\5\u045b\u022e\2\u0646\u0647\5\u0435\u021b\2\u0647")
buf.write("\u0648\5\u044f\u0228\2\u0648\u0649\5\u045b\u022e\2\u0649")
buf.write("\u0088\3\2\2\2\u064a\u064b\5\u0439\u021d\2\u064b\u064c")
buf.write("\5\u0451\u0229\2\u064c\u064d\5\u044f\u0228\2\u064d\u064e")
buf.write("\5\u0459\u022d\2\u064e\u064f\5\u045b\u022e\2\u064f\u0650")
buf.write("\5\u0457\u022c\2\u0650\u0651\5\u0435\u021b\2\u0651\u0652")
buf.write("\5\u0445\u0223\2\u0652\u0653\5\u044f\u0228\2\u0653\u0654")
buf.write("\5\u045b\u022e\2\u0654\u008a\3\2\2\2\u0655\u0656\5\u0439")
buf.write("\u021d\2\u0656\u0657\5\u0451\u0229\2\u0657\u0658\5\u044f")
buf.write("\u0228\2\u0658\u0659\5\u0459\u022d\2\u0659\u065a\5\u045b")
buf.write("\u022e\2\u065a\u065b\5\u0457\u022c\2\u065b\u065c\5\u0435")
buf.write("\u021b\2\u065c\u065d\5\u0445\u0223\2\u065d\u065e\5\u044f")
buf.write("\u0228\2\u065e\u065f\5\u045b\u022e\2\u065f\u0660\5\u0459")
buf.write("\u022d\2\u0660\u008c\3\2\2\2\u0661\u0662\5\u0439\u021d")
buf.write("\2\u0662\u0663\5\u0451\u0229\2\u0663\u0664\5\u044f\u0228")
buf.write("\2\u0664\u0665\5\u0459\u022d\2\u0665\u0666\5\u045b\u022e")
buf.write("\2\u0666\u0667\5\u0457\u022c\2\u0667\u0668\5\u045d\u022f")
buf.write("\2\u0668\u0669\5\u0439\u021d\2\u0669\u066a\5\u045b\u022e")
buf.write("\2\u066a\u066b\5\u0451\u0229\2\u066b\u066c\5\u0457\u022c")
buf.write("\2\u066c\u008e\3\2\2\2\u066d\u066e\5\u0439\u021d\2\u066e")
buf.write("\u066f\5\u0451\u0229\2\u066f\u0670\5\u044f\u0228\2\u0670")
buf.write("\u0671\5\u045b\u022e\2\u0671\u0672\5\u043d\u021f\2\u0672")
buf.write("\u0673\5\u044f\u0228\2\u0673\u0674\5\u045b\u022e\2\u0674")
buf.write("\u0090\3\2\2\2\u0675\u0676\5\u0439\u021d\2\u0676\u0677")
buf.write("\5\u0451\u0229\2\u0677\u0678\5\u044f\u0228\2\u0678\u0679")
buf.write("\5\u045b\u022e\2\u0679\u067a\5\u043d\u021f\2\u067a\u067b")
buf.write("\5\u0463\u0232\2\u067b\u067c\5\u045b\u022e\2\u067c\u0092")
buf.write("\3\2\2\2\u067d\u067e\5\u0439\u021d\2\u067e\u067f\5\u0451")
buf.write("\u0229\2\u067f\u0680\5\u044f\u0228\2\u0680\u0681\5\u045b")
buf.write("\u022e\2\u0681\u0682\5\u0445\u0223\2\u0682\u0683\5\u044f")
buf.write("\u0228\2\u0683\u0684\5\u045d\u022f\2\u0684\u0685\5\u043d")
buf.write("\u021f\2\u0685\u0094\3\2\2\2\u0686\u0687\5\u0439\u021d")
buf.write("\2\u0687\u0688\5\u0451\u0229\2\u0688\u0689\5\u044f\u0228")
buf.write("\2\u0689\u068a\5\u045f\u0230\2\u068a\u068b\5\u043d\u021f")
buf.write("\2\u068b\u068c\5\u0457\u022c\2\u068c\u068d\5\u045b\u022e")
buf.write("\2\u068d\u0096\3\2\2\2\u068e\u068f\5\u0439\u021d\2\u068f")
buf.write("\u0690\5\u0451\u0229\2\u0690\u0691\5\u0457\u022c\2\u0691")
buf.write("\u0692\5\u0457\u022c\2\u0692\u0693\5\u045d\u022f\2\u0693")
buf.write("\u0694\5\u0453\u022a\2\u0694\u0695\5\u045b\u022e\2\u0695")
buf.write("\u0696\7a\2\2\u0696\u0697\5\u0463\u0232\2\u0697\u0698")
buf.write("\5\u0445\u0223\2\u0698\u0699\5\u043b\u021e\2\u0699\u0098")
buf.write("\3\2\2\2\u069a\u069b\5\u0439\u021d\2\u069b\u069c\5\u0451")
buf.write("\u0229\2\u069c\u069d\5\u0457\u022c\2\u069d\u069e\5\u0457")
buf.write("\u022c\2\u069e\u069f\5\u045d\u022f\2\u069f\u06a0\5\u0453")
buf.write("\u022a\2\u06a0\u06a1\5\u045b\u022e\2\u06a1\u06a2\7a\2")
buf.write("\2\u06a2\u06a3\5\u0463\u0232\2\u06a3\u06a4\5\u0445\u0223")
buf.write("\2\u06a4\u06a5\5\u043b\u021e\2\u06a5\u06a6\7a\2\2\u06a6")
buf.write("\u06a7\5\u0435\u021b\2\u06a7\u06a8\5\u044b\u0226\2\u06a8")
buf.write("\u06a9\5\u044b\u0226\2\u06a9\u009a\3\2\2\2\u06aa\u06ab")
buf.write("\5\u0439\u021d\2\u06ab\u06ac\5\u0451\u0229\2\u06ac\u06ad")
buf.write("\5\u0459\u022d\2\u06ad\u06ae\5\u045b\u022e\2\u06ae\u009c")
buf.write("\3\2\2\2\u06af\u06b0\5\u0439\u021d\2\u06b0\u06b1\5\u0451")
buf.write("\u0229\2\u06b1\u06b2\5\u045d\u022f\2\u06b2\u06b3\5\u044f")
buf.write("\u0228\2\u06b3\u06b4\5\u045b\u022e\2\u06b4\u009e\3\2\2")
buf.write("\2\u06b5\u06b6\5\u0439\u021d\2\u06b6\u06b7\5\u0457\u022c")
buf.write("\2\u06b7\u06b8\5\u043d\u021f\2\u06b8\u06b9\5\u0435\u021b")
buf.write("\2\u06b9\u06ba\5\u045b\u022e\2\u06ba\u06bb\5\u043d\u021f")
buf.write("\2\u06bb\u00a0\3\2\2\2\u06bc\u06bd\5\u0439\u021d\2\u06bd")
buf.write("\u06be\5\u0457\u022c\2\u06be\u06bf\5\u0451\u0229\2\u06bf")
buf.write("\u06c0\5\u0459\u022d\2\u06c0\u06c1\5\u0459\u022d\2\u06c1")
buf.write("\u00a2\3\2\2\2\u06c2\u06c3\5\u0439\u021d\2\u06c3\u06c4")
buf.write("\5\u045d\u022f\2\u06c4\u06c5\5\u0437\u021c\2\u06c5\u06c6")
buf.write("\5\u043d\u021f\2\u06c6\u00a4\3\2\2\2\u06c7\u06c8\5\u0439")
buf.write("\u021d\2\u06c8\u06c9\5\u045d\u022f\2\u06c9\u06ca\5\u0457")
buf.write("\u022c\2\u06ca\u06cb\5\u0457\u022c\2\u06cb\u06cc\5\u043d")
buf.write("\u021f\2\u06cc\u06cd\5\u044f\u0228\2\u06cd\u06ce\5\u045b")
buf.write("\u022e\2\u06ce\u00a6\3\2\2\2\u06cf\u06d0\5\u0439\u021d")
buf.write("\2\u06d0\u06d1\5\u045d\u022f\2\u06d1\u06d2\5\u0457\u022c")
buf.write("\2\u06d2\u06d3\5\u0457\u022c\2\u06d3\u06d4\5\u043d\u021f")
buf.write("\2\u06d4\u06d5\5\u044f\u0228\2\u06d5\u06d6\5\u045b\u022e")
buf.write("\2\u06d6\u06d7\7a\2\2\u06d7\u06d8\5\u045d\u022f\2\u06d8")
buf.write("\u06d9\5\u0459\u022d\2\u06d9\u06da\5\u043d\u021f\2\u06da")
buf.write("\u06db\5\u0457\u022c\2\u06db\u00a8\3\2\2\2\u06dc\u06dd")
buf.write("\5\u0439\u021d\2\u06dd\u06de\5\u045d\u022f\2\u06de\u06df")
buf.write("\5\u0457\u022c\2\u06df\u06e0\5\u0459\u022d\2\u06e0\u06e1")
buf.write("\5\u0451\u0229\2\u06e1\u06e2\5\u0457\u022c\2\u06e2\u00aa")
buf.write("\3\2\2\2\u06e3\u06e4\5\u0439\u021d\2\u06e4\u06e5\5\u045d")
buf.write("\u022f\2\u06e5\u06e6\5\u0459\u022d\2\u06e6\u06e7\5\u045b")
buf.write("\u022e\2\u06e7\u06e8\5\u0451\u0229\2\u06e8\u06e9\5\u044d")
buf.write("\u0227\2\u06e9\u06ea\5\u043b\u021e\2\u06ea\u06eb\5\u0435")
buf.write("\u021b\2\u06eb\u06ec\5\u045b\u022e\2\u06ec\u06ed\5\u045d")
buf.write("\u022f\2\u06ed\u06ee\5\u044d\u0227\2\u06ee\u00ac\3\2\2")
buf.write("\2\u06ef\u06f0\5\u0439\u021d\2\u06f0\u06f1\5\u0465\u0233")
buf.write("\2\u06f1\u06f2\5\u0439\u021d\2\u06f2\u06f3\5\u044b\u0226")
buf.write("\2\u06f3\u06f4\5\u043d\u021f\2\u06f4\u00ae\3\2\2\2\u06f5")
buf.write("\u06f6\5\u043b\u021e\2\u06f6\u06f7\5\u0435\u021b\2\u06f7")
buf.write("\u06f8\5\u045b\u022e\2\u06f8\u06f9\5\u0435\u021b\2\u06f9")
buf.write("\u00b0\3\2\2\2\u06fa\u06fb\5\u043b\u021e\2\u06fb\u06fc")
buf.write("\5\u0435\u021b\2\u06fc\u06fd\5\u045b\u022e\2\u06fd\u06fe")
buf.write("\5\u0435\u021b\2\u06fe\u06ff\5\u0437\u021c\2\u06ff\u0700")
buf.write("\5\u0435\u021b\2\u0700\u0701\5\u0459\u022d\2\u0701\u0702")
buf.write("\5\u043d\u021f\2\u0702\u00b2\3\2\2\2\u0703\u0704\5\u043b")
buf.write("\u021e\2\u0704\u0705\5\u0435\u021b\2\u0705\u0706\5\u045b")
buf.write("\u022e\2\u0706\u0707\5\u043d\u021f\2\u0707\u00b4\3\2\2")
buf.write("\2\u0708\u0709\5\u043b\u021e\2\u0709\u070a\5\u0435\u021b")
buf.write("\2\u070a\u070b\5\u0465\u0233\2\u070b\u00b6\3\2\2\2\u070c")
buf.write("\u070d\5\u043b\u021e\2\u070d\u070e\5\u0437\u021c\2\u070e")
buf.write("\u070f\7a\2\2\u070f\u0710\5\u0457\u022c\2\u0710\u0711")
buf.write("\5\u0451\u0229\2\u0711\u0712\5\u044b\u0226\2\u0712\u0713")
buf.write("\5\u043d\u021f\2\u0713\u0714\7a\2\2\u0714\u0715\5\u0439")
buf.write("\u021d\2\u0715\u0716\5\u0443\u0222\2\u0716\u0717\5\u0435")
buf.write("\u021b\2\u0717\u0718\5\u044f\u0228\2\u0718\u0719\5\u0441")
buf.write("\u0221\2\u0719\u071a\5\u043d\u021f\2\u071a\u00b8\3\2\2")
buf.write("\2\u071b\u071c\5\u043b\u021e\2\u071c\u071d\5\u0437\u021c")
buf.write("\2\u071d\u071e\5\u045b\u022e\2\u071e\u071f\5\u0445\u0223")
buf.write("\2\u071f\u0720\5\u044d\u0227\2\u0720\u0721\5\u043d\u021f")
buf.write("\2\u0721\u0722\5\u0467\u0234\2\u0722\u0723\5\u0451\u0229")
buf.write("\2\u0723\u0724\5\u044f\u0228\2\u0724\u0725\5\u043d\u021f")
buf.write("\2\u0725\u00ba\3\2\2\2\u0726\u0727\5\u043b\u021e\2\u0727")
buf.write("\u0728\5\u043b\u021e\2\u0728\u0729\5\u044b\u0226\2\u0729")
buf.write("\u00bc\3\2\2\2\u072a\u072b\5\u043b\u021e\2\u072b\u072c")
buf.write("\5\u043d\u021f\2\u072c\u072d\5\u0437\u021c\2\u072d\u072e")
buf.write("\5\u045d\u022f\2\u072e\u072f\5\u0441\u0221\2\u072f\u00be")
buf.write("\3\2\2\2\u0730\u0731\5\u043b\u021e\2\u0731\u0732\5\u043d")
buf.write("\u021f\2\u0732\u0733\5\u0439\u021d\2\u0733\u00c0\3\2\2")
buf.write("\2\u0734\u0735\5\u043b\u021e\2\u0735\u0736\5\u043d\u021f")
buf.write("\2\u0736\u0737\5\u0439\u021d\2\u0737\u0738\5\u0445\u0223")
buf.write("\2\u0738\u0739\5\u044d\u0227\2\u0739\u073a\5\u0435\u021b")
buf.write("\2\u073a\u073b\5\u044b\u0226\2\u073b\u00c2\3\2\2\2\u073c")
buf.write("\u073d\5\u043b\u021e\2\u073d\u073e\5\u043d\u021f\2\u073e")
buf.write("\u073f\5\u0439\u021d\2\u073f\u0740\5\u044b\u0226\2\u0740")
buf.write("\u0741\5\u0435\u021b\2\u0741\u0742\5\u0457\u022c\2\u0742")
buf.write("\u0743\5\u043d\u021f\2\u0743\u00c4\3\2\2\2\u0744\u0745")
buf.write("\5\u043b\u021e\2\u0745\u0746\5\u043d\u021f\2\u0746\u0747")
buf.write("\5\u0439\u021d\2\u0747\u0748\5\u0451\u0229\2\u0748\u0749")
buf.write("\5\u044d\u0227\2\u0749\u074a\5\u0453\u022a\2\u074a\u074b")
buf.write("\5\u0451\u0229\2\u074b\u074c\5\u0459\u022d\2\u074c\u074d")
buf.write("\5\u043d\u021f\2\u074d\u00c6\3\2\2\2\u074e\u074f\5\u043b")
buf.write("\u021e\2\u074f\u0750\5\u043d\u021f\2\u0750\u0751\5\u0439")
buf.write("\u021d\2\u0751\u0752\5\u0457\u022c\2\u0752\u0753\5\u043d")
buf.write("\u021f\2\u0753\u0754\5\u044d\u0227\2\u0754\u0755\5\u043d")
buf.write("\u021f\2\u0755\u0756\5\u044f\u0228\2\u0756\u0757\5\u045b")
buf.write("\u022e\2\u0757\u00c8\3\2\2\2\u0758\u0759\5\u043b\u021e")
buf.write("\2\u0759\u075a\5\u043d\u021f\2\u075a\u075b\5\u043f\u0220")
buf.write("\2\u075b\u075c\5\u0435\u021b\2\u075c\u075d\5\u045d\u022f")
buf.write("\2\u075d\u075e\5\u044b\u0226\2\u075e\u075f\5\u045b\u022e")
buf.write("\2\u075f\u00ca\3\2\2\2\u0760\u0761\5\u043b\u021e\2\u0761")
buf.write("\u0762\5\u043d\u021f\2\u0762\u0763\5\u043f\u0220\2\u0763")
buf.write("\u0764\5\u0435\u021b\2\u0764\u0765\5\u045d\u022f\2\u0765")
buf.write("\u0766\5\u044b\u0226\2\u0766\u0767\5\u045b\u022e\2\u0767")
buf.write("\u0768\5\u0459\u022d\2\u0768\u00cc\3\2\2\2\u0769\u076a")
buf.write("\5\u043b\u021e\2\u076a\u076b\5\u043d\u021f\2\u076b\u076c")
buf.write("\5\u043f\u0220\2\u076c\u076d\5\u043d\u021f\2\u076d\u076e")
buf.write("\5\u0457\u022c\2\u076e\u076f\5\u0457\u022c\2\u076f\u0770")
buf.write("\5\u043d\u021f\2\u0770\u0771\5\u043b\u021e\2\u0771\u00ce")
buf.write("\3\2\2\2\u0772\u0773\5\u043b\u021e\2\u0773\u0774\5\u043d")
buf.write("\u021f\2\u0774\u0775\5\u043f\u0220\2\u0775\u0776\5\u0445")
buf.write("\u0223\2\u0776\u0777\5\u044f\u0228\2\u0777\u0778\5\u043d")
buf.write("\u021f\2\u0778\u0779\5\u0457\u022c\2\u0779\u00d0\3\2\2")
buf.write("\2\u077a\u077b\5\u043b\u021e\2\u077b\u077c\5\u043d\u021f")
buf.write("\2\u077c\u077d\5\u044b\u0226\2\u077d\u077e\5\u043d\u021f")
buf.write("\2\u077e\u077f\5\u045b\u022e\2\u077f\u0780\5\u043d\u021f")
buf.write("\2\u0780\u00d2\3\2\2\2\u0781\u0782\5\u043b\u021e\2\u0782")
buf.write("\u0783\5\u043d\u021f\2\u0783\u0784\5\u0453\u022a\2\u0784")
buf.write("\u0785\5\u045b\u022e\2\u0785\u0786\5\u0443\u0222\2\u0786")
buf.write("\u00d4\3\2\2\2\u0787\u0788\5\u043b\u021e\2\u0788\u0789")
buf.write("\5\u043d\u021f\2\u0789\u078a\5\u0459\u022d\2\u078a\u078b")
buf.write("\5\u0439\u021d\2\u078b\u00d6\3\2\2\2\u078c\u078d\5\u043b")
buf.write("\u021e\2\u078d\u078e\5\u043d\u021f\2\u078e\u078f\5\u045b")
buf.write("\u022e\2\u078f\u0790\5\u043d\u021f\2\u0790\u0791\5\u0457")
buf.write("\u022c\2\u0791\u0792\5\u044d\u0227\2\u0792\u0793\5\u0445")
buf.write("\u0223\2\u0793\u0794\5\u044f\u0228\2\u0794\u0795\5\u0445")
buf.write("\u0223\2\u0795\u0796\5\u0459\u022d\2\u0796\u0797\5\u045b")
buf.write("\u022e\2\u0797\u0798\5\u0445\u0223\2\u0798\u0799\5\u0439")
buf.write("\u021d\2\u0799\u00d8\3\2\2\2\u079a\u079b\5\u043b\u021e")
buf.write("\2\u079b\u079c\5\u0445\u0223\2\u079c\u079d\5\u044d\u0227")
buf.write("\2\u079d\u079e\5\u043d\u021f\2\u079e\u079f\5\u044f\u0228")
buf.write("\2\u079f\u07a0\5\u0459\u022d\2\u07a0\u07a1\5\u0445\u0223")
buf.write("\2\u07a1\u07a2\5\u0451\u0229\2\u07a2\u07a3\5\u044f\u0228")
buf.write("\2\u07a3\u00da\3\2\2\2\u07a4\u07a5\5\u043b\u021e\2\u07a5")
buf.write("\u07a6\5\u0445\u0223\2\u07a6\u07a7\5\u0459\u022d\2\u07a7")
buf.write("\u07a8\5\u0435\u021b\2\u07a8\u07a9\5\u0437\u021c\2\u07a9")
buf.write("\u07aa\5\u044b\u0226\2\u07aa\u07ab\5\u043d\u021f\2\u07ab")
buf.write("\u00dc\3\2\2\2\u07ac\u07ad\5\u043b\u021e\2\u07ad\u07ae")
buf.write("\5\u0445\u0223\2\u07ae\u07af\5\u0459\u022d\2\u07af\u07b0")
buf.write("\5\u0435\u021b\2\u07b0\u07b1\5\u0459\u022d\2\u07b1\u07b2")
buf.write("\5\u0459\u022d\2\u07b2\u07b3\5\u0451\u0229\2\u07b3\u07b4")
buf.write("\5\u0439\u021d\2\u07b4\u07b5\5\u0445\u0223\2\u07b5\u07b6")
buf.write("\5\u0435\u021b\2\u07b6\u07b7\5\u045b\u022e\2\u07b7\u07b8")
buf.write("\5\u043d\u021f\2\u07b8\u00de\3\2\2\2\u07b9\u07ba\5\u043b")
buf.write("\u021e\2\u07ba\u07bb\5\u0445\u0223\2\u07bb\u07bc\5\u0459")
buf.write("\u022d\2\u07bc\u07bd\5\u045b\u022e\2\u07bd\u07be\5\u0445")
buf.write("\u0223\2\u07be\u07bf\5\u044f\u0228\2\u07bf\u07c0\5\u0439")
buf.write("\u021d\2\u07c0\u07c1\5\u045b\u022e\2\u07c1\u00e0\3\2\2")
buf.write("\2\u07c2\u07c3\5\u043b\u021e\2\u07c3\u07c4\5\u0451\u0229")
buf.write("\2\u07c4\u07c5\5\u0439\u021d\2\u07c5\u07c6\5\u045d\u022f")
buf.write("\2\u07c6\u07c7\5\u044d\u0227\2\u07c7\u07c8\5\u043d\u021f")
buf.write("\2\u07c8\u07c9\5\u044f\u0228\2\u07c9\u07ca\5\u045b\u022e")
buf.write("\2\u07ca\u00e2\3\2\2\2\u07cb\u07cc\5\u043b\u021e\2\u07cc")
buf.write("\u07cd\5\u0451\u0229\2\u07cd\u07ce\5\u045d\u022f\2\u07ce")
buf.write("\u07cf\5\u0437\u021c\2\u07cf\u07d0\5\u044b\u0226\2\u07d0")
buf.write("\u07d1\5\u043d\u021f\2\u07d1\u00e4\3\2\2\2\u07d2\u07d3")
buf.write("\5\u043b\u021e\2\u07d3\u07d4\5\u0457\u022c\2\u07d4\u07d5")
buf.write("\5\u0451\u0229\2\u07d5\u07d6\5\u0453\u022a\2\u07d6\u00e6")
buf.write("\3\2\2\2\u07d7\u07d8\5\u043b\u021e\2\u07d8\u07d9\5\u0459")
buf.write("\u022d\2\u07d9\u07da\5\u0445\u0223\2\u07da\u07db\5\u044f")
buf.write("\u0228\2\u07db\u07dc\5\u045b\u022e\2\u07dc\u07dd\5\u043d")
buf.write("\u021f\2\u07dd\u07de\5\u0457\u022c\2\u07de\u07df\5\u045f")
buf.write("\u0230\2\u07df\u07e0\5\u0435\u021b\2\u07e0\u07e1\5\u044b")
buf.write("\u0226\2\u07e1\u07e2\7a\2\2\u07e2\u07e3\5\u045d\u022f")
buf.write("\2\u07e3\u07e4\5\u044f\u0228\2\u07e4\u07e5\5\u0439\u021d")
buf.write("\2\u07e5\u07e6\5\u0451\u0229\2\u07e6\u07e7\5\u044f\u0228")
buf.write("\2\u07e7\u07e8\5\u0459\u022d\2\u07e8\u07e9\5\u045b\u022e")
buf.write("\2\u07e9\u07ea\5\u0457\u022c\2\u07ea\u07eb\5\u0435\u021b")
buf.write("\2\u07eb\u07ec\5\u0445\u0223\2\u07ec\u07ed\5\u044f\u0228")
buf.write("\2\u07ed\u07ee\5\u043d\u021f\2\u07ee\u07ef\5\u043b\u021e")
buf.write("\2\u07ef\u00e8\3\2\2\2\u07f0\u07f1\5\u043d\u021f\2\u07f1")
buf.write("\u07f2\5\u0435\u021b\2\u07f2\u07f3\5\u0439\u021d\2\u07f3")
buf.write("\u07f4\5\u0443\u0222\2\u07f4\u00ea\3\2\2\2\u07f5\u07f6")
buf.write("\5\u043d\u021f\2\u07f6\u07f7\5\u044b\u0226\2\u07f7\u07f8")
buf.write("\5\u043d\u021f\2\u07f8\u07f9\5\u044d\u0227\2\u07f9\u07fa")
buf.write("\5\u043d\u021f\2\u07fa\u07fb\5\u044f\u0228\2\u07fb\u07fc")
buf.write("\5\u045b\u022e\2\u07fc\u00ec\3\2\2\2\u07fd\u07fe\5\u043d")
buf.write("\u021f\2\u07fe\u07ff\5\u044b\u0226\2\u07ff\u0800\5\u0459")
buf.write("\u022d\2\u0800\u0801\5\u043d\u021f\2\u0801\u00ee\3\2\2")
buf.write("\2\u0802\u0803\5\u043d\u021f\2\u0803\u0804\5\u044b\u0226")
buf.write("\2\u0804\u0805\5\u0459\u022d\2\u0805\u0806\5\u0445\u0223")
buf.write("\2\u0806\u0807\5\u043f\u0220\2\u0807\u00f0\3\2\2\2\u0808")
buf.write("\u0809\5\u043d\u021f\2\u0809\u080a\5\u044d\u0227\2\u080a")
buf.write("\u080b\5\u0453\u022a\2\u080b\u080c\5\u045b\u022e\2\u080c")
buf.write("\u080d\5\u0465\u0233\2\u080d\u00f2\3\2\2\2\u080e\u080f")
buf.write("\5\u043d\u021f\2\u080f\u0810\5\u044f\u0228\2\u0810\u0811")
buf.write("\5\u0435\u021b\2\u0811\u0812\5\u0437\u021c\2\u0812\u0813")
buf.write("\5\u044b\u0226\2\u0813\u0814\5\u043d\u021f\2\u0814\u00f4")
buf.write("\3\2\2\2\u0815\u0816\5\u043d\u021f\2\u0816\u0817\5\u044f")
buf.write("\u0228\2\u0817\u0818\5\u0439\u021d\2\u0818\u0819\5\u0451")
buf.write("\u0229\2\u0819\u081a\5\u043b\u021e\2\u081a\u081b\5\u0445")
buf.write("\u0223\2\u081b\u081c\5\u044f\u0228\2\u081c\u081d\5\u0441")
buf.write("\u0221\2\u081d\u00f6\3\2\2\2\u081e\u081f\5\u043d\u021f")
buf.write("\2\u081f\u0820\5\u044f\u0228\2\u0820\u0821\5\u043b\u021e")
buf.write("\2\u0821\u00f8\3\2\2\2\u0822\u0823\5\u043d\u021f\2\u0823")
buf.write("\u0824\5\u044f\u0228\2\u0824\u0825\5\u045b\u022e\2\u0825")
buf.write("\u0826\5\u0445\u0223\2\u0826\u0827\5\u045b\u022e\2\u0827")
buf.write("\u0828\5\u0465\u0233\2\u0828\u0829\5\u043d\u021f\2\u0829")
buf.write("\u082a\5\u0459\u022d\2\u082a\u082b\5\u0439\u021d\2\u082b")
buf.write("\u082c\5\u0435\u021b\2\u082c\u082d\5\u0453\u022a\2\u082d")
buf.write("\u082e\5\u0445\u0223\2\u082e\u082f\5\u044f\u0228\2\u082f")
buf.write("\u0830\5\u0441\u0221\2\u0830\u00fa\3\2\2\2\u0831\u0832")
buf.write("\5\u043d\u021f\2\u0832\u0833\5\u0457\u022c\2\u0833\u0834")
buf.write("\5\u0457\u022c\2\u0834\u00fc\3\2\2\2\u0835\u0836\5\u043d")
buf.write("\u021f\2\u0836\u0837\5\u0457\u022c\2\u0837\u0838\5\u0457")
buf.write("\u022c\2\u0838\u0839\5\u0451\u0229\2\u0839\u083a\5\u0457")
buf.write("\u022c\2\u083a\u083b\5\u0459\u022d\2\u083b\u00fe\3\2\2")
buf.write("\2\u083c\u083d\5\u043d\u021f\2\u083d\u083e\5\u0459\u022d")
buf.write("\2\u083e\u083f\5\u0439\u021d\2\u083f\u0840\5\u0435\u021b")
buf.write("\2\u0840\u0841\5\u0453\u022a\2\u0841\u0842\5\u043d\u021f")
buf.write("\2\u0842\u0100\3\2\2\2\u0843\u0844\5\u043d\u021f\2\u0844")
buf.write("\u0845\5\u045f\u0230\2\u0845\u0846\5\u0435\u021b\2\u0846")
buf.write("\u0847\5\u044b\u0226\2\u0847\u0848\5\u044f\u0228\2\u0848")
buf.write("\u0849\5\u0435\u021b\2\u0849\u084a\5\u044d\u0227\2\u084a")
buf.write("\u084b\5\u043d\u021f\2\u084b\u0102\3\2\2\2\u084c\u084d")
buf.write("\5\u043d\u021f\2\u084d\u084e\5\u0463\u0232\2\u084e\u084f")
buf.write("\5\u0439\u021d\2\u084f\u0850\5\u043d\u021f\2\u0850\u0851")
buf.write("\5\u0453\u022a\2\u0851\u0852\5\u045b\u022e\2\u0852\u0104")
buf.write("\3\2\2\2\u0853\u0854\5\u043d\u021f\2\u0854\u0855\5\u0463")
buf.write("\u0232\2\u0855\u0856\5\u0439\u021d\2\u0856\u0857\5\u043d")
buf.write("\u021f\2\u0857\u0858\5\u0453\u022a\2\u0858\u0859\5\u045b")
buf.write("\u022e\2\u0859\u085a\5\u0445\u0223\2\u085a\u085b\5\u0451")
buf.write("\u0229\2\u085b\u085c\5\u044f\u0228\2\u085c\u0106\3\2\2")
buf.write("\2\u085d\u085e\5\u043d\u021f\2\u085e\u085f\5\u0463\u0232")
buf.write("\2\u085f\u0860\5\u0439\u021d\2\u0860\u0861\5\u043d\u021f")
buf.write("\2\u0861\u0862\5\u0453\u022a\2\u0862\u0863\5\u045b\u022e")
buf.write("\2\u0863\u0864\5\u0445\u0223\2\u0864\u0865\5\u0451\u0229")
buf.write("\2\u0865\u0866\5\u044f\u0228\2\u0866\u0867\7a\2\2\u0867")
buf.write("\u0868\5\u0445\u0223\2\u0868\u0869\5\u044f\u0228\2\u0869")
buf.write("\u086a\5\u0445\u0223\2\u086a\u086b\5\u045b\u022e\2\u086b")
buf.write("\u0108\3\2\2\2\u086c\u086d\5\u043d\u021f\2\u086d\u086e")
buf.write("\5\u0463\u0232\2\u086e\u086f\5\u0439\u021d\2\u086f\u0870")
buf.write("\5\u043d\u021f\2\u0870\u0871\5\u0453\u022a\2\u0871\u0872")
buf.write("\5\u045b\u022e\2\u0872\u0873\5\u0445\u0223\2\u0873\u0874")
buf.write("\5\u0451\u0229\2\u0874\u0875\5\u044f\u0228\2\u0875\u0876")
buf.write("\5\u0459\u022d\2\u0876\u010a\3\2\2\2\u0877\u0878\5\u043d")
buf.write("\u021f\2\u0878\u0879\5\u0463\u0232\2\u0879\u087a\5\u0439")
buf.write("\u021d\2\u087a\u087b\5\u044b\u0226\2\u087b\u087c\5\u045d")
buf.write("\u022f\2\u087c\u087d\5\u043b\u021e\2\u087d\u087e\5\u043d")
buf.write("\u021f\2\u087e\u010c\3\2\2\2\u087f\u0880\5\u043d\u021f")
buf.write("\2\u0880\u0881\5\u0463\u0232\2\u0881\u0882\5\u0439\u021d")
buf.write("\2\u0882\u0883\5\u044b\u0226\2\u0883\u0884\5\u045d\u022f")
buf.write("\2\u0884\u0885\5\u0459\u022d\2\u0885\u0886\5\u0445\u0223")
buf.write("\2\u0886\u0887\5\u045f\u0230\2\u0887\u0888\5\u043d\u021f")
buf.write("\2\u0888\u010e\3\2\2\2\u0889\u088a\5\u043d\u021f\2\u088a")
buf.write("\u088b\5\u0463\u0232\2\u088b\u088c\5\u043d\u021f\2\u088c")
buf.write("\u088d\5\u0439\u021d\2\u088d\u088e\5\u045d\u022f\2\u088e")
buf.write("\u088f\5\u045b\u022e\2\u088f\u0890\5\u043d\u021f\2\u0890")
buf.write("\u0110\3\2\2\2\u0891\u0892\5\u043d\u021f\2\u0892\u0893")
buf.write("\5\u0463\u0232\2\u0893\u0894\5\u0445\u0223\2\u0894\u0895")
buf.write("\5\u0459\u022d\2\u0895\u0896\5\u045b\u022e\2\u0896\u0897")
buf.write("\5\u0459\u022d\2\u0897\u0112\3\2\2\2\u0898\u0899\5\u043d")
buf.write("\u021f\2\u0899\u089a\5\u0463\u0232\2\u089a\u089b\5\u0445")
buf.write("\u0223\2\u089b\u089c\5\u045b\u022e\2\u089c\u0114\3\2\2")
buf.write("\2\u089d\u089e\5\u043d\u021f\2\u089e\u089f\5\u0463\u0232")
buf.write("\2\u089f\u08a0\5\u0453\u022a\2\u08a0\u08a1\5\u044b\u0226")
buf.write("\2\u08a1\u08a2\5\u0435\u021b\2\u08a2\u08a3\5\u0445\u0223")
buf.write("\2\u08a3\u08a4\5\u044f\u0228\2\u08a4\u0116\3\2\2\2\u08a5")
buf.write("\u08a6\5\u043d\u021f\2\u08a6\u08a7\5\u0463\u0232\2\u08a7")
buf.write("\u08a8\5\u045b\u022e\2\u08a8\u08a9\5\u043d\u021f\2\u08a9")
buf.write("\u08aa\5\u0457\u022c\2\u08aa\u08ab\5\u044f\u0228\2\u08ab")
buf.write("\u08ac\5\u0435\u021b\2\u08ac\u08ad\5\u044b\u0226\2\u08ad")
buf.write("\u0118\3\2\2\2\u08ae\u08af\5\u043d\u021f\2\u08af\u08b0")
buf.write("\5\u0463\u0232\2\u08b0\u08b1\5\u045b\u022e\2\u08b1\u08b2")
buf.write("\5\u0457\u022c\2\u08b2\u08b3\5\u0435\u021b\2\u08b3\u08b4")
buf.write("\5\u0439\u021d\2\u08b4\u08b5\5\u045b\u022e\2\u08b5\u011a")
buf.write("\3\2\2\2\u08b6\u08b7\5\u043f\u0220\2\u08b7\u08b8\5\u0435")
buf.write("\u021b\2\u08b8\u08b9\5\u0445\u0223\2\u08b9\u08ba\5\u044b")
buf.write("\u0226\2\u08ba\u08bb\5\u045d\u022f\2\u08bb\u08bc\5\u0457")
buf.write("\u022c\2\u08bc\u08bd\5\u043d\u021f\2\u08bd\u011c\3\2\2")
buf.write("\2\u08be\u08bf\5\u043f\u0220\2\u08bf\u08c0\5\u0435\u021b")
buf.write("\2\u08c0\u08c1\5\u044b\u0226\2\u08c1\u08c2\5\u0459\u022d")
buf.write("\2\u08c2\u08c3\5\u043d\u021f\2\u08c3\u011e\3\2\2\2\u08c4")
buf.write("\u08c5\5\u043f\u0220\2\u08c5\u08c6\5\u043d\u021f\2\u08c6")
buf.write("\u08c7\5\u045b\u022e\2\u08c7\u08c8\5\u0439\u021d\2\u08c8")
buf.write("\u08c9\5\u0443\u0222\2\u08c9\u0120\3\2\2\2\u08ca\u08cb")
buf.write("\5\u043f\u0220\2\u08cb\u08cc\5\u0445\u0223\2\u08cc\u08cd")
buf.write("\5\u044f\u0228\2\u08cd\u08ce\5\u0435\u021b\2\u08ce\u08cf")
buf.write("\5\u044b\u0226\2\u08cf\u0122\3\2\2\2\u08d0\u08d1\5\u043f")
buf.write("\u0220\2\u08d1\u08d2\5\u0445\u0223\2\u08d2\u08d3\5\u0457")
buf.write("\u022c\2\u08d3\u08d4\5\u0459\u022d\2\u08d4\u08d5\5\u045b")
buf.write("\u022e\2\u08d5\u0124\3\2\2\2\u08d6\u08d7\5\u043f\u0220")
buf.write("\2\u08d7\u08d8\5\u0445\u0223\2\u08d8\u08d9\5\u0457\u022c")
buf.write("\2\u08d9\u08da\5\u0459\u022d\2\u08da\u08db\5\u045b\u022e")
buf.write("\2\u08db\u08dc\7a\2\2\u08dc\u08dd\5\u045f\u0230\2\u08dd")
buf.write("\u08de\5\u0435\u021b\2\u08de\u08df\5\u044b\u0226\2\u08df")
buf.write("\u08e0\5\u045d\u022f\2\u08e0\u08e1\5\u043d\u021f\2\u08e1")
buf.write("\u0126\3\2\2\2\u08e2\u08e3\5\u043f\u0220\2\u08e3\u08e4")
buf.write("\5\u044b\u0226\2\u08e4\u08e5\5\u0451\u0229\2\u08e5\u08e6")
buf.write("\5\u0435\u021b\2\u08e6\u08e7\5\u045b\u022e\2\u08e7\u0128")
buf.write("\3\2\2\2\u08e8\u08e9\5\u043f\u0220\2\u08e9\u08ea\5\u0451")
buf.write("\u0229\2\u08ea\u08eb\5\u044b\u0226\2\u08eb\u08ec\5\u044b")
buf.write("\u0226\2\u08ec\u08ed\5\u0451\u0229\2\u08ed\u08ee\5\u0461")
buf.write("\u0231\2\u08ee\u08ef\5\u0445\u0223\2\u08ef\u08f0\5\u044f")
buf.write("\u0228\2\u08f0\u08f1\5\u0441\u0221\2\u08f1\u012a\3\2\2")
buf.write("\2\u08f2\u08f3\5\u043f\u0220\2\u08f3\u08f4\5\u0451\u0229")
buf.write("\2\u08f4\u08f5\5\u044b\u0226\2\u08f5\u08f6\5\u044b\u0226")
buf.write("\2\u08f6\u08f7\5\u0451\u0229\2\u08f7\u08f8\5\u0461\u0231")
buf.write("\2\u08f8\u08f9\5\u0459\u022d\2\u08f9\u012c\3\2\2\2\u08fa")
buf.write("\u08fb\5\u043f\u0220\2\u08fb\u08fc\5\u0451\u0229\2\u08fc")
buf.write("\u08fd\5\u0457\u022c\2\u08fd\u012e\3\2\2\2\u08fe\u08ff")
buf.write("\5\u043f\u0220\2\u08ff\u0900\5\u0451\u0229\2\u0900\u0901")
buf.write("\5\u0457\u022c\2\u0901\u0902\5\u0435\u021b\2\u0902\u0903")
buf.write("\5\u044b\u0226\2\u0903\u0904\5\u044b\u0226\2\u0904\u0130")
buf.write("\3\2\2\2\u0905\u0906\5\u043f\u0220\2\u0906\u0907\5\u0451")
buf.write("\u0229\2\u0907\u0908\5\u0457\u022c\2\u0908\u0909\5\u0439")
buf.write("\u021d\2\u0909\u090a\5\u043d\u021f\2\u090a\u0132\3\2\2")
buf.write("\2\u090b\u090c\5\u043f\u0220\2\u090c\u090d\5\u0457\u022c")
buf.write("\2\u090d\u090e\5\u0451\u0229\2\u090e\u090f\5\u044d\u0227")
buf.write("\2\u090f\u0134\3\2\2\2\u0910\u0911\5\u043f\u0220\2\u0911")
buf.write("\u0912\5\u045d\u022f\2\u0912\u0913\5\u044b\u0226\2\u0913")
buf.write("\u0914\5\u044b\u0226\2\u0914\u0136\3\2\2\2\u0915\u0916")
buf.write("\5\u043f\u0220\2\u0916\u0917\5\u045d\u022f\2\u0917\u0918")
buf.write("\5\u044f\u0228\2\u0918\u0919\5\u0439\u021d\2\u0919\u091a")
buf.write("\5\u045b\u022e\2\u091a\u091b\5\u0445\u0223\2\u091b\u091c")
buf.write("\5\u0451\u0229\2\u091c\u091d\5\u044f\u0228\2\u091d\u0138")
buf.write("\3\2\2\2\u091e\u091f\5\u0441\u0221\2\u091f\u0920\5\u0451")
buf.write("\u0229\2\u0920\u0921\5\u045b\u022e\2\u0921\u0922\5\u0451")
buf.write("\u0229\2\u0922\u013a\3\2\2\2\u0923\u0924\5\u0441\u0221")
buf.write("\2\u0924\u0925\5\u0457\u022c\2\u0925\u0926\5\u0435\u021b")
buf.write("\2\u0926\u0927\5\u044f\u0228\2\u0927\u0928\5\u045b\u022e")
buf.write("\2\u0928\u013c\3\2\2\2\u0929\u092a\5\u0441\u0221\2\u092a")
buf.write("\u092b\5\u0457\u022c\2\u092b\u092c\5\u0451\u0229\2\u092c")
buf.write("\u092d\5\u045d\u022f\2\u092d\u092e\5\u0453\u022a\2\u092e")
buf.write("\u013e\3\2\2\2\u092f\u0930\5\u0441\u0221\2\u0930\u0931")
buf.write("\5\u0457\u022c\2\u0931\u0932\5\u0451\u0229\2\u0932\u0933")
buf.write("\5\u045d\u022f\2\u0933\u0934\5\u0453\u022a\2\u0934\u0935")
buf.write("\5\u0445\u0223\2\u0935\u0936\5\u044f\u0228\2\u0936\u0937")
buf.write("\5\u0441\u0221\2\u0937\u0140\3\2\2\2\u0938\u0939\5\u0443")
buf.write("\u0222\2\u0939\u093a\5\u0435\u021b\2\u093a\u093b\5\u0459")
buf.write("\u022d\2\u093b\u093c\5\u0443\u0222\2\u093c\u0142\3\2\2")
buf.write("\2\u093d\u093e\5\u0443\u0222\2\u093e\u093f\5\u0435\u021b")
buf.write("\2\u093f\u0940\5\u045f\u0230\2\u0940\u0941\5\u0445\u0223")
buf.write("\2\u0941\u0942\5\u044f\u0228\2\u0942\u0943\5\u0441\u0221")
buf.write("\2\u0943\u0144\3\2\2\2\u0944\u0945\5\u0443\u0222\2\u0945")
buf.write("\u0946\5\u0445\u0223\2\u0946\u0947\5\u043b\u021e\2\u0947")
buf.write("\u0948\5\u043d\u021f\2\u0948\u0146\3\2\2\2\u0949\u094a")
buf.write("\5\u0443\u0222\2\u094a\u094b\5\u0451\u0229\2\u094b\u094c")
buf.write("\5\u045d\u022f\2\u094c\u094d\5\u0457\u022c\2\u094d\u0148")
buf.write("\3\2\2\2\u094e\u094f\5\u0445\u0223\2\u094f\u0950\5\u043f")
buf.write("\u0220\2\u0950\u014a\3\2\2\2\u0951\u0952\5\u0445\u0223")
buf.write("\2\u0952\u0953\5\u0441\u0221\2\u0953\u0954\5\u044f\u0228")
buf.write("\2\u0954\u0955\5\u0451\u0229\2\u0955\u0956\5\u0457\u022c")
buf.write("\2\u0956\u0957\5\u043d\u021f\2\u0957\u014c\3\2\2\2\u0958")
buf.write("\u0959\5\u0445\u0223\2\u0959\u095a\5\u044d\u0227\2\u095a")
buf.write("\u095b\5\u044d\u0227\2\u095b\u095c\5\u043d\u021f\2\u095c")
buf.write("\u095d\5\u043b\u021e\2\u095d\u095e\5\u0445\u0223\2\u095e")
buf.write("\u095f\5\u0435\u021b\2\u095f\u0960\5\u045b\u022e\2\u0960")
buf.write("\u0961\5\u043d\u021f\2\u0961\u014e\3\2\2\2\u0962\u0963")
buf.write("\5\u0445\u0223\2\u0963\u0964\5\u044f\u0228\2\u0964\u0150")
buf.write("\3\2\2\2\u0965\u0966\5\u0445\u0223\2\u0966\u0967\5\u044f")
buf.write("\u0228\2\u0967\u0968\5\u0439\u021d\2\u0968\u0969\5\u044b")
buf.write("\u0226\2\u0969\u096a\5\u045d\u022f\2\u096a\u096b\5\u043b")
buf.write("\u021e\2\u096b\u096c\5\u043d\u021f\2\u096c\u0152\3\2\2")
buf.write("\2\u096d\u096e\5\u0445\u0223\2\u096e\u096f\5\u044f\u0228")
buf.write("\2\u096f\u0970\5\u0439\u021d\2\u0970\u0971\5\u044b\u0226")
buf.write("\2\u0971\u0972\5\u045d\u022f\2\u0972\u0973\5\u043b\u021e")
buf.write("\2\u0973\u0974\5\u0445\u0223\2\u0974\u0975\5\u044f\u0228")
buf.write("\2\u0975\u0976\5\u0441\u0221\2\u0976\u0154\3\2\2\2\u0977")
buf.write("\u0978\5\u0445\u0223\2\u0978\u0979\5\u044f\u0228\2\u0979")
buf.write("\u097a\5\u0439\u021d\2\u097a\u097b\5\u0457\u022c\2\u097b")
buf.write("\u097c\5\u043d\u021f\2\u097c\u097d\5\u044d\u0227\2\u097d")
buf.write("\u097e\5\u043d\u021f\2\u097e\u097f\5\u044f\u0228\2\u097f")
buf.write("\u0980\5\u045b\u022e\2\u0980\u0156\3\2\2\2\u0981\u0982")
buf.write("\5\u0445\u0223\2\u0982\u0983\5\u044f\u0228\2\u0983\u0984")
buf.write("\5\u043b\u021e\2\u0984\u0985\5\u043d\u021f\2\u0985\u0986")
buf.write("\5\u044f\u0228\2\u0986\u0987\5\u045b\u022e\2\u0987\u0158")
buf.write("\3\2\2\2\u0988\u0989\5\u0445\u0223\2\u0989\u098a\5\u044f")
buf.write("\u0228\2\u098a\u098b\5\u043b\u021e\2\u098b\u098c\5\u043d")
buf.write("\u021f\2\u098c\u098d\5\u0463\u0232\2\u098d\u015a\3\2\2")
buf.write("\2\u098e\u098f\5\u0445\u0223\2\u098f\u0990\5\u044f\u0228")
buf.write("\2\u0990\u0991\5\u043b\u021e\2\u0991\u0992\5\u043d\u021f")
buf.write("\2\u0992\u0993\5\u0463\u0232\2\u0993\u0994\5\u043d\u021f")
buf.write("\2\u0994\u0995\5\u043b\u021e\2\u0995\u015c\3\2\2\2\u0996")
buf.write("\u0997\5\u0445\u0223\2\u0997\u0998\5\u044f\u0228\2\u0998")
buf.write("\u0999\5\u043b\u021e\2\u0999\u099a\5\u0445\u0223\2\u099a")
buf.write("\u099b\5\u0439\u021d\2\u099b\u099c\5\u0435\u021b\2\u099c")
buf.write("\u099d\5\u045b\u022e\2\u099d\u099e\5\u0451\u0229\2\u099e")
buf.write("\u099f\5\u0457\u022c\2\u099f\u015e\3\2\2\2\u09a0\u09a1")
buf.write("\5\u0445\u0223\2\u09a1\u09a2\5\u044f\u0228\2\u09a2\u09a3")
buf.write("\5\u043b\u021e\2\u09a3\u09a4\5\u0445\u0223\2\u09a4\u09a5")
buf.write("\5\u0439\u021d\2\u09a5\u09a6\5\u043d\u021f\2\u09a6\u09a7")
buf.write("\5\u0459\u022d\2\u09a7\u0160\3\2\2\2\u09a8\u09a9\5\u0445")
buf.write("\u0223\2\u09a9\u09aa\5\u044f\u0228\2\u09aa\u09ab\5\u043f")
buf.write("\u0220\2\u09ab\u09ac\5\u0445\u0223\2\u09ac\u09ad\5\u044f")
buf.write("\u0228\2\u09ad\u09ae\5\u0445\u0223\2\u09ae\u09af\5\u045b")
buf.write("\u022e\2\u09af\u09b0\5\u043d\u021f\2\u09b0\u0162\3\2\2")
buf.write("\2\u09b1\u09b2\5\u0445\u0223\2\u09b2\u09b3\5\u044f\u0228")
buf.write("\2\u09b3\u09b4\5\u044b\u0226\2\u09b4\u09b5\5\u0445\u0223")
buf.write("\2\u09b5\u09b6\5\u044f\u0228\2\u09b6\u09b7\5\u043d\u021f")
buf.write("\2\u09b7\u0164\3\2\2\2\u09b8\u09b9\5\u0445\u0223\2\u09b9")
buf.write("\u09ba\5\u044f\u0228\2\u09ba\u09bb\5\u044f\u0228\2\u09bb")
buf.write("\u09bc\5\u043d\u021f\2\u09bc\u09bd\5\u0457\u022c\2\u09bd")
buf.write("\u0166\3\2\2\2\u09be\u09bf\5\u0445\u0223\2\u09bf\u09c0")
buf.write("\5\u044f\u0228\2\u09c0\u09c1\5\u0451\u0229\2\u09c1\u09c2")
buf.write("\5\u045d\u022f\2\u09c2\u09c3\5\u045b\u022e\2\u09c3\u0168")
buf.write("\3\2\2\2\u09c4\u09c5\5\u0445\u0223\2\u09c5\u09c6\5\u044f")
buf.write("\u0228\2\u09c6\u09c7\5\u0459\u022d\2\u09c7\u09c8\5\u043d")
buf.write("\u021f\2\u09c8\u09c9\5\u0457\u022c\2\u09c9\u09ca\5\u045b")
buf.write("\u022e\2\u09ca\u016a\3\2\2\2\u09cb\u09cc\5\u0445\u0223")
buf.write("\2\u09cc\u09cd\5\u044f\u0228\2\u09cd\u09ce\5\u0459\u022d")
buf.write("\2\u09ce\u09cf\5\u045b\u022e\2\u09cf\u09d0\5\u0435\u021b")
buf.write("\2\u09d0\u09d1\5\u044f\u0228\2\u09d1\u09d2\5\u045b\u022e")
buf.write("\2\u09d2\u09d3\5\u0445\u0223\2\u09d3\u09d4\5\u0435\u021b")
buf.write("\2\u09d4\u09d5\5\u0437\u021c\2\u09d5\u09d6\5\u044b\u0226")
buf.write("\2\u09d6\u09d7\5\u043d\u021f\2\u09d7\u016c\3\2\2\2\u09d8")
buf.write("\u09d9\5\u0445\u0223\2\u09d9\u09da\5\u044f\u0228\2\u09da")
buf.write("\u09db\5\u0459\u022d\2\u09db\u09dc\5\u045b\u022e\2\u09dc")
buf.write("\u09dd\5\u043d\u021f\2\u09dd\u09de\5\u0435\u021b\2\u09de")
buf.write("\u09df\5\u043b\u021e\2\u09df\u016e\3\2\2\2\u09e0\u09e1")
buf.write("\5\u0445\u0223\2\u09e1\u09e2\5\u044f\u0228\2\u09e2\u09e3")
buf.write("\5\u045b\u022e\2\u09e3\u0170\3\2\2\2\u09e4\u09e5\5\u0445")
buf.write("\u0223\2\u09e5\u09e6\5\u044f\u0228\2\u09e6\u09e7\5\u045b")
buf.write("\u022e\2\u09e7\u09e8\5\u043d\u021f\2\u09e8\u09e9\5\u0441")
buf.write("\u0221\2\u09e9\u09ea\5\u043d\u021f\2\u09ea\u09eb\5\u0457")
buf.write("\u022c\2\u09eb\u0172\3\2\2\2\u09ec\u09ed\5\u0445\u0223")
buf.write("\2\u09ed\u09ee\5\u044f\u0228\2\u09ee\u09ef\5\u045b\u022e")
buf.write("\2\u09ef\u09f0\5\u043d\u021f\2\u09f0\u09f1\5\u0457\u022c")
buf.write("\2\u09f1\u09f2\5\u0459\u022d\2\u09f2\u09f3\5\u043d\u021f")
buf.write("\2\u09f3\u09f4\5\u0439\u021d\2\u09f4\u09f5\5\u045b\u022e")
buf.write("\2\u09f5\u0174\3\2\2\2\u09f6\u09f7\5\u0445\u0223\2\u09f7")
buf.write("\u09f8\5\u044f\u0228\2\u09f8\u09f9\5\u045b\u022e\2\u09f9")
buf.write("\u09fa\5\u043d\u021f\2\u09fa\u09fb\5\u0457\u022c\2\u09fb")
buf.write("\u09fc\5\u045f\u0230\2\u09fc\u09fd\5\u0435\u021b\2\u09fd")
buf.write("\u09fe\5\u044b\u0226\2\u09fe\u0176\3\2\2\2\u09ff\u0a00")
buf.write("\5\u0445\u0223\2\u0a00\u0a01\5\u044f\u0228\2\u0a01\u0a02")
buf.write("\5\u045b\u022e\2\u0a02\u0a03\5\u0451\u0229\2\u0a03\u0178")
buf.write("\3\2\2\2\u0a04\u0a05\5\u0445\u0223\2\u0a05\u0a06\5\u044f")
buf.write("\u0228\2\u0a06\u0a07\5\u045f\u0230\2\u0a07\u0a08\5\u0435")
buf.write("\u021b\2\u0a08\u0a09\5\u044b\u0226\2\u0a09\u0a0a\5\u0445")
buf.write("\u0223\2\u0a0a\u0a0b\5\u043b\u021e\2\u0a0b\u0a0c\5\u0435")
buf.write("\u021b\2\u0a0c\u0a0d\5\u045b\u022e\2\u0a0d\u0a0e\5\u043d")
buf.write("\u021f\2\u0a0e\u017a\3\2\2\2\u0a0f\u0a10\5\u0445\u0223")
buf.write("\2\u0a10\u0a11\5\u0459\u022d\2\u0a11\u017c\3\2\2\2\u0a12")
buf.write("\u0a13\5\u0445\u0223\2\u0a13\u0a14\5\u0459\u022d\2\u0a14")
buf.write("\u0a15\5\u0451\u0229\2\u0a15\u0a16\5\u044b\u0226\2\u0a16")
buf.write("\u0a17\5\u0435\u021b\2\u0a17\u0a18\5\u045b\u022e\2\u0a18")
buf.write("\u0a19\5\u0445\u0223\2\u0a19\u0a1a\5\u0451\u0229\2\u0a1a")
buf.write("\u0a1b\5\u044f\u0228\2\u0a1b\u017e\3\2\2\2\u0a1c\u0a1d")
buf.write("\5\u0445\u0223\2\u0a1d\u0a1e\5\u045b\u022e\2\u0a1e\u0a1f")
buf.write("\5\u043d\u021f\2\u0a1f\u0a20\5\u0457\u022c\2\u0a20\u0a21")
buf.write("\5\u0435\u021b\2\u0a21\u0a22\5\u045b\u022e\2\u0a22\u0a23")
buf.write("\5\u043d\u021f\2\u0a23\u0180\3\2\2\2\u0a24\u0a25\5\u0447")
buf.write("\u0224\2\u0a25\u0a26\5\u0435\u021b\2\u0a26\u0a27\5\u045f")
buf.write("\u0230\2\u0a27\u0a28\5\u0435\u021b\2\u0a28\u0182\3\2\2")
buf.write("\2\u0a29\u0a2a\5\u0447\u0224\2\u0a2a\u0a2b\5\u0451\u0229")
buf.write("\2\u0a2b\u0a2c\5\u0445\u0223\2\u0a2c\u0a2d\5\u044f\u0228")
buf.write("\2\u0a2d\u0184\3\2\2\2\u0a2e\u0a2f\5\u0449\u0225\2\u0a2f")
buf.write("\u0a30\5\u043d\u021f\2\u0a30\u0a31\5\u043d\u021f\2\u0a31")
buf.write("\u0a32\5\u0453\u022a\2\u0a32\u0186\3\2\2\2\u0a33\u0a34")
buf.write("\5\u044b\u0226\2\u0a34\u0a35\5\u0435\u021b\2\u0a35\u0a36")
buf.write("\5\u044f\u0228\2\u0a36\u0a37\5\u0441\u0221\2\u0a37\u0a38")
buf.write("\5\u045d\u022f\2\u0a38\u0a39\5\u0435\u021b\2\u0a39\u0a3a")
buf.write("\5\u0441\u0221\2\u0a3a\u0a3b\5\u043d\u021f\2\u0a3b\u0188")
buf.write("\3\2\2\2\u0a3c\u0a3d\5\u044b\u0226\2\u0a3d\u0a3e\5\u0435")
buf.write("\u021b\2\u0a3e\u0a3f\5\u0459\u022d\2\u0a3f\u0a40\5\u045b")
buf.write("\u022e\2\u0a40\u018a\3\2\2\2\u0a41\u0a42\5\u044b\u0226")
buf.write("\2\u0a42\u0a43\5\u0435\u021b\2\u0a43\u0a44\5\u0459\u022d")
buf.write("\2\u0a44\u0a45\5\u045b\u022e\2\u0a45\u0a46\7a\2\2\u0a46")
buf.write("\u0a47\5\u045f\u0230\2\u0a47\u0a48\5\u0435\u021b\2\u0a48")
buf.write("\u0a49\5\u044b\u0226\2\u0a49\u0a4a\5\u045d\u022f\2\u0a4a")
buf.write("\u0a4b\5\u043d\u021f\2\u0a4b\u018c\3\2\2\2\u0a4c\u0a4d")
buf.write("\5\u044b\u0226\2\u0a4d\u0a4e\5\u043d\u021f\2\u0a4e\u0a4f")
buf.write("\5\u0435\u021b\2\u0a4f\u0a50\5\u043b\u021e\2\u0a50\u0a51")
buf.write("\5\u0445\u0223\2\u0a51\u0a52\5\u044f\u0228\2\u0a52\u0a53")
buf.write("\5\u0441\u0221\2\u0a53\u018e\3\2\2\2\u0a54\u0a55\5\u044b")
buf.write("\u0226\2\u0a55\u0a56\5\u043d\u021f\2\u0a56\u0a57\5\u043f")
buf.write("\u0220\2\u0a57\u0a58\5\u045b\u022e\2\u0a58\u0190\3\2\2")
buf.write("\2\u0a59\u0a5a\5\u044b\u0226\2\u0a5a\u0a5b\5\u043d\u021f")
buf.write("\2\u0a5b\u0a5c\5\u045f\u0230\2\u0a5c\u0a5d\5\u043d\u021f")
buf.write("\2\u0a5d\u0a5e\5\u044b\u0226\2\u0a5e\u0192\3\2\2\2\u0a5f")
buf.write("\u0a60\5\u044b\u0226\2\u0a60\u0a61\5\u0445\u0223\2\u0a61")
buf.write("\u0a62\5\u0437\u021c\2\u0a62\u0a63\5\u0457\u022c\2\u0a63")
buf.write("\u0a64\5\u0435\u021b\2\u0a64\u0a65\5\u0457\u022c\2\u0a65")
buf.write("\u0a66\5\u0465\u0233\2\u0a66\u0194\3\2\2\2\u0a67\u0a68")
buf.write("\5\u044b\u0226\2\u0a68\u0a69\5\u0445\u0223\2\u0a69\u0a6a")
buf.write("\5\u0449\u0225\2\u0a6a\u0a6b\5\u043d\u021f\2\u0a6b\u0196")
buf.write("\3\2\2\2\u0a6c\u0a6d\5\u044b\u0226\2\u0a6d\u0a6e\5\u0445")
buf.write("\u0223\2\u0a6e\u0a6f\5\u0449\u0225\2\u0a6f\u0a70\5\u043d")
buf.write("\u021f\2\u0a70\u0a71\7\64\2\2\u0a71\u0198\3\2\2\2\u0a72")
buf.write("\u0a73\5\u044b\u0226\2\u0a73\u0a74\5\u0445\u0223\2\u0a74")
buf.write("\u0a75\5\u0449\u0225\2\u0a75\u0a76\5\u043d\u021f\2\u0a76")
buf.write("\u0a77\7\66\2\2\u0a77\u019a\3\2\2\2\u0a78\u0a79\5\u044b")
buf.write("\u0226\2\u0a79\u0a7a\5\u0445\u0223\2\u0a7a\u0a7b\5\u0449")
buf.write("\u0225\2\u0a7b\u0a7c\5\u043d\u021f\2\u0a7c\u0a7d\5\u0439")
buf.write("\u021d\2\u0a7d\u019c\3\2\2\2\u0a7e\u0a7f\5\u044b\u0226")
buf.write("\2\u0a7f\u0a80\5\u0445\u0223\2\u0a80\u0a81\5\u044d\u0227")
buf.write("\2\u0a81\u0a82\5\u0445\u0223\2\u0a82\u0a83\5\u045b\u022e")
buf.write("\2\u0a83\u019e\3\2\2\2\u0a84\u0a85\5\u044b\u0226\2\u0a85")
buf.write("\u0a86\5\u0451\u0229\2\u0a86\u0a87\5\u0439\u021d\2\u0a87")
buf.write("\u0a88\5\u0435\u021b\2\u0a88\u0a89\5\u044b\u0226\2\u0a89")
buf.write("\u01a0\3\2\2\2\u0a8a\u0a8b\5\u044b\u0226\2\u0a8b\u0a8c")
buf.write("\5\u0451\u0229\2\u0a8c\u0a8d\5\u0439\u021d\2\u0a8d\u0a8e")
buf.write("\5\u0449\u0225\2\u0a8e\u01a2\3\2\2\2\u0a8f\u0a90\5\u044b")
buf.write("\u0226\2\u0a90\u0a91\5\u0451\u0229\2\u0a91\u0a92\5\u0439")
buf.write("\u021d\2\u0a92\u0a93\5\u0449\u0225\2\u0a93\u0a94\5\u043d")
buf.write("\u021f\2\u0a94\u0a95\5\u043b\u021e\2\u0a95\u01a4\3\2\2")
buf.write("\2\u0a96\u0a97\5\u044b\u0226\2\u0a97\u0a98\5\u0451\u0229")
buf.write("\2\u0a98\u0a99\5\u0441\u0221\2\u0a99\u01a6\3\2\2\2\u0a9a")
buf.write("\u0a9b\5\u044b\u0226\2\u0a9b\u0a9c\5\u0451\u0229\2\u0a9c")
buf.write("\u0a9d\5\u0441\u0221\2\u0a9d\u0a9e\5\u0451\u0229\2\u0a9e")
buf.write("\u0a9f\5\u043f\u0220\2\u0a9f\u0aa0\5\u043f\u0220\2\u0aa0")
buf.write("\u01a8\3\2\2\2\u0aa1\u0aa2\5\u044b\u0226\2\u0aa2\u0aa3")
buf.write("\5\u0451\u0229\2\u0aa3\u0aa4\5\u0441\u0221\2\u0aa4\u0aa5")
buf.write("\5\u0451\u0229\2\u0aa5\u0aa6\5\u044f\u0228\2\u0aa6\u01aa")
buf.write("\3\2\2\2\u0aa7\u0aa8\5\u044b\u0226\2\u0aa8\u0aa9\5\u0451")
buf.write("\u0229\2\u0aa9\u0aaa\5\u044f\u0228\2\u0aaa\u0aab\5\u0441")
buf.write("\u0221\2\u0aab\u01ac\3\2\2\2\u0aac\u0aad\5\u044b\u0226")
buf.write("\2\u0aad\u0aae\5\u0451\u0229\2\u0aae\u0aaf\5\u0451\u0229")
buf.write("\2\u0aaf\u0ab0\5\u0453\u022a\2\u0ab0\u01ae\3\2\2\2\u0ab1")
buf.write("\u0ab2\5\u044d\u0227\2\u0ab2\u0ab3\5\u0435\u021b\2\u0ab3")
buf.write("\u0ab4\5\u0445\u0223\2\u0ab4\u0ab5\5\u044f\u0228\2\u0ab5")
buf.write("\u01b0\3\2\2\2\u0ab6\u0ab7\5\u044d\u0227\2\u0ab7\u0ab8")
buf.write("\5\u0435\u021b\2\u0ab8\u0ab9\5\u0453\u022a\2\u0ab9\u01b2")
buf.write("\3\2\2\2\u0aba\u0abb\5\u044d\u0227\2\u0abb\u0abc\5\u0435")
buf.write("\u021b\2\u0abc\u0abd\5\u045b\u022e\2\u0abd\u0abe\5\u0439")
buf.write("\u021d\2\u0abe\u0abf\5\u0443\u0222\2\u0abf\u0ac0\5\u043d")
buf.write("\u021f\2\u0ac0\u0ac1\5\u043b\u021e\2\u0ac1\u01b4\3\2\2")
buf.write("\2\u0ac2\u0ac3\5\u044d\u0227\2\u0ac3\u0ac4\5\u0435\u021b")
buf.write("\2\u0ac4\u0ac5\5\u0463\u0232\2\u0ac5\u0ac6\5\u045f\u0230")
buf.write("\2\u0ac6\u0ac7\5\u0435\u021b\2\u0ac7\u0ac8\5\u044b\u0226")
buf.write("\2\u0ac8\u0ac9\5\u045d\u022f\2\u0ac9\u0aca\5\u043d\u021f")
buf.write("\2\u0aca\u01b6\3\2\2\2\u0acb\u0acc\5\u044d\u0227\2\u0acc")
buf.write("\u0acd\5\u043d\u021f\2\u0acd\u0ace\5\u0435\u021b\2\u0ace")
buf.write("\u0acf\5\u0459\u022d\2\u0acf\u0ad0\5\u045d\u022f\2\u0ad0")
buf.write("\u0ad1\5\u0457\u022c\2\u0ad1\u0ad2\5\u043d\u021f\2\u0ad2")
buf.write("\u0ad3\5\u0459\u022d\2\u0ad3\u01b8\3\2\2\2\u0ad4\u0ad5")
buf.write("\5\u044d\u0227\2\u0ad5\u0ad6\5\u043d\u021f\2\u0ad6\u0ad7")
buf.write("\5\u044d\u0227\2\u0ad7\u0ad8\5\u0437\u021c\2\u0ad8\u0ad9")
buf.write("\5\u043d\u021f\2\u0ad9\u0ada\5\u0457\u022c\2\u0ada\u01ba")
buf.write("\3\2\2\2\u0adb\u0adc\5\u044d\u0227\2\u0adc\u0add\5\u043d")
buf.write("\u021f\2\u0add\u0ade\5\u0457\u022c\2\u0ade\u0adf\5\u0441")
buf.write("\u0221\2\u0adf\u0ae0\5\u043d\u021f\2\u0ae0\u01bc\3\2\2")
buf.write("\2\u0ae1\u0ae2\5\u044d\u0227\2\u0ae2\u0ae3\5\u0445\u0223")
buf.write("\2\u0ae3\u0ae4\5\u044f\u0228\2\u0ae4\u0ae5\5\u045d\u022f")
buf.write("\2\u0ae5\u0ae6\5\u0459\u022d\2\u0ae6\u01be\3\2\2\2\u0ae7")
buf.write("\u0ae8\5\u044d\u0227\2\u0ae8\u0ae9\5\u0445\u0223\2\u0ae9")
buf.write("\u0aea\5\u044f\u0228\2\u0aea\u0aeb\5\u045d\u022f\2\u0aeb")
buf.write("\u0aec\5\u045b\u022e\2\u0aec\u0aed\5\u043d\u021f\2\u0aed")
buf.write("\u01c0\3\2\2\2\u0aee\u0aef\5\u044d\u0227\2\u0aef\u0af0")
buf.write("\5\u0445\u0223\2\u0af0\u0af1\5\u044f\u0228\2\u0af1\u0af2")
buf.write("\5\u045f\u0230\2\u0af2\u0af3\5\u0435\u021b\2\u0af3\u0af4")
buf.write("\5\u044b\u0226\2\u0af4\u0af5\5\u045d\u022f\2\u0af5\u0af6")
buf.write("\5\u043d\u021f\2\u0af6\u01c2\3\2\2\2\u0af7\u0af8\5\u044d")
buf.write("\u0227\2\u0af8\u0af9\5\u044b\u0226\2\u0af9\u0afa\5\u0459")
buf.write("\u022d\2\u0afa\u0afb\5\u044b\u0226\2\u0afb\u0afc\5\u0435")
buf.write("\u021b\2\u0afc\u0afd\5\u0437\u021c\2\u0afd\u0afe\5\u043d")
buf.write("\u021f\2\u0afe\u0aff\5\u044b\u0226\2\u0aff\u01c4\3\2\2")
buf.write("\2\u0b00\u0b01\5\u044d\u0227\2\u0b01\u0b02\5\u0451\u0229")
buf.write("\2\u0b02\u0b03\5\u043b\u021e\2\u0b03\u0b04\5\u043d\u021f")
buf.write("\2\u0b04\u01c6\3\2\2\2\u0b05\u0b06\5\u044d\u0227\2\u0b06")
buf.write("\u0b07\5\u0451\u0229\2\u0b07\u0b08\5\u043b\u021e\2\u0b08")
buf.write("\u0b09\5\u043d\u021f\2\u0b09\u0b0a\5\u044b\u0226\2\u0b0a")
buf.write("\u01c8\3\2\2\2\u0b0b\u0b0c\5\u044d\u0227\2\u0b0c\u0b0d")
buf.write("\5\u0451\u0229\2\u0b0d\u0b0e\5\u043b\u021e\2\u0b0e\u0b0f")
buf.write("\5\u0445\u0223\2\u0b0f\u0b10\5\u043f\u0220\2\u0b10\u0b11")
buf.write("\5\u0465\u0233\2\u0b11\u01ca\3\2\2\2\u0b12\u0b13\5\u044d")
buf.write("\u0227\2\u0b13\u0b14\5\u0451\u0229\2\u0b14\u0b15\5\u044f")
buf.write("\u0228\2\u0b15\u0b16\5\u045b\u022e\2\u0b16\u0b17\5\u0443")
buf.write("\u0222\2\u0b17\u01cc\3\2\2\2\u0b18\u0b19\5\u044d\u0227")
buf.write("\2\u0b19\u0b1a\5\u045d\u022f\2\u0b1a\u0b1b\5\u044b\u0226")
buf.write("\2\u0b1b\u0b1c\5\u045b\u022e\2\u0b1c\u0b1d\5\u0445\u0223")
buf.write("\2\u0b1d\u0b1e\5\u0459\u022d\2\u0b1e\u0b1f\5\u043d\u021f")
buf.write("\2\u0b1f\u0b20\5\u045b\u022e\2\u0b20\u01ce\3\2\2\2\u0b21")
buf.write("\u0b22\5\u044f\u0228\2\u0b22\u0b23\5\u0435\u021b\2\u0b23")
buf.write("\u0b24\5\u044d\u0227\2\u0b24\u0b25\5\u043d\u021f\2\u0b25")
buf.write("\u01d0\3\2\2\2\u0b26\u0b27\5\u044f\u0228\2\u0b27\u0b28")
buf.write("\5\u0435\u021b\2\u0b28\u0b29\5\u044f\u0228\2\u0b29\u01d2")
buf.write("\3\2\2\2\u0b2a\u0b2b\5\u044f\u0228\2\u0b2b\u0b2c\5\u0435")
buf.write("\u021b\2\u0b2c\u0b2d\5\u045b\u022e\2\u0b2d\u0b2e\5\u045d")
buf.write("\u022f\2\u0b2e\u0b2f\5\u0457\u022c\2\u0b2f\u0b30\5\u0435")
buf.write("\u021b\2\u0b30\u0b31\5\u044b\u0226\2\u0b31\u01d4\3\2\2")
buf.write("\2\u0b32\u0b33\5\u044f\u0228\2\u0b33\u0b34\5\u0435\u021b")
buf.write("\2\u0b34\u0b35\5\u045b\u022e\2\u0b35\u0b36\5\u045d\u022f")
buf.write("\2\u0b36\u0b37\5\u0457\u022c\2\u0b37\u0b38\5\u0435\u021b")
buf.write("\2\u0b38\u0b39\5\u044b\u0226\2\u0b39\u0b3a\5\u044f\u0228")
buf.write("\2\u0b3a\u01d6\3\2\2\2\u0b3b\u0b3c\5\u044f\u0228\2\u0b3c")
buf.write("\u0b3d\5\u0435\u021b\2\u0b3d\u0b3e\5\u045f\u0230\2\u0b3e")
buf.write("\u01d8\3\2\2\2\u0b3f\u0b40\5\u044f\u0228\2\u0b40\u0b41")
buf.write("\5\u0439\u021d\2\u0b41\u0b42\5\u0443\u0222\2\u0b42\u0b43")
buf.write("\5\u0435\u021b\2\u0b43\u0b44\5\u0457\u022c\2\u0b44\u01da")
buf.write("\3\2\2\2\u0b45\u0b46\5\u044f\u0228\2\u0b46\u0b47\5\u0439")
buf.write("\u021d\2\u0b47\u0b48\5\u0443\u0222\2\u0b48\u0b49\5\u0435")
buf.write("\u021b\2\u0b49\u0b4a\5\u0457\u022c\2\u0b4a\u0b4b\7a\2")
buf.write("\2\u0b4b\u0b4c\5\u0439\u021d\2\u0b4c\u0b4d\5\u0459\u022d")
buf.write("\2\u0b4d\u01dc\3\2\2\2\u0b4e\u0b4f\5\u044f\u0228\2\u0b4f")
buf.write("\u0b50\5\u0439\u021d\2\u0b50\u0b51\5\u044b\u0226\2\u0b51")
buf.write("\u0b52\5\u0451\u0229\2\u0b52\u0b53\5\u0437\u021c\2\u0b53")
buf.write("\u01de\3\2\2\2\u0b54\u0b55\5\u044f\u0228\2\u0b55\u0b56")
buf.write("\5\u043d\u021f\2\u0b56\u0b57\5\u0459\u022d\2\u0b57\u0b58")
buf.write("\5\u045b\u022e\2\u0b58\u0b59\5\u043d\u021f\2\u0b59\u0b5a")
buf.write("\5\u043b\u021e\2\u0b5a\u01e0\3\2\2\2\u0b5b\u0b5c\5\u044f")
buf.write("\u0228\2\u0b5c\u0b5d\5\u043d\u021f\2\u0b5d\u0b5e\5\u0461")
buf.write("\u0231\2\u0b5e\u01e2\3\2\2\2\u0b5f\u0b60\5\u044f\u0228")
buf.write("\2\u0b60\u0b61\5\u0451\u0229\2\u0b61\u01e4\3\2\2\2\u0b62")
buf.write("\u0b63\5\u044f\u0228\2\u0b63\u0b64\5\u0451\u0229\2\u0b64")
buf.write("\u0b65\5\u0435\u021b\2\u0b65\u0b66\5\u045d\u022f\2\u0b66")
buf.write("\u0b67\5\u043b\u021e\2\u0b67\u0b68\5\u0445\u0223\2\u0b68")
buf.write("\u0b69\5\u045b\u022e\2\u0b69\u01e6\3\2\2\2\u0b6a\u0b6b")
buf.write("\5\u044f\u0228\2\u0b6b\u0b6c\5\u0451\u0229\2\u0b6c\u0b6d")
buf.write("\5\u0439\u021d\2\u0b6d\u0b6e\5\u0435\u021b\2\u0b6e\u0b6f")
buf.write("\5\u0439\u021d\2\u0b6f\u0b70\5\u0443\u0222\2\u0b70\u0b71")
buf.write("\5\u043d\u021f\2\u0b71\u01e8\3\2\2\2\u0b72\u0b73\5\u044f")
buf.write("\u0228\2\u0b73\u0b74\5\u0451\u0229\2\u0b74\u0b75\5\u0439")
buf.write("\u021d\2\u0b75\u0b76\5\u0451\u0229\2\u0b76\u0b77\5\u0453")
buf.write("\u022a\2\u0b77\u0b78\5\u0465\u0233\2\u0b78\u01ea\3\2\2")
buf.write("\2\u0b79\u0b7a\5\u044f\u0228\2\u0b7a\u0b7b\5\u0451\u0229")
buf.write("\2\u0b7b\u0b7c\5\u0439\u021d\2\u0b7c\u0b7d\5\u0465\u0233")
buf.write("\2\u0b7d\u0b7e\5\u0439\u021d\2\u0b7e\u0b7f\5\u044b\u0226")
buf.write("\2\u0b7f\u0b80\5\u043d\u021f\2\u0b80\u01ec\3\2\2\2\u0b81")
buf.write("\u0b82\5\u044f\u0228\2\u0b82\u0b83\5\u0451\u0229\2\u0b83")
buf.write("\u0b84\5\u043d\u021f\2\u0b84\u0b85\5\u044f\u0228\2\u0b85")
buf.write("\u0b86\5\u045b\u022e\2\u0b86\u0b87\5\u0445\u0223\2\u0b87")
buf.write("\u0b88\5\u045b\u022e\2\u0b88\u0b89\5\u0465\u0233\2\u0b89")
buf.write("\u0b8a\5\u043d\u021f\2\u0b8a\u0b8b\5\u0459\u022d\2\u0b8b")
buf.write("\u0b8c\5\u0439\u021d\2\u0b8c\u0b8d\5\u0435\u021b\2\u0b8d")
buf.write("\u0b8e\5\u0453\u022a\2\u0b8e\u0b8f\5\u0445\u0223\2\u0b8f")
buf.write("\u0b90\5\u044f\u0228\2\u0b90\u0b91\5\u0441\u0221\2\u0b91")
buf.write("\u01ee\3\2\2\2\u0b92\u0b93\5\u044f\u0228\2\u0b93\u0b94")
buf.write("\5\u0451\u0229\2\u0b94\u0b95\5\u044d\u0227\2\u0b95\u0b96")
buf.write("\5\u0435\u021b\2\u0b96\u0b97\5\u0463\u0232\2\u0b97\u0b98")
buf.write("\5\u045f\u0230\2\u0b98\u0b99\5\u0435\u021b\2\u0b99\u0b9a")
buf.write("\5\u044b\u0226\2\u0b9a\u0b9b\5\u045d\u022f\2\u0b9b\u0b9c")
buf.write("\5\u043d\u021f\2\u0b9c\u01f0\3\2\2\2\u0b9d\u0b9e\5\u044f")
buf.write("\u0228\2\u0b9e\u0b9f\5\u0451\u0229\2\u0b9f\u0ba0\5\u044d")
buf.write("\u0227\2\u0ba0\u0ba1\5\u0445\u0223\2\u0ba1\u0ba2\5\u044f")
buf.write("\u0228\2\u0ba2\u0ba3\5\u045f\u0230\2\u0ba3\u0ba4\5\u0435")
buf.write("\u021b\2\u0ba4\u0ba5\5\u044b\u0226\2\u0ba5\u0ba6\5\u045d")
buf.write("\u022f\2\u0ba6\u0ba7\5\u043d\u021f\2\u0ba7\u01f2\3\2\2")
buf.write("\2\u0ba8\u0ba9\5\u044f\u0228\2\u0ba9\u0baa\5\u0451\u0229")
buf.write("\2\u0baa\u0bab\5\u044f\u0228\2\u0bab\u0bac\5\u043d\u021f")
buf.write("\2\u0bac\u01f4\3\2\2\2\u0bad\u0bae\5\u044f\u0228\2\u0bae")
buf.write("\u0baf\5\u0451\u0229\2\u0baf\u0bb0\5\u0451\u0229\2\u0bb0")
buf.write("\u0bb1\5\u0457\u022c\2\u0bb1\u0bb2\5\u043b\u021e\2\u0bb2")
buf.write("\u0bb3\5\u043d\u021f\2\u0bb3\u0bb4\5\u0457\u022c\2\u0bb4")
buf.write("\u01f6\3\2\2\2\u0bb5\u0bb6\5\u044f\u0228\2\u0bb6\u0bb7")
buf.write("\5\u0451\u0229\2\u0bb7\u0bb8\5\u0459\u022d\2\u0bb8\u0bb9")
buf.write("\5\u0439\u021d\2\u0bb9\u0bba\5\u0443\u0222\2\u0bba\u0bbb")
buf.write("\5\u043d\u021f\2\u0bbb\u0bbc\5\u044d\u0227\2\u0bbc\u0bbd")
buf.write("\5\u0435\u021b\2\u0bbd\u0bbe\5\u0439\u021d\2\u0bbe\u0bbf")
buf.write("\5\u0443\u0222\2\u0bbf\u0bc0\5\u043d\u021f\2\u0bc0\u0bc1")
buf.write("\5\u0439\u021d\2\u0bc1\u0bc2\5\u0449\u0225\2\u0bc2\u01f8")
buf.write("\3\2\2\2\u0bc3\u0bc4\5\u044f\u0228\2\u0bc4\u0bc5\5\u0451")
buf.write("\u0229\2\u0bc5\u0bc6\5\u045b\u022e\2\u0bc6\u01fa\3\2\2")
buf.write("\2\u0bc7\u0bc8\5\u044f\u0228\2\u0bc8\u0bc9\5\u0451\u0229")
buf.write("\2\u0bc9\u0bca\5\u0461\u0231\2\u0bca\u0bcb\5\u0435\u021b")
buf.write("\2\u0bcb\u0bcc\5\u0445\u0223\2\u0bcc\u0bcd\5\u045b\u022e")
buf.write("\2\u0bcd\u01fc\3\2\2\2\u0bce\u0bcf\5\u044f\u0228\2\u0bcf")
buf.write("\u0bd0\5\u045d\u022f\2\u0bd0\u0bd1\5\u044b\u0226\2\u0bd1")
buf.write("\u0bd2\5\u044b\u0226\2\u0bd2\u01fe\3\2\2\2\u0bd3\u0bd4")
buf.write("\5\u044f\u0228\2\u0bd4\u0bd5\5\u045d\u022f\2\u0bd5\u0bd6")
buf.write("\5\u044b\u0226\2\u0bd6\u0bd7\5\u044b\u0226\2\u0bd7\u0bd8")
buf.write("\5\u0459\u022d\2\u0bd8\u0200\3\2\2\2\u0bd9\u0bda\5\u044f")
buf.write("\u0228\2\u0bda\u0bdb\5\u045d\u022f\2\u0bdb\u0bdc\5\u044d")
buf.write("\u0227\2\u0bdc\u0bdd\5\u0437\u021c\2\u0bdd\u0bde\5\u043d")
buf.write("\u021f\2\u0bde\u0bdf\5\u0457\u022c\2\u0bdf\u0202\3\2\2")
buf.write("\2\u0be0\u0be1\5\u044f\u0228\2\u0be1\u0be2\5\u045d\u022f")
buf.write("\2\u0be2\u0be3\5\u044d\u0227\2\u0be3\u0be4\5\u043d\u021f")
buf.write("\2\u0be4\u0be5\5\u0457\u022c\2\u0be5\u0be6\5\u0445\u0223")
buf.write("\2\u0be6\u0be7\5\u0439\u021d\2\u0be7\u0204\3\2\2\2\u0be8")
buf.write("\u0be9\5\u044f\u0228\2\u0be9\u0bea\5\u045f\u0230\2\u0bea")
buf.write("\u0beb\5\u0435\u021b\2\u0beb\u0bec\5\u0457\u022c\2\u0bec")
buf.write("\u0bed\5\u0439\u021d\2\u0bed\u0bee\5\u0443\u0222\2\u0bee")
buf.write("\u0bef\5\u0435\u021b\2\u0bef\u0bf0\5\u0457\u022c\2\u0bf0")
buf.write("\u0bf1\7\64\2\2\u0bf1\u0206\3\2\2\2\u0bf2\u0bf3\5\u0451")
buf.write("\u0229\2\u0bf3\u0bf4\5\u0437\u021c\2\u0bf4\u0bf5\5\u0447")
buf.write("\u0224\2\u0bf5\u0bf6\5\u043d\u021f\2\u0bf6\u0bf7\5\u0439")
buf.write("\u021d\2\u0bf7\u0bf8\5\u045b\u022e\2\u0bf8\u0208\3\2\2")
buf.write("\2\u0bf9\u0bfa\5\u0451\u0229\2\u0bfa\u0bfb\5\u043f\u0220")
buf.write("\2\u0bfb\u020a\3\2\2\2\u0bfc\u0bfd\5\u0451\u0229\2\u0bfd")
buf.write("\u0bfe\5\u043f\u0220\2\u0bfe\u0bff\5\u043f\u0220\2\u0bff")
buf.write("\u020c\3\2\2\2\u0c00\u0c01\5\u0451\u0229\2\u0c01\u0c02")
buf.write("\5\u0445\u0223\2\u0c02\u0c03\5\u043b\u021e\2\u0c03\u020e")
buf.write("\3\2\2\2\u0c04\u0c05\5\u0451\u0229\2\u0c05\u0c06\5\u044b")
buf.write("\u0226\2\u0c06\u0c07\5\u043b\u021e\2\u0c07\u0210\3\2\2")
buf.write("\2\u0c08\u0c09\5\u0451\u0229\2\u0c09\u0c0a\5\u044f\u0228")
buf.write("\2\u0c0a\u0212\3\2\2\2\u0c0b\u0c0c\5\u0451\u0229\2\u0c0c")
buf.write("\u0c0d\5\u044f\u0228\2\u0c0d\u0c0e\5\u044b\u0226\2\u0c0e")
buf.write("\u0c0f\5\u0465\u0233\2\u0c0f\u0214\3\2\2\2\u0c10\u0c11")
buf.write("\5\u0451\u0229\2\u0c11\u0c12\5\u0453\u022a\2\u0c12\u0c13")
buf.write("\5\u043d\u021f\2\u0c13\u0c14\5\u044f\u0228\2\u0c14\u0216")
buf.write("\3\2\2\2\u0c15\u0c16\5\u0451\u0229\2\u0c16\u0c17\5\u0453")
buf.write("\u022a\2\u0c17\u0c18\5\u045b\u022e\2\u0c18\u0c19\5\u0445")
buf.write("\u0223\2\u0c19\u0c1a\5\u0451\u0229\2\u0c1a\u0c1b\5\u044f")
buf.write("\u0228\2\u0c1b\u0218\3\2\2\2\u0c1c\u0c1d\5\u0451\u0229")
buf.write("\2\u0c1d\u0c1e\5\u0457\u022c\2\u0c1e\u021a\3\2\2\2\u0c1f")
buf.write("\u0c20\5\u0451\u0229\2\u0c20\u0c21\5\u0457\u022c\2\u0c21")
buf.write("\u0c22\5\u0435\u021b\2\u0c22\u0c23\5\u043b\u021e\2\u0c23")
buf.write("\u0c24\5\u0435\u021b\2\u0c24\u0c25\5\u045b\u022e\2\u0c25")
buf.write("\u0c26\5\u0435\u021b\2\u0c26\u021c\3\2\2\2\u0c27\u0c28")
buf.write("\5\u0451\u0229\2\u0c28\u0c29\5\u0457\u022c\2\u0c29\u0c2a")
buf.write("\5\u043b\u021e\2\u0c2a\u0c2b\5\u043d\u021f\2\u0c2b\u0c2c")
buf.write("\5\u0457\u022c\2\u0c2c\u021e\3\2\2\2\u0c2d\u0c2e\5\u0451")
buf.write("\u0229\2\u0c2e\u0c2f\5\u0457\u022c\2\u0c2f\u0c30\5\u043b")
buf.write("\u021e\2\u0c30\u0c31\5\u0445\u0223\2\u0c31\u0c32\5\u044f")
buf.write("\u0228\2\u0c32\u0c33\5\u0435\u021b\2\u0c33\u0c34\5\u044b")
buf.write("\u0226\2\u0c34\u0c35\5\u0445\u0223\2\u0c35\u0c36\5\u045b")
buf.write("\u022e\2\u0c36\u0c37\5\u0465\u0233\2\u0c37\u0220\3\2\2")
buf.write("\2\u0c38\u0c39\5\u0451\u0229\2\u0c39\u0c3a\5\u0459\u022d")
buf.write("\2\u0c3a\u0c3b\5\u043d\u021f\2\u0c3b\u0c3c\5\u0457\u022c")
buf.write("\2\u0c3c\u0c3d\5\u0457\u022c\2\u0c3d\u0c3e\5\u0451\u0229")
buf.write("\2\u0c3e\u0c3f\5\u0457\u022c\2\u0c3f\u0222\3\2\2\2\u0c40")
buf.write("\u0c41\5\u0451\u0229\2\u0c41\u0c42\5\u045d\u022f\2\u0c42")
buf.write("\u0c43\5\u045b\u022e\2\u0c43\u0224\3\2\2\2\u0c44\u0c45")
buf.write("\5\u0451\u0229\2\u0c45\u0c46\5\u045d\u022f\2\u0c46\u0c47")
buf.write("\5\u045b\u022e\2\u0c47\u0c48\5\u043d\u021f\2\u0c48\u0c49")
buf.write("\5\u0457\u022c\2\u0c49\u0226\3\2\2\2\u0c4a\u0c4b\5\u0451")
buf.write("\u0229\2\u0c4b\u0c4c\5\u045f\u0230\2\u0c4c\u0c4d\5\u043d")
buf.write("\u021f\2\u0c4d\u0c4e\5\u0457\u022c\2\u0c4e\u0228\3\2\2")
buf.write("\2\u0c4f\u0c50\5\u0451\u0229\2\u0c50\u0c51\5\u045f\u0230")
buf.write("\2\u0c51\u0c52\5\u043d\u021f\2\u0c52\u0c53\5\u0457\u022c")
buf.write("\2\u0c53\u0c54\5\u0457\u022c\2\u0c54\u0c55\5\u0445\u0223")
buf.write("\2\u0c55\u0c56\5\u043b\u021e\2\u0c56\u0c57\5\u0445\u0223")
buf.write("\2\u0c57\u0c58\5\u044f\u0228\2\u0c58\u0c59\5\u0441\u0221")
buf.write("\2\u0c59\u022a\3\2\2\2\u0c5a\u0c5b\5\u0453\u022a\2\u0c5b")
buf.write("\u0c5c\5\u0435\u021b\2\u0c5c\u0c5d\5\u0439\u021d\2\u0c5d")
buf.write("\u0c5e\5\u0449\u0225\2\u0c5e\u0c5f\5\u0435\u021b\2\u0c5f")
buf.write("\u0c60\5\u0441\u0221\2\u0c60\u0c61\5\u043d\u021f\2\u0c61")
buf.write("\u022c\3\2\2\2\u0c62\u0c63\5\u0453\u022a\2\u0c63\u0c64")
buf.write("\5\u0435\u021b\2\u0c64\u0c65\5\u0457\u022c\2\u0c65\u0c66")
buf.write("\5\u0435\u021b\2\u0c66\u0c67\5\u044b\u0226\2\u0c67\u0c68")
buf.write("\5\u044b\u0226\2\u0c68\u0c69\5\u043d\u021f\2\u0c69\u0c6a")
buf.write("\5\u044b\u0226\2\u0c6a\u0c6b\7a\2\2\u0c6b\u0c6c\5\u043d")
buf.write("\u021f\2\u0c6c\u0c6d\5\u044f\u0228\2\u0c6d\u0c6e\5\u0435")
buf.write("\u021b\2\u0c6e\u0c6f\5\u0437\u021c\2\u0c6f\u0c70\5\u044b")
buf.write("\u0226\2\u0c70\u0c71\5\u043d\u021f\2\u0c71\u022e\3\2\2")
buf.write("\2\u0c72\u0c73\5\u0453\u022a\2\u0c73\u0c74\5\u0435\u021b")
buf.write("\2\u0c74\u0c75\5\u0457\u022c\2\u0c75\u0c76\5\u0435\u021b")
buf.write("\2\u0c76\u0c77\5\u044d\u0227\2\u0c77\u0c78\5\u043d\u021f")
buf.write("\2\u0c78\u0c79\5\u045b\u022e\2\u0c79\u0c7a\5\u043d\u021f")
buf.write("\2\u0c7a\u0c7b\5\u0457\u022c\2\u0c7b\u0c7c\5\u0459\u022d")
buf.write("\2\u0c7c\u0230\3\2\2\2\u0c7d\u0c7e\5\u0453\u022a\2\u0c7e")
buf.write("\u0c7f\5\u0435\u021b\2\u0c7f\u0c80\5\u0457\u022c\2\u0c80")
buf.write("\u0c81\5\u043d\u021f\2\u0c81\u0c82\5\u044f\u0228\2\u0c82")
buf.write("\u0c83\5\u045b\u022e\2\u0c83\u0232\3\2\2\2\u0c84\u0c85")
buf.write("\5\u0453\u022a\2\u0c85\u0c86\5\u0435\u021b\2\u0c86\u0c87")
buf.write("\5\u0457\u022c\2\u0c87\u0c88\5\u045b\u022e\2\u0c88\u0c89")
buf.write("\5\u0445\u0223\2\u0c89\u0c8a\5\u045b\u022e\2\u0c8a\u0c8b")
buf.write("\5\u0445\u0223\2\u0c8b\u0c8c\5\u0451\u0229\2\u0c8c\u0c8d")
buf.write("\5\u044f\u0228\2\u0c8d\u0234\3\2\2\2\u0c8e\u0c8f\5\u0453")
buf.write("\u022a\2\u0c8f\u0c90\5\u0435\u021b\2\u0c90\u0c91\5\u0459")
buf.write("\u022d\2\u0c91\u0c92\5\u0459\u022d\2\u0c92\u0c93\5\u0445")
buf.write("\u0223\2\u0c93\u0c94\5\u044f\u0228\2\u0c94\u0c95\5\u0441")
buf.write("\u0221\2\u0c95\u0236\3\2\2\2\u0c96\u0c97\5\u0453\u022a")
buf.write("\2\u0c97\u0c98\5\u0435\u021b\2\u0c98\u0c99\5\u045b\u022e")
buf.write("\2\u0c99\u0c9a\5\u0443\u0222\2\u0c9a\u0238\3\2\2\2\u0c9b")
buf.write("\u0c9c\7\'\2\2\u0c9c\u0c9d\5\u0457\u022c\2\u0c9d\u0c9e")
buf.write("\5\u0451\u0229\2\u0c9e\u0c9f\5\u0461\u0231\2\u0c9f\u0ca0")
buf.write("\5\u045b\u022e\2\u0ca0\u0ca1\5\u0465\u0233\2\u0ca1\u0ca2")
buf.write("\5\u0453\u022a\2\u0ca2\u0ca3\5\u043d\u021f\2\u0ca3\u023a")
buf.write("\3\2\2\2\u0ca4\u0ca5\7\'\2\2\u0ca5\u0ca6\5\u045b\u022e")
buf.write("\2\u0ca6\u0ca7\5\u0465\u0233\2\u0ca7\u0ca8\5\u0453\u022a")
buf.write("\2\u0ca8\u0ca9\5\u043d\u021f\2\u0ca9\u023c\3\2\2\2\u0caa")
buf.write("\u0cab\5\u0453\u022a\2\u0cab\u0cac\5\u0445\u0223\2\u0cac")
buf.write("\u0cad\5\u0453\u022a\2\u0cad\u0cae\5\u043d\u021f\2\u0cae")
buf.write("\u0caf\5\u044b\u0226\2\u0caf\u0cb0\5\u0445\u0223\2\u0cb0")
buf.write("\u0cb1\5\u044f\u0228\2\u0cb1\u0cb2\5\u043d\u021f\2\u0cb2")
buf.write("\u0cb3\5\u043b\u021e\2\u0cb3\u023e\3\2\2\2\u0cb4\u0cb5")
buf.write("\5\u0453\u022a\2\u0cb5\u0cb6\5\u0445\u0223\2\u0cb6\u0cb7")
buf.write("\5\u045f\u0230\2\u0cb7\u0cb8\5\u0451\u0229\2\u0cb8\u0cb9")
buf.write("\5\u045b\u022e\2\u0cb9\u0240\3\2\2\2\u0cba\u0cbb\5\u0453")
buf.write("\u022a\2\u0cbb\u0cbc\5\u044b\u0226\2\u0cbc\u0cbd\5\u0435")
buf.write("\u021b\2\u0cbd\u0cbe\5\u044f\u0228\2\u0cbe\u0242\3\2\2")
buf.write("\2\u0cbf\u0cc0\5\u0453\u022a\2\u0cc0\u0cc1\5\u044b\u0226")
buf.write("\2\u0cc1\u0cc2\5\u0459\u022d\2\u0cc2\u0cc3\7a\2\2\u0cc3")
buf.write("\u0cc4\5\u0445\u0223\2\u0cc4\u0cc5\5\u044f\u0228\2\u0cc5")
buf.write("\u0cc6\5\u045b\u022e\2\u0cc6\u0cc7\5\u043d\u021f\2\u0cc7")
buf.write("\u0cc8\5\u0441\u0221\2\u0cc8\u0cc9\5\u043d\u021f\2\u0cc9")
buf.write("\u0cca\5\u0457\u022c\2\u0cca\u0244\3\2\2\2\u0ccb\u0ccc")
buf.write("\5\u0453\u022a\2\u0ccc\u0ccd\5\u0451\u0229\2\u0ccd\u0cce")
buf.write("\5\u0459\u022d\2\u0cce\u0ccf\5\u0445\u0223\2\u0ccf\u0cd0")
buf.write("\5\u045b\u022e\2\u0cd0\u0cd1\5\u0445\u0223\2\u0cd1\u0cd2")
buf.write("\5\u045f\u0230\2\u0cd2\u0cd3\5\u043d\u021f\2\u0cd3\u0246")
buf.write("\3\2\2\2\u0cd4\u0cd5\5\u0453\u022a\2\u0cd5\u0cd6\5\u0451")
buf.write("\u0229\2\u0cd6\u0cd7\5\u0459\u022d\2\u0cd7\u0cd8\5\u0445")
buf.write("\u0223\2\u0cd8\u0cd9\5\u045b\u022e\2\u0cd9\u0cda\5\u0445")
buf.write("\u0223\2\u0cda\u0cdb\5\u045f\u0230\2\u0cdb\u0cdc\5\u043d")
buf.write("\u021f\2\u0cdc\u0cdd\5\u044f\u0228\2\u0cdd\u0248\3\2\2")
buf.write("\2\u0cde\u0cdf\5\u0453\u022a\2\u0cdf\u0ce0\5\u0457\u022c")
buf.write("\2\u0ce0\u0ce1\5\u0435\u021b\2\u0ce1\u0ce2\5\u0441\u0221")
buf.write("\2\u0ce2\u0ce3\5\u044d\u0227\2\u0ce3\u0ce4\5\u0435\u021b")
buf.write("\2\u0ce4\u024a\3\2\2\2\u0ce5\u0ce6\5\u0453\u022a\2\u0ce6")
buf.write("\u0ce7\5\u0457\u022c\2\u0ce7\u0ce8\5\u043d\u021f\2\u0ce8")
buf.write("\u0ce9\5\u0439\u021d\2\u0ce9\u0cea\5\u043d\u021f\2\u0cea")
buf.write("\u0ceb\5\u043b\u021e\2\u0ceb\u0cec\5\u0445\u0223\2\u0cec")
buf.write("\u0ced\5\u044f\u0228\2\u0ced\u0cee\5\u0441\u0221\2\u0cee")
buf.write("\u024c\3\2\2\2\u0cef\u0cf0\5\u0453\u022a\2\u0cf0\u0cf1")
buf.write("\5\u0457\u022c\2\u0cf1\u0cf2\5\u043d\u021f\2\u0cf2\u0cf3")
buf.write("\5\u0439\u021d\2\u0cf3\u0cf4\5\u0445\u0223\2\u0cf4\u0cf5")
buf.write("\5\u0459\u022d\2\u0cf5\u0cf6\5\u0445\u0223\2\u0cf6\u0cf7")
buf.write("\5\u0451\u0229\2\u0cf7\u0cf8\5\u044f\u0228\2\u0cf8\u024e")
buf.write("\3\2\2\2\u0cf9\u0cfa\5\u0453\u022a\2\u0cfa\u0cfb\5\u0457")
buf.write("\u022c\2\u0cfb\u0cfc\5\u043d\u021f\2\u0cfc\u0cfd\5\u0459")
buf.write("\u022d\2\u0cfd\u0cfe\5\u043d\u021f\2\u0cfe\u0cff\5\u044f")
buf.write("\u0228\2\u0cff\u0d00\5\u045b\u022e\2\u0d00\u0250\3\2\2")
buf.write("\2\u0d01\u0d02\5\u0453\u022a\2\u0d02\u0d03\5\u0457\u022c")
buf.write("\2\u0d03\u0d04\5\u0445\u0223\2\u0d04\u0d05\5\u0451\u0229")
buf.write("\2\u0d05\u0d06\5\u0457\u022c\2\u0d06\u0252\3\2\2\2\u0d07")
buf.write("\u0d08\5\u0453\u022a\2\u0d08\u0d09\5\u0457\u022c\2\u0d09")
buf.write("\u0d0a\5\u0451\u0229\2\u0d0a\u0d0b\5\u0439\u021d\2\u0d0b")
buf.write("\u0d0c\5\u043d\u021f\2\u0d0c\u0d0d\5\u043b\u021e\2\u0d0d")
buf.write("\u0d0e\5\u045d\u022f\2\u0d0e\u0d0f\5\u0457\u022c\2\u0d0f")
buf.write("\u0d10\5\u043d\u021f\2\u0d10\u0254\3\2\2\2\u0d11\u0d12")
buf.write("\5\u0457\u022c\2\u0d12\u0d13\5\u0435\u021b\2\u0d13\u0d14")
buf.write("\5\u0445\u0223\2\u0d14\u0d15\5\u0459\u022d\2\u0d15\u0d16")
buf.write("\5\u043d\u021f\2\u0d16\u0256\3\2\2\2\u0d17\u0d18\5\u0457")
buf.write("\u022c\2\u0d18\u0d19\5\u0435\u021b\2\u0d19\u0d1a\5\u044f")
buf.write("\u0228\2\u0d1a\u0d1b\5\u0441\u0221\2\u0d1b\u0d1c\5\u043d")
buf.write("\u021f\2\u0d1c\u0258\3\2\2\2\u0d1d\u0d1e\5\u0457\u022c")
buf.write("\2\u0d1e\u0d1f\5\u0435\u021b\2\u0d1f\u0d20\5\u0461\u0231")
buf.write("\2\u0d20\u025a\3\2\2\2\u0d21\u0d22\5\u0457\u022c\2\u0d22")
buf.write("\u0d23\5\u043d\u021f\2\u0d23\u0d24\5\u0435\u021b\2\u0d24")
buf.write("\u0d25\5\u043b\u021e\2\u0d25\u025c\3\2\2\2\u0d26\u0d27")
buf.write("\5\u0457\u022c\2\u0d27\u0d28\5\u043d\u021f\2\u0d28\u0d29")
buf.write("\5\u0435\u021b\2\u0d29\u0d2a\5\u044b\u0226\2\u0d2a\u025e")
buf.write("\3\2\2\2\u0d2b\u0d2c\5\u0457\u022c\2\u0d2c\u0d2d\5\u043d")
buf.write("\u021f\2\u0d2d\u0d2e\5\u0439\u021d\2\u0d2e\u0d2f\5\u0451")
buf.write("\u0229\2\u0d2f\u0d30\5\u0457\u022c\2\u0d30\u0d31\5\u043b")
buf.write("\u021e\2\u0d31\u0260\3\2\2\2\u0d32\u0d33\5\u0457\u022c")
buf.write("\2\u0d33\u0d34\5\u043d\u021f\2\u0d34\u0d35\5\u043f\u0220")
buf.write("\2\u0d35\u0262\3\2\2\2\u0d36\u0d37\5\u0457\u022c\2\u0d37")
buf.write("\u0d38\5\u043d\u021f\2\u0d38\u0d39\5\u043f\u0220\2\u0d39")
buf.write("\u0d3a\5\u043d\u021f\2\u0d3a\u0d3b\5\u0457\u022c\2\u0d3b")
buf.write("\u0d3c\5\u043d\u021f\2\u0d3c\u0d3d\5\u044f\u0228\2\u0d3d")
buf.write("\u0d3e\5\u0439\u021d\2\u0d3e\u0d3f\5\u043d\u021f\2\u0d3f")
buf.write("\u0264\3\2\2\2\u0d40\u0d41\5\u0457\u022c\2\u0d41\u0d42")
buf.write("\5\u043d\u021f\2\u0d42\u0d43\5\u043f\u0220\2\u0d43\u0d44")
buf.write("\5\u043d\u021f\2\u0d44\u0d45\5\u0457\u022c\2\u0d45\u0d46")
buf.write("\5\u043d\u021f\2\u0d46\u0d47\5\u044f\u0228\2\u0d47\u0d48")
buf.write("\5\u0439\u021d\2\u0d48\u0d49\5\u0445\u0223\2\u0d49\u0d4a")
buf.write("\5\u044f\u0228\2\u0d4a\u0d4b\5\u0441\u0221\2\u0d4b\u0266")
buf.write("\3\2\2\2\u0d4c\u0d4d\5\u0457\u022c\2\u0d4d\u0d4e\5\u043d")
buf.write("\u021f\2\u0d4e\u0d4f\5\u0447\u0224\2\u0d4f\u0d50\5\u043d")
buf.write("\u021f\2\u0d50\u0d51\5\u0439\u021d\2\u0d51\u0d52\5\u045b")
buf.write("\u022e\2\u0d52\u0268\3\2\2\2\u0d53\u0d54\5\u0457\u022c")
buf.write("\2\u0d54\u0d55\5\u043d\u021f\2\u0d55\u0d56\5\u044b\u0226")
buf.write("\2\u0d56\u0d57\5\u0445\u0223\2\u0d57\u0d58\5\u043d\u021f")
buf.write("\2\u0d58\u0d59\5\u0459\u022d\2\u0d59\u0d5a\7a\2\2\u0d5a")
buf.write("\u0d5b\5\u0451\u0229\2\u0d5b\u0d5c\5\u044f\u0228\2\u0d5c")
buf.write("\u026a\3\2\2\2\u0d5d\u0d5e\5\u0457\u022c\2\u0d5e\u0d5f")
buf.write("\5\u043d\u021f\2\u0d5f\u0d60\5\u044f\u0228\2\u0d60\u0d61")
buf.write("\5\u0435\u021b\2\u0d61\u0d62\5\u044d\u0227\2\u0d62\u0d63")
buf.write("\5\u043d\u021f\2\u0d63\u026c\3\2\2\2\u0d64\u0d65\5\u0457")
buf.write("\u022c\2\u0d65\u0d66\5\u043d\u021f\2\u0d66\u0d67\5\u0453")
buf.write("\u022a\2\u0d67\u0d68\5\u044b\u0226\2\u0d68\u0d69\5\u0435")
buf.write("\u021b\2\u0d69\u0d6a\5\u0439\u021d\2\u0d6a\u0d6b\5\u043d")
buf.write("\u021f\2\u0d6b\u026e\3\2\2\2\u0d6c\u0d6d\5\u0457\u022c")
buf.write("\2\u0d6d\u0d6e\5\u043d\u021f\2\u0d6e\u0d6f\5\u0459\u022d")
buf.write("\2\u0d6f\u0d70\5\u0453\u022a\2\u0d70\u0d71\5\u043d\u021f")
buf.write("\2\u0d71\u0d72\5\u0439\u021d\2\u0d72\u0d73\5\u045b\u022e")
buf.write("\2\u0d73\u0270\3\2\2\2\u0d74\u0d75\5\u0457\u022c\2\u0d75")
buf.write("\u0d76\5\u043d\u021f\2\u0d76\u0d77\5\u0459\u022d\2\u0d77")
buf.write("\u0d78\5\u045b\u022e\2\u0d78\u0d79\5\u0457\u022c\2\u0d79")
buf.write("\u0d7a\5\u0445\u0223\2\u0d7a\u0d7b\5\u0439\u021d\2\u0d7b")
buf.write("\u0d7c\5\u045b\u022e\2\u0d7c\u0d7d\7a\2\2\u0d7d\u0d7e")
buf.write("\5\u0457\u022c\2\u0d7e\u0d7f\5\u043d\u021f\2\u0d7f\u0d80")
buf.write("\5\u043f\u0220\2\u0d80\u0d81\5\u043d\u021f\2\u0d81\u0d82")
buf.write("\5\u0457\u022c\2\u0d82\u0d83\5\u043d\u021f\2\u0d83\u0d84")
buf.write("\5\u044f\u0228\2\u0d84\u0d85\5\u0439\u021d\2\u0d85\u0d86")
buf.write("\5\u043d\u021f\2\u0d86\u0d87\5\u0459\u022d\2\u0d87\u0272")
buf.write("\3\2\2\2\u0d88\u0d89\5\u0457\u022c\2\u0d89\u0d8a\5\u043d")
buf.write("\u021f\2\u0d8a\u0d8b\5\u0459\u022d\2\u0d8b\u0d8c\5\u045d")
buf.write("\u022f\2\u0d8c\u0d8d\5\u044b\u0226\2\u0d8d\u0d8e\5\u045b")
buf.write("\u022e\2\u0d8e\u0274\3\2\2\2\u0d8f\u0d90\5\u0457\u022c")
buf.write("\2\u0d90\u0d91\5\u043d\u021f\2\u0d91\u0d92\5\u0459\u022d")
buf.write("\2\u0d92\u0d93\5\u045d\u022f\2\u0d93\u0d94\5\u044b\u0226")
buf.write("\2\u0d94\u0d95\5\u045b\u022e\2\u0d95\u0d96\7a\2\2\u0d96")
buf.write("\u0d97\5\u0439\u021d\2\u0d97\u0d98\5\u0435\u021b\2\u0d98")
buf.write("\u0d99\5\u0439\u021d\2\u0d99\u0d9a\5\u0443\u0222\2\u0d9a")
buf.write("\u0d9b\5\u043d\u021f\2\u0d9b\u0276\3\2\2\2\u0d9c\u0d9d")
buf.write("\5\u0457\u022c\2\u0d9d\u0d9e\5\u043d\u021f\2\u0d9e\u0d9f")
buf.write("\5\u045b\u022e\2\u0d9f\u0da0\5\u045d\u022f\2\u0da0\u0da1")
buf.write("\5\u0457\u022c\2\u0da1\u0da2\5\u044f\u0228\2\u0da2\u0278")
buf.write("\3\2\2\2\u0da3\u0da4\5\u0457\u022c\2\u0da4\u0da5\5\u043d")
buf.write("\u021f\2\u0da5\u0da6\5\u045b\u022e\2\u0da6\u0da7\5\u045d")
buf.write("\u022f\2\u0da7\u0da8\5\u0457\u022c\2\u0da8\u0da9\5\u044f")
buf.write("\u0228\2\u0da9\u0daa\5\u0445\u0223\2\u0daa\u0dab\5\u044f")
buf.write("\u0228\2\u0dab\u0dac\5\u0441\u0221\2\u0dac\u027a\3\2\2")
buf.write("\2\u0dad\u0dae\5\u0457\u022c\2\u0dae\u0daf\5\u043d\u021f")
buf.write("\2\u0daf\u0db0\5\u045d\u022f\2\u0db0\u0db1\5\u0459\u022d")
buf.write("\2\u0db1\u0db2\5\u043d\u021f\2\u0db2\u027c\3\2\2\2\u0db3")
buf.write("\u0db4\5\u0457\u022c\2\u0db4\u0db5\5\u043d\u021f\2\u0db5")
buf.write("\u0db6\5\u045f\u0230\2\u0db6\u0db7\5\u043d\u021f\2\u0db7")
buf.write("\u0db8\5\u0457\u022c\2\u0db8\u0db9\5\u0459\u022d\2\u0db9")
buf.write("\u0dba\5\u043d\u021f\2\u0dba\u027e\3\2\2\2\u0dbb\u0dbc")
buf.write("\5\u0457\u022c\2\u0dbc\u0dbd\5\u043d\u021f\2\u0dbd\u0dbe")
buf.write("\5\u045f\u0230\2\u0dbe\u0dbf\5\u0451\u0229\2\u0dbf\u0dc0")
buf.write("\5\u0449\u0225\2\u0dc0\u0dc1\5\u043d\u021f\2\u0dc1\u0280")
buf.write("\3\2\2\2\u0dc2\u0dc3\5\u0457\u022c\2\u0dc3\u0dc4\5\u0445")
buf.write("\u0223\2\u0dc4\u0dc5\5\u0441\u0221\2\u0dc5\u0dc6\5\u0443")
buf.write("\u0222\2\u0dc6\u0dc7\5\u045b\u022e\2\u0dc7\u0282\3\2\2")
buf.write("\2\u0dc8\u0dc9\5\u0457\u022c\2\u0dc9\u0dca\5\u0451\u0229")
buf.write("\2\u0dca\u0dcb\5\u044b\u0226\2\u0dcb\u0dcc\5\u044b\u0226")
buf.write("\2\u0dcc\u0dcd\5\u0437\u021c\2\u0dcd\u0dce\5\u0435\u021b")
buf.write("\2\u0dce\u0dcf\5\u0439\u021d\2\u0dcf\u0dd0\5\u0449\u0225")
buf.write("\2\u0dd0\u0284\3\2\2\2\u0dd1\u0dd2\5\u0457\u022c\2\u0dd2")
buf.write("\u0dd3\5\u0451\u0229\2\u0dd3\u0dd4\5\u044b\u0226\2\u0dd4")
buf.write("\u0dd5\5\u044b\u0226\2\u0dd5\u0dd6\5\u045d\u022f\2\u0dd6")
buf.write("\u0dd7\5\u0453\u022a\2\u0dd7\u0286\3\2\2\2\u0dd8\u0dd9")
buf.write("\5\u0457\u022c\2\u0dd9\u0dda\5\u0451\u0229\2\u0dda\u0ddb")
buf.write("\5\u0461\u0231\2\u0ddb\u0288\3\2\2\2\u0ddc\u0ddd\5\u0457")
buf.write("\u022c\2\u0ddd\u0dde\5\u0451\u0229\2\u0dde\u0ddf\5\u0461")
buf.write("\u0231\2\u0ddf\u0de0\5\u0445\u0223\2\u0de0\u0de1\5\u043b")
buf.write("\u021e\2\u0de1\u028a\3\2\2\2\u0de2\u0de3\5\u0457\u022c")
buf.write("\2\u0de3\u0de4\5\u0451\u0229\2\u0de4\u0de5\5\u0461\u0231")
buf.write("\2\u0de5\u0de6\5\u0459\u022d\2\u0de6\u028c\3\2\2\2\u0de7")
buf.write("\u0de8\5\u0457\u022c\2\u0de8\u0de9\5\u045d\u022f\2\u0de9")
buf.write("\u0dea\5\u044b\u0226\2\u0dea\u0deb\5\u043d\u021f\2\u0deb")
buf.write("\u0dec\5\u0459\u022d\2\u0dec\u028e\3\2\2\2\u0ded\u0dee")
buf.write("\5\u0459\u022d\2\u0dee\u0def\5\u0435\u021b\2\u0def\u0df0")
buf.write("\5\u044d\u0227\2\u0df0\u0df1\5\u0453\u022a\2\u0df1\u0df2")
buf.write("\5\u044b\u0226\2\u0df2\u0df3\5\u043d\u021f\2\u0df3\u0290")
buf.write("\3\2\2\2\u0df4\u0df5\5\u0459\u022d\2\u0df5\u0df6\5\u0435")
buf.write("\u021b\2\u0df6\u0df7\5\u045f\u0230\2\u0df7\u0df8\5\u043d")
buf.write("\u021f\2\u0df8\u0292\3\2\2\2\u0df9\u0dfa\5\u0459\u022d")
buf.write("\2\u0dfa\u0dfb\5\u0435\u021b\2\u0dfb\u0dfc\5\u045f\u0230")
buf.write("\2\u0dfc\u0dfd\5\u043d\u021f\2\u0dfd\u0dfe\5\u0453\u022a")
buf.write("\2\u0dfe\u0dff\5\u0451\u0229\2\u0dff\u0e00\5\u0445\u0223")
buf.write("\2\u0e00\u0e01\5\u044f\u0228\2\u0e01\u0e02\5\u045b\u022e")
buf.write("\2\u0e02\u0294\3\2\2\2\u0e03\u0e04\5\u0459\u022d\2\u0e04")
buf.write("\u0e05\5\u0439\u021d\2\u0e05\u0e06\5\u0443\u0222\2\u0e06")
buf.write("\u0e07\5\u043d\u021f\2\u0e07\u0e08\5\u044d\u0227\2\u0e08")
buf.write("\u0e09\5\u0435\u021b\2\u0e09\u0296\3\2\2\2\u0e0a\u0e0b")
buf.write("\5\u0459\u022d\2\u0e0b\u0e0c\5\u0439\u021d\2\u0e0c\u0e0d")
buf.write("\5\u0443\u0222\2\u0e0d\u0e0e\5\u043d\u021f\2\u0e0e\u0e0f")
buf.write("\5\u044d\u0227\2\u0e0f\u0e10\5\u0435\u021b\2\u0e10\u0e11")
buf.write("\5\u0439\u021d\2\u0e11\u0e12\5\u0443\u0222\2\u0e12\u0e13")
buf.write("\5\u043d\u021f\2\u0e13\u0e14\5\u0439\u021d\2\u0e14\u0e15")
buf.write("\5\u0449\u0225\2\u0e15\u0298\3\2\2\2\u0e16\u0e17\5\u0459")
buf.write("\u022d\2\u0e17\u0e18\5\u0439\u021d\2\u0e18\u0e19\5\u044f")
buf.write("\u0228\2\u0e19\u029a\3\2\2\2\u0e1a\u0e1b\5\u0459\u022d")
buf.write("\2\u0e1b\u0e1c\5\u043d\u021f\2\u0e1c\u0e1d\5\u0435\u021b")
buf.write("\2\u0e1d\u0e1e\5\u0457\u022c\2\u0e1e\u0e1f\5\u0439\u021d")
buf.write("\2\u0e1f\u0e20\5\u0443\u0222\2\u0e20\u029c\3\2\2\2\u0e21")
buf.write("\u0e22\5\u0459\u022d\2\u0e22\u0e23\5\u043d\u021f\2\u0e23")
buf.write("\u0e24\5\u0439\u021d\2\u0e24\u0e25\5\u0451\u0229\2\u0e25")
buf.write("\u0e26\5\u044f\u0228\2\u0e26\u0e27\5\u043b\u021e\2\u0e27")
buf.write("\u029e\3\2\2\2\u0e28\u0e29\5\u0459\u022d\2\u0e29\u0e2a")
buf.write("\5\u043d\u021f\2\u0e2a\u0e2b\5\u043d\u021f\2\u0e2b\u0e2c")
buf.write("\5\u043b\u021e\2\u0e2c\u02a0\3\2\2\2\u0e2d\u0e2e\5\u0459")
buf.write("\u022d\2\u0e2e\u0e2f\5\u043d\u021f\2\u0e2f\u0e30\5\u0441")
buf.write("\u0221\2\u0e30\u0e31\5\u044d\u0227\2\u0e31\u0e32\5\u043d")
buf.write("\u021f\2\u0e32\u0e33\5\u044f\u0228\2\u0e33\u0e34\5\u045b")
buf.write("\u022e\2\u0e34\u02a2\3\2\2\2\u0e35\u0e36\5\u0459\u022d")
buf.write("\2\u0e36\u0e37\5\u043d\u021f\2\u0e37\u0e38\5\u044b\u0226")
buf.write("\2\u0e38\u0e39\5\u043d\u021f\2\u0e39\u0e3a\5\u0439\u021d")
buf.write("\2\u0e3a\u0e3b\5\u045b\u022e\2\u0e3b\u02a4\3\2\2\2\u0e3c")
buf.write("\u0e3d\5\u0459\u022d\2\u0e3d\u0e3e\5\u043d\u021f\2\u0e3e")
buf.write("\u0e3f\5\u044b\u0226\2\u0e3f\u0e40\5\u043f\u0220\2\u0e40")
buf.write("\u02a6\3\2\2\2\u0e41\u0e42\5\u0459\u022d\2\u0e42\u0e43")
buf.write("\5\u043d\u021f\2\u0e43\u0e44\5\u0455\u022b\2\u0e44\u0e45")
buf.write("\5\u045d\u022f\2\u0e45\u0e46\5\u043d\u021f\2\u0e46\u0e47")
buf.write("\5\u044f\u0228\2\u0e47\u0e48\5\u0439\u021d\2\u0e48\u0e49")
buf.write("\5\u043d\u021f\2\u0e49\u02a8\3\2\2\2\u0e4a\u0e4b\5\u0459")
buf.write("\u022d\2\u0e4b\u0e4c\5\u043d\u021f\2\u0e4c\u0e4d\5\u0455")
buf.write("\u022b\2\u0e4d\u0e4e\5\u045d\u022f\2\u0e4e\u0e4f\5\u043d")
buf.write("\u021f\2\u0e4f\u0e50\5\u044f\u0228\2\u0e50\u0e51\5\u045b")
buf.write("\u022e\2\u0e51\u0e52\5\u0445\u0223\2\u0e52\u0e53\5\u0435")
buf.write("\u021b\2\u0e53\u0e54\5\u044b\u0226\2\u0e54\u02aa\3\2\2")
buf.write("\2\u0e55\u0e56\5\u0459\u022d\2\u0e56\u0e57\5\u043d\u021f")
buf.write("\2\u0e57\u0e58\5\u0457\u022c\2\u0e58\u0e59\5\u0445\u0223")
buf.write("\2\u0e59\u0e5a\5\u0435\u021b\2\u0e5a\u0e5b\5\u044b\u0226")
buf.write("\2\u0e5b\u0e5c\5\u0445\u0223\2\u0e5c\u0e5d\5\u0467\u0234")
buf.write("\2\u0e5d\u0e5e\5\u0435\u021b\2\u0e5e\u0e5f\5\u0437\u021c")
buf.write("\2\u0e5f\u0e60\5\u044b\u0226\2\u0e60\u0e61\5\u043d\u021f")
buf.write("\2\u0e61\u02ac\3\2\2\2\u0e62\u0e63\5\u0459\u022d\2\u0e63")
buf.write("\u0e64\5\u043d\u021f\2\u0e64\u0e65\5\u0457\u022c\2\u0e65")
buf.write("\u0e66\5\u0445\u0223\2\u0e66\u0e67\5\u0435\u021b\2\u0e67")
buf.write("\u0e68\5\u044b\u0226\2\u0e68\u0e69\5\u044b\u0226\2\u0e69")
buf.write("\u0e6a\5\u0465\u0233\2\u0e6a\u0e6b\7a\2\2\u0e6b\u0e6c")
buf.write("\5\u0457\u022c\2\u0e6c\u0e6d\5\u043d\u021f\2\u0e6d\u0e6e")
buf.write("\5\u045d\u022f\2\u0e6e\u0e6f\5\u0459\u022d\2\u0e6f\u0e70")
buf.write("\5\u0435\u021b\2\u0e70\u0e71\5\u0437\u021c\2\u0e71\u0e72")
buf.write("\5\u044b\u0226\2\u0e72\u0e73\5\u043d\u021f\2\u0e73\u02ae")
buf.write("\3\2\2\2\u0e74\u0e75\5\u0459\u022d\2\u0e75\u0e76\5\u043d")
buf.write("\u021f\2\u0e76\u0e77\5\u0457\u022c\2\u0e77\u0e78\5\u045f")
buf.write("\u0230\2\u0e78\u0e79\5\u043d\u021f\2\u0e79\u0e7a\5\u0457")
buf.write("\u022c\2\u0e7a\u0e7b\5\u043d\u021f\2\u0e7b\u0e7c\5\u0457")
buf.write("\u022c\2\u0e7c\u0e7d\5\u0457\u022c\2\u0e7d\u0e7e\5\u0451")
buf.write("\u0229\2\u0e7e\u0e7f\5\u0457\u022c\2\u0e7f\u02b0\3\2\2")
buf.write("\2\u0e80\u0e81\5\u0459\u022d\2\u0e81\u0e82\5\u043d\u021f")
buf.write("\2\u0e82\u0e83\5\u0459\u022d\2\u0e83\u0e84\5\u0459\u022d")
buf.write("\2\u0e84\u0e85\5\u0445\u0223\2\u0e85\u0e86\5\u0451\u0229")
buf.write("\2\u0e86\u0e87\5\u044f\u0228\2\u0e87\u0e88\5\u045b\u022e")
buf.write("\2\u0e88\u0e89\5\u0445\u0223\2\u0e89\u0e8a\5\u044d\u0227")
buf.write("\2\u0e8a\u0e8b\5\u043d\u021f\2\u0e8b\u0e8c\5\u0467\u0234")
buf.write("\2\u0e8c\u0e8d\5\u0451\u0229\2\u0e8d\u0e8e\5\u044f\u0228")
buf.write("\2\u0e8e\u0e8f\5\u043d\u021f\2\u0e8f\u02b2\3\2\2\2\u0e90")
buf.write("\u0e91\5\u0459\u022d\2\u0e91\u0e92\5\u043d\u021f\2\u0e92")
buf.write("\u0e93\5\u045b\u022e\2\u0e93\u02b4\3\2\2\2\u0e94\u0e95")
buf.write("\5\u0459\u022d\2\u0e95\u0e96\5\u043d\u021f\2\u0e96\u0e97")
buf.write("\5\u045b\u022e\2\u0e97\u0e98\5\u0459\u022d\2\u0e98\u02b6")
buf.write("\3\2\2\2\u0e99\u0e9a\5\u0459\u022d\2\u0e9a\u0e9b\5\u043d")
buf.write("\u021f\2\u0e9b\u0e9c\5\u045b\u022e\2\u0e9c\u0e9d\5\u045b")
buf.write("\u022e\2\u0e9d\u0e9e\5\u0445\u0223\2\u0e9e\u0e9f\5\u044f")
buf.write("\u0228\2\u0e9f\u0ea0\5\u0441\u0221\2\u0ea0\u0ea1\5\u0459")
buf.write("\u022d\2\u0ea1\u02b8\3\2\2\2\u0ea2\u0ea3\5\u0459\u022d")
buf.write("\2\u0ea3\u0ea4\5\u0443\u0222\2\u0ea4\u0ea5\5\u0435\u021b")
buf.write("\2\u0ea5\u0ea6\5\u0457\u022c\2\u0ea6\u0ea7\5\u043d\u021f")
buf.write("\2\u0ea7\u02ba\3\2\2\2\u0ea8\u0ea9\5\u0459\u022d\2\u0ea9")
buf.write("\u0eaa\5\u0443\u0222\2\u0eaa\u0eab\5\u0451\u0229\2\u0eab")
buf.write("\u0eac\5\u0461\u0231\2\u0eac\u02bc\3\2\2\2\u0ead\u0eae")
buf.write("\5\u0459\u022d\2\u0eae\u0eaf\5\u0443\u0222\2\u0eaf\u0eb0")
buf.write("\5\u045d\u022f\2\u0eb0\u0eb1\5\u045b\u022e\2\u0eb1\u0eb2")
buf.write("\5\u043b\u021e\2\u0eb2\u0eb3\5\u0451\u0229\2\u0eb3\u0eb4")
buf.write("\5\u0461\u0231\2\u0eb4\u0eb5\5\u044f\u0228\2\u0eb5\u02be")
buf.write("\3\2\2\2\u0eb6\u0eb7\5\u0459\u022d\2\u0eb7\u0eb8\5\u0445")
buf.write("\u0223\2\u0eb8\u0eb9\5\u0437\u021c\2\u0eb9\u0eba\5\u044b")
buf.write("\u0226\2\u0eba\u0ebb\5\u0445\u0223\2\u0ebb\u0ebc\5\u044f")
buf.write("\u0228\2\u0ebc\u0ebd\5\u0441\u0221\2\u0ebd\u0ebe\5\u0459")
buf.write("\u022d\2\u0ebe\u02c0\3\2\2\2\u0ebf\u0ec0\5\u0459\u022d")
buf.write("\2\u0ec0\u0ec1\5\u0445\u0223\2\u0ec1\u0ec2\5\u0441\u0221")
buf.write("\2\u0ec2\u0ec3\5\u044f\u0228\2\u0ec3\u0ec4\5\u045b\u022e")
buf.write("\2\u0ec4\u0ec5\5\u0465\u0233\2\u0ec5\u0ec6\5\u0453\u022a")
buf.write("\2\u0ec6\u0ec7\5\u043d\u021f\2\u0ec7\u02c2\3\2\2\2\u0ec8")
buf.write("\u0ec9\5\u0459\u022d\2\u0ec9\u0eca\5\u0445\u0223\2\u0eca")
buf.write("\u0ecb\5\u044d\u0227\2\u0ecb\u0ecc\5\u0453\u022a\2\u0ecc")
buf.write("\u0ecd\5\u044b\u0226\2\u0ecd\u0ece\5\u043d\u021f\2\u0ece")
buf.write("\u0ecf\7a\2\2\u0ecf\u0ed0\5\u0445\u0223\2\u0ed0\u0ed1")
buf.write("\5\u044f\u0228\2\u0ed1\u0ed2\5\u045b\u022e\2\u0ed2\u0ed3")
buf.write("\5\u043d\u021f\2\u0ed3\u0ed4\5\u0441\u0221\2\u0ed4\u0ed5")
buf.write("\5\u043d\u021f\2\u0ed5\u0ed6\5\u0457\u022c\2\u0ed6\u02c4")
buf.write("\3\2\2\2\u0ed7\u0ed8\5\u0459\u022d\2\u0ed8\u0ed9\5\u0445")
buf.write("\u0223\2\u0ed9\u0eda\5\u044f\u0228\2\u0eda\u0edb\5\u0441")
buf.write("\u0221\2\u0edb\u0edc\5\u044b\u0226\2\u0edc\u0edd\5\u043d")
buf.write("\u021f\2\u0edd\u02c6\3\2\2\2\u0ede\u0edf\5\u0459\u022d")
buf.write("\2\u0edf\u0ee0\5\u0445\u0223\2\u0ee0\u0ee1\5\u0467\u0234")
buf.write("\2\u0ee1\u0ee2\5\u043d\u021f\2\u0ee2\u02c8\3\2\2\2\u0ee3")
buf.write("\u0ee4\5\u0459\u022d\2\u0ee4\u0ee5\5\u0449\u0225\2\u0ee5")
buf.write("\u0ee6\5\u0445\u0223\2\u0ee6\u0ee7\5\u0453\u022a\2\u0ee7")
buf.write("\u02ca\3\2\2\2\u0ee8\u0ee9\5\u0459\u022d\2\u0ee9\u0eea")
buf.write("\5\u044d\u0227\2\u0eea\u0eeb\5\u0435\u021b\2\u0eeb\u0eec")
buf.write("\5\u044b\u0226\2\u0eec\u0eed\5\u044b\u0226\2\u0eed\u0eee")
buf.write("\5\u0445\u0223\2\u0eee\u0eef\5\u044f\u0228\2\u0eef\u0ef0")
buf.write("\5\u045b\u022e\2\u0ef0\u02cc\3\2\2\2\u0ef1\u0ef2\5\u0459")
buf.write("\u022d\2\u0ef2\u0ef3\5\u044f\u0228\2\u0ef3\u0ef4\5\u0435")
buf.write("\u021b\2\u0ef4\u0ef5\5\u0453\u022a\2\u0ef5\u0ef6\5\u0459")
buf.write("\u022d\2\u0ef6\u0ef7\5\u0443\u0222\2\u0ef7\u0ef8\5\u0451")
buf.write("\u0229\2\u0ef8\u0ef9\5\u045b\u022e\2\u0ef9\u02ce\3\2\2")
buf.write("\2\u0efa\u0efb\5\u0459\u022d\2\u0efb\u0efc\5\u0451\u0229")
buf.write("\2\u0efc\u0efd\5\u044d\u0227\2\u0efd\u0efe\5\u043d\u021f")
buf.write("\2\u0efe\u02d0\3\2\2\2\u0eff\u0f00\5\u0459\u022d\2\u0f00")
buf.write("\u0f01\5\u0453\u022a\2\u0f01\u0f02\5\u043d\u021f\2\u0f02")
buf.write("\u0f03\5\u0439\u021d\2\u0f03\u0f04\5\u0445\u0223\2\u0f04")
buf.write("\u0f05\5\u043f\u0220\2\u0f05\u0f06\5\u0445\u0223\2\u0f06")
buf.write("\u0f07\5\u0439\u021d\2\u0f07\u0f08\5\u0435\u021b\2\u0f08")
buf.write("\u0f09\5\u045b\u022e\2\u0f09\u0f0a\5\u0445\u0223\2\u0f0a")
buf.write("\u0f0b\5\u0451\u0229\2\u0f0b\u0f0c\5\u044f\u0228\2\u0f0c")
buf.write("\u02d2\3\2\2\2\u0f0d\u0f0e\5\u0459\u022d\2\u0f0e\u0f0f")
buf.write("\5\u0455\u022b\2\u0f0f\u0f10\5\u044b\u0226\2\u0f10\u0f11")
buf.write("\5\u043b\u021e\2\u0f11\u0f12\5\u0435\u021b\2\u0f12\u0f13")
buf.write("\5\u045b\u022e\2\u0f13\u0f14\5\u0435\u021b\2\u0f14\u02d4")
buf.write("\3\2\2\2\u0f15\u0f16\5\u0459\u022d\2\u0f16\u0f17\5\u0455")
buf.write("\u022b\2\u0f17\u0f18\5\u044b\u0226\2\u0f18\u0f19\5\u043d")
buf.write("\u021f\2\u0f19\u0f1a\5\u0457\u022c\2\u0f1a\u0f1b\5\u0457")
buf.write("\u022c\2\u0f1b\u0f1c\5\u0451\u0229\2\u0f1c\u0f1d\5\u0457")
buf.write("\u022c\2\u0f1d\u02d6\3\2\2\2\u0f1e\u0f1f\5\u0459\u022d")
buf.write("\2\u0f1f\u0f20\5\u045b\u022e\2\u0f20\u0f21\5\u0435\u021b")
buf.write("\2\u0f21\u0f22\5\u044f\u0228\2\u0f22\u0f23\5\u043b\u021e")
buf.write("\2\u0f23\u0f24\5\u0435\u021b\2\u0f24\u0f25\5\u044b\u0226")
buf.write("\2\u0f25\u0f26\5\u0451\u0229\2\u0f26\u0f27\5\u044f\u0228")
buf.write("\2\u0f27\u0f28\5\u043d\u021f\2\u0f28\u02d8\3\2\2\2\u0f29")
buf.write("\u0f2a\5\u0459\u022d\2\u0f2a\u0f2b\5\u045b\u022e\2\u0f2b")
buf.write("\u0f2c\5\u0435\u021b\2\u0f2c\u0f2d\5\u0457\u022c\2\u0f2d")
buf.write("\u0f2e\5\u045b\u022e\2\u0f2e\u02da\3\2\2\2\u0f2f\u0f30")
buf.write("\5\u0459\u022d\2\u0f30\u0f31\5\u045b\u022e\2\u0f31\u0f32")
buf.write("\5\u0435\u021b\2\u0f32\u0f33\5\u0457\u022c\2\u0f33\u0f34")
buf.write("\5\u045b\u022e\2\u0f34\u0f35\5\u045d\u022f\2\u0f35\u0f36")
buf.write("\5\u0453\u022a\2\u0f36\u02dc\3\2\2\2\u0f37\u0f38\5\u0459")
buf.write("\u022d\2\u0f38\u0f39\5\u045b\u022e\2\u0f39\u0f3a\5\u0435")
buf.write("\u021b\2\u0f3a\u0f3b\5\u045b\u022e\2\u0f3b\u0f3c\5\u043d")
buf.write("\u021f\2\u0f3c\u0f3d\5\u044d\u0227\2\u0f3d\u0f3e\5\u043d")
buf.write("\u021f\2\u0f3e\u0f3f\5\u044f\u0228\2\u0f3f\u0f40\5\u045b")
buf.write("\u022e\2\u0f40\u02de\3\2\2\2\u0f41\u0f42\5\u0459\u022d")
buf.write("\2\u0f42\u0f43\5\u045b\u022e\2\u0f43\u0f44\5\u0435\u021b")
buf.write("\2\u0f44\u0f45\5\u045b\u022e\2\u0f45\u0f46\5\u043d\u021f")
buf.write("\2\u0f46\u0f47\5\u044d\u0227\2\u0f47\u0f48\5\u043d\u021f")
buf.write("\2\u0f48\u0f49\5\u044f\u0228\2\u0f49\u0f4a\5\u045b\u022e")
buf.write("\2\u0f4a\u0f4b\7a\2\2\u0f4b\u0f4c\5\u0445\u0223\2\u0f4c")
buf.write("\u0f4d\5\u043b\u021e\2\u0f4d\u02e0\3\2\2\2\u0f4e\u0f4f")
buf.write("\5\u0459\u022d\2\u0f4f\u0f50\5\u045b\u022e\2\u0f50\u0f51")
buf.write("\5\u0435\u021b\2\u0f51\u0f52\5\u045b\u022e\2\u0f52\u0f53")
buf.write("\5\u0445\u0223\2\u0f53\u0f54\5\u0439\u021d\2\u0f54\u02e2")
buf.write("\3\2\2\2\u0f55\u0f56\5\u0459\u022d\2\u0f56\u0f57\5\u045b")
buf.write("\u022e\2\u0f57\u0f58\5\u0435\u021b\2\u0f58\u0f59\5\u045b")
buf.write("\u022e\2\u0f59\u0f5a\5\u0445\u0223\2\u0f5a\u0f5b\5\u0459")
buf.write("\u022d\2\u0f5b\u0f5c\5\u045b\u022e\2\u0f5c\u0f5d\5\u0445")
buf.write("\u0223\2\u0f5d\u0f5e\5\u0439\u021d\2\u0f5e\u0f5f\5\u0459")
buf.write("\u022d\2\u0f5f\u02e4\3\2\2\2\u0f60\u0f61\5\u0459\u022d")
buf.write("\2\u0f61\u0f62\5\u045b\u022e\2\u0f62\u0f63\5\u0457\u022c")
buf.write("\2\u0f63\u0f64\5\u0445\u0223\2\u0f64\u0f65\5\u044f\u0228")
buf.write("\2\u0f65\u0f66\5\u0441\u0221\2\u0f66\u02e6\3\2\2\2\u0f67")
buf.write("\u0f68\5\u0459\u022d\2\u0f68\u0f69\5\u045d\u022f\2\u0f69")
buf.write("\u0f6a\5\u0437\u021c\2\u0f6a\u0f6b\5\u044d\u0227\2\u0f6b")
buf.write("\u0f6c\5\u045d\u022f\2\u0f6c\u0f6d\5\u044b\u0226\2\u0f6d")
buf.write("\u0f6e\5\u045b\u022e\2\u0f6e\u0f6f\5\u0445\u0223\2\u0f6f")
buf.write("\u0f70\5\u0459\u022d\2\u0f70\u0f71\5\u043d\u021f\2\u0f71")
buf.write("\u0f72\5\u045b\u022e\2\u0f72\u02e8\3\2\2\2\u0f73\u0f74")
buf.write("\5\u0459\u022d\2\u0f74\u0f75\5\u045d\u022f\2\u0f75\u0f76")
buf.write("\5\u0437\u021c\2\u0f76\u0f77\5\u0453\u022a\2\u0f77\u0f78")
buf.write("\5\u0435\u021b\2\u0f78\u0f79\5\u0457\u022c\2\u0f79\u0f7a")
buf.write("\5\u045b\u022e\2\u0f7a\u0f7b\5\u0445\u0223\2\u0f7b\u0f7c")
buf.write("\5\u045b\u022e\2\u0f7c\u0f7d\5\u0445\u0223\2\u0f7d\u0f7e")
buf.write("\5\u0451\u0229\2\u0f7e\u0f7f\5\u044f\u0228\2\u0f7f\u02ea")
buf.write("\3\2\2\2\u0f80\u0f81\5\u0459\u022d\2\u0f81\u0f82\5\u045d")
buf.write("\u022f\2\u0f82\u0f83\5\u0437\u021c\2\u0f83\u0f84\5\u0459")
buf.write("\u022d\2\u0f84\u0f85\5\u045b\u022e\2\u0f85\u0f86\5\u0445")
buf.write("\u0223\2\u0f86\u0f87\5\u045b\u022e\2\u0f87\u0f88\5\u045d")
buf.write("\u022f\2\u0f88\u0f89\5\u045b\u022e\2\u0f89\u0f8a\5\u0435")
buf.write("\u021b\2\u0f8a\u0f8b\5\u0437\u021c\2\u0f8b\u0f8c\5\u044b")
buf.write("\u0226\2\u0f8c\u0f8d\5\u043d\u021f\2\u0f8d\u02ec\3\2\2")
buf.write("\2\u0f8e\u0f8f\5\u0459\u022d\2\u0f8f\u0f90\5\u045d\u022f")
buf.write("\2\u0f90\u0f91\5\u0437\u021c\2\u0f91\u0f92\5\u045b\u022e")
buf.write("\2\u0f92\u0f93\5\u0465\u0233\2\u0f93\u0f94\5\u0453\u022a")
buf.write("\2\u0f94\u0f95\5\u043d\u021f\2\u0f95\u02ee\3\2\2\2\u0f96")
buf.write("\u0f97\5\u0459\u022d\2\u0f97\u0f98\5\u045d\u022f\2\u0f98")
buf.write("\u0f99\5\u0439\u021d\2\u0f99\u0f9a\5\u0439\u021d\2\u0f9a")
buf.write("\u0f9b\5\u043d\u021f\2\u0f9b\u0f9c\5\u0459\u022d\2\u0f9c")
buf.write("\u0f9d\5\u0459\u022d\2\u0f9d\u02f0\3\2\2\2\u0f9e\u0f9f")
buf.write("\5\u0459\u022d\2\u0f9f\u0fa0\5\u045d\u022f\2\u0fa0\u0fa1")
buf.write("\5\u0459\u022d\2\u0fa1\u0fa2\5\u0453\u022a\2\u0fa2\u0fa3")
buf.write("\5\u043d\u021f\2\u0fa3\u0fa4\5\u044f\u0228\2\u0fa4\u0fa5")
buf.write("\5\u043b\u021e\2\u0fa5\u02f2\3\2\2\2\u0fa6\u0fa7\5\u045b")
buf.write("\u022e\2\u0fa7\u0fa8\5\u0435\u021b\2\u0fa8\u0fa9\5\u0437")
buf.write("\u021c\2\u0fa9\u0faa\5\u044b\u0226\2\u0faa\u0fab\5\u043d")
buf.write("\u021f\2\u0fab\u02f4\3\2\2\2\u0fac\u0fad\5\u045b\u022e")
buf.write("\2\u0fad\u0fae\5\u0443\u0222\2\u0fae\u0faf\5\u043d\u021f")
buf.write("\2\u0faf\u02f6\3\2\2\2\u0fb0\u0fb1\5\u045b\u022e\2\u0fb1")
buf.write("\u0fb2\5\u0443\u0222\2\u0fb2\u0fb3\5\u043d\u021f\2\u0fb3")
buf.write("\u0fb4\5\u044f\u0228\2\u0fb4\u02f8\3\2\2\2\u0fb5\u0fb6")
buf.write("\5\u045b\u022e\2\u0fb6\u0fb7\5\u0445\u0223\2\u0fb7\u0fb8")
buf.write("\5\u044d\u0227\2\u0fb8\u0fb9\5\u043d\u021f\2\u0fb9\u02fa")
buf.write("\3\2\2\2\u0fba\u0fbb\5\u045b\u022e\2\u0fbb\u0fbc\5\u0445")
buf.write("\u0223\2\u0fbc\u0fbd\5\u044d\u0227\2\u0fbd\u0fbe\5\u043d")
buf.write("\u021f\2\u0fbe\u0fbf\5\u0459\u022d\2\u0fbf\u0fc0\5\u045b")
buf.write("\u022e\2\u0fc0\u0fc1\5\u0435\u021b\2\u0fc1\u0fc2\5\u044d")
buf.write("\u0227\2\u0fc2\u0fc3\5\u0453\u022a\2\u0fc3\u02fc\3\2\2")
buf.write("\2\u0fc4\u0fc5\5\u045b\u022e\2\u0fc5\u0fc6\5\u0445\u0223")
buf.write("\2\u0fc6\u0fc7\5\u044d\u0227\2\u0fc7\u0fc8\5\u043d\u021f")
buf.write("\2\u0fc8\u0fc9\5\u0459\u022d\2\u0fc9\u0fca\5\u045b\u022e")
buf.write("\2\u0fca\u0fcb\5\u0435\u021b\2\u0fcb\u0fcc\5\u044d\u0227")
buf.write("\2\u0fcc\u0fcd\5\u0453\u022a\2\u0fcd\u0fce\7a\2\2\u0fce")
buf.write("\u0fcf\5\u044b\u0226\2\u0fcf\u0fd0\5\u045b\u022e\2\u0fd0")
buf.write("\u0fd1\5\u0467\u0234\2\u0fd1\u0fd2\7a\2\2\u0fd2\u0fd3")
buf.write("\5\u045d\u022f\2\u0fd3\u0fd4\5\u044f\u0228\2\u0fd4\u0fd5")
buf.write("\5\u0439\u021d\2\u0fd5\u0fd6\5\u0451\u0229\2\u0fd6\u0fd7")
buf.write("\5\u044f\u0228\2\u0fd7\u0fd8\5\u0459\u022d\2\u0fd8\u0fd9")
buf.write("\5\u045b\u022e\2\u0fd9\u0fda\5\u0457\u022c\2\u0fda\u0fdb")
buf.write("\5\u0435\u021b\2\u0fdb\u0fdc\5\u0445\u0223\2\u0fdc\u0fdd")
buf.write("\5\u044f\u0228\2\u0fdd\u0fde\5\u043d\u021f\2\u0fde\u0fdf")
buf.write("\5\u043b\u021e\2\u0fdf\u02fe\3\2\2\2\u0fe0\u0fe1\5\u045b")
buf.write("\u022e\2\u0fe1\u0fe2\5\u0445\u0223\2\u0fe2\u0fe3\5\u044d")
buf.write("\u0227\2\u0fe3\u0fe4\5\u043d\u021f\2\u0fe4\u0fe5\5\u0459")
buf.write("\u022d\2\u0fe5\u0fe6\5\u045b\u022e\2\u0fe6\u0fe7\5\u0435")
buf.write("\u021b\2\u0fe7\u0fe8\5\u044d\u0227\2\u0fe8\u0fe9\5\u0453")
buf.write("\u022a\2\u0fe9\u0fea\7a\2\2\u0fea\u0feb\5\u045b\u022e")
buf.write("\2\u0feb\u0fec\5\u0467\u0234\2\u0fec\u0fed\7a\2\2\u0fed")
buf.write("\u0fee\5\u045d\u022f\2\u0fee\u0fef\5\u044f\u0228\2\u0fef")
buf.write("\u0ff0\5\u0439\u021d\2\u0ff0\u0ff1\5\u0451\u0229\2\u0ff1")
buf.write("\u0ff2\5\u044f\u0228\2\u0ff2\u0ff3\5\u0459\u022d\2\u0ff3")
buf.write("\u0ff4\5\u045b\u022e\2\u0ff4\u0ff5\5\u0457\u022c\2\u0ff5")
buf.write("\u0ff6\5\u0435\u021b\2\u0ff6\u0ff7\5\u0445\u0223\2\u0ff7")
buf.write("\u0ff8\5\u044f\u0228\2\u0ff8\u0ff9\5\u043d\u021f\2\u0ff9")
buf.write("\u0ffa\5\u043b\u021e\2\u0ffa\u0300\3\2\2\2\u0ffb\u0ffc")
buf.write("\5\u045b\u022e\2\u0ffc\u0ffd\5\u0445\u0223\2\u0ffd\u0ffe")
buf.write("\5\u044d\u0227\2\u0ffe\u0fff\5\u043d\u021f\2\u0fff\u1000")
buf.write("\5\u0459\u022d\2\u1000\u1001\5\u045b\u022e\2\u1001\u1002")
buf.write("\5\u0435\u021b\2\u1002\u1003\5\u044d\u0227\2\u1003\u1004")
buf.write("\5\u0453\u022a\2\u1004\u1005\7a\2\2\u1005\u1006\5\u045d")
buf.write("\u022f\2\u1006\u1007\5\u044f\u0228\2\u1007\u1008\5\u0439")
buf.write("\u021d\2\u1008\u1009\5\u0451\u0229\2\u1009\u100a\5\u044f")
buf.write("\u0228\2\u100a\u100b\5\u0459\u022d\2\u100b\u100c\5\u045b")
buf.write("\u022e\2\u100c\u100d\5\u0457\u022c\2\u100d\u100e\5\u0435")
buf.write("\u021b\2\u100e\u100f\5\u0445\u0223\2\u100f\u1010\5\u044f")
buf.write("\u0228\2\u1010\u1011\5\u043d\u021f\2\u1011\u1012\5\u043b")
buf.write("\u021e\2\u1012\u0302\3\2\2\2\u1013\u1014\5\u045b\u022e")
buf.write("\2\u1014\u1015\5\u0445\u0223\2\u1015\u1016\5\u044d\u0227")
buf.write("\2\u1016\u1017\5\u043d\u021f\2\u1017\u1018\5\u0467\u0234")
buf.write("\2\u1018\u1019\5\u0451\u0229\2\u1019\u101a\5\u044f\u0228")
buf.write("\2\u101a\u101b\5\u043d\u021f\2\u101b\u101c\7a\2\2\u101c")
buf.write("\u101d\5\u0435\u021b\2\u101d\u101e\5\u0437\u021c\2\u101e")
buf.write("\u101f\5\u0437\u021c\2\u101f\u1020\5\u0457\u022c\2\u1020")
buf.write("\u0304\3\2\2\2\u1021\u1022\5\u045b\u022e\2\u1022\u1023")
buf.write("\5\u0445\u0223\2\u1023\u1024\5\u044d\u0227\2\u1024\u1025")
buf.write("\5\u043d\u021f\2\u1025\u1026\5\u0467\u0234\2\u1026\u1027")
buf.write("\5\u0451\u0229\2\u1027\u1028\5\u044f\u0228\2\u1028\u1029")
buf.write("\5\u043d\u021f\2\u1029\u102a\7a\2\2\u102a\u102b\5\u0443")
buf.write("\u0222\2\u102b\u102c\5\u0451\u0229\2\u102c\u102d\5\u045d")
buf.write("\u022f\2\u102d\u102e\5\u0457\u022c\2\u102e\u0306\3\2\2")
buf.write("\2\u102f\u1030\5\u045b\u022e\2\u1030\u1031\5\u0445\u0223")
buf.write("\2\u1031\u1032\5\u044d\u0227\2\u1032\u1033\5\u043d\u021f")
buf.write("\2\u1033\u1034\5\u0467\u0234\2\u1034\u1035\5\u0451\u0229")
buf.write("\2\u1035\u1036\5\u044f\u0228\2\u1036\u1037\5\u043d\u021f")
buf.write("\2\u1037\u1038\7a\2\2\u1038\u1039\5\u044d\u0227\2\u1039")
buf.write("\u103a\5\u0445\u0223\2\u103a\u103b\5\u044f\u0228\2\u103b")
buf.write("\u103c\5\u045d\u022f\2\u103c\u103d\5\u045b\u022e\2\u103d")
buf.write("\u103e\5\u043d\u021f\2\u103e\u0308\3\2\2\2\u103f\u1040")
buf.write("\5\u045b\u022e\2\u1040\u1041\5\u0445\u0223\2\u1041\u1042")
buf.write("\5\u044d\u0227\2\u1042\u1043\5\u043d\u021f\2\u1043\u1044")
buf.write("\5\u0467\u0234\2\u1044\u1045\5\u0451\u0229\2\u1045\u1046")
buf.write("\5\u044f\u0228\2\u1046\u1047\5\u043d\u021f\2\u1047\u1048")
buf.write("\7a\2\2\u1048\u1049\5\u0457\u022c\2\u1049\u104a\5\u043d")
buf.write("\u021f\2\u104a\u104b\5\u0441\u0221\2\u104b\u104c\5\u0445")
buf.write("\u0223\2\u104c\u104d\5\u0451\u0229\2\u104d\u104e\5\u044f")
buf.write("\u0228\2\u104e\u030a\3\2\2\2\u104f\u1050\5\u045b\u022e")
buf.write("\2\u1050\u1051\5\u0451\u0229\2\u1051\u030c\3\2\2\2\u1052")
buf.write("\u1053\5\u045b\u022e\2\u1053\u1054\5\u0457\u022c\2\u1054")
buf.write("\u1055\5\u0435\u021b\2\u1055\u1056\5\u0445\u0223\2\u1056")
buf.write("\u1057\5\u044b\u0226\2\u1057\u1058\5\u0445\u0223\2\u1058")
buf.write("\u1059\5\u044f\u0228\2\u1059\u105a\5\u0441\u0221\2\u105a")
buf.write("\u030e\3\2\2\2\u105b\u105c\5\u045b\u022e\2\u105c\u105d")
buf.write("\5\u0457\u022c\2\u105d\u105e\5\u0435\u021b\2\u105e\u105f")
buf.write("\5\u044f\u0228\2\u105f\u1060\5\u0459\u022d\2\u1060\u1061")
buf.write("\5\u0435\u021b\2\u1061\u1062\5\u0439\u021d\2\u1062\u1063")
buf.write("\5\u045b\u022e\2\u1063\u1064\5\u0445\u0223\2\u1064\u1065")
buf.write("\5\u0451\u0229\2\u1065\u1066\5\u044f\u0228\2\u1066\u0310")
buf.write("\3\2\2\2\u1067\u1068\5\u045b\u022e\2\u1068\u1069\5\u0457")
buf.write("\u022c\2\u1069\u106a\5\u0435\u021b\2\u106a\u106b\5\u044f")
buf.write("\u0228\2\u106b\u106c\5\u0459\u022d\2\u106c\u106d\5\u044b")
buf.write("\u0226\2\u106d\u106e\5\u0435\u021b\2\u106e\u106f\5\u045b")
buf.write("\u022e\2\u106f\u1070\5\u043d\u021f\2\u1070\u0312\3\2\2")
buf.write("\2\u1071\u1072\5\u045b\u022e\2\u1072\u1073\5\u0457\u022c")
buf.write("\2\u1073\u1074\5\u043d\u021f\2\u1074\u1075\5\u0435\u021b")
buf.write("\2\u1075\u1076\5\u045b\u022e\2\u1076\u0314\3\2\2\2\u1077")
buf.write("\u1078\5\u045b\u022e\2\u1078\u1079\5\u0457\u022c\2\u1079")
buf.write("\u107a\5\u0445\u0223\2\u107a\u107b\5\u0441\u0221\2\u107b")
buf.write("\u107c\5\u0441\u0221\2\u107c\u107d\5\u043d\u021f\2\u107d")
buf.write("\u107e\5\u0457\u022c\2\u107e\u0316\3\2\2\2\u107f\u1080")
buf.write("\5\u045b\u022e\2\u1080\u1081\5\u0457\u022c\2\u1081\u1082")
buf.write("\5\u0445\u0223\2\u1082\u1083\5\u044d\u0227\2\u1083\u0318")
buf.write("\3\2\2\2\u1084\u1085\5\u045b\u022e\2\u1085\u1086\5\u0457")
buf.write("\u022c\2\u1086\u1087\5\u045d\u022f\2\u1087\u1088\5\u043d")
buf.write("\u021f\2\u1088\u031a\3\2\2\2\u1089\u108a\5\u045b\u022e")
buf.write("\2\u108a\u108b\5\u0457\u022c\2\u108b\u108c\5\u045d\u022f")
buf.write("\2\u108c\u108d\5\u044f\u0228\2\u108d\u108e\5\u0439\u021d")
buf.write("\2\u108e\u108f\5\u0435\u021b\2\u108f\u1090\5\u045b\u022e")
buf.write("\2\u1090\u1091\5\u043d\u021f\2\u1091\u031c\3\2\2\2\u1092")
buf.write("\u1093\5\u045b\u022e\2\u1093\u1094\5\u0465\u0233\2\u1094")
buf.write("\u1095\5\u0453\u022a\2\u1095\u1096\5\u043d\u021f\2\u1096")
buf.write("\u031e\3\2\2\2\u1097\u1098\5\u045d\u022f\2\u1098\u1099")
buf.write("\5\u044f\u0228\2\u1099\u109a\5\u0437\u021c\2\u109a\u109b")
buf.write("\5\u0451\u0229\2\u109b\u109c\5\u045d\u022f\2\u109c\u109d")
buf.write("\5\u044f\u0228\2\u109d\u109e\5\u043b\u021e\2\u109e\u109f")
buf.write("\5\u043d\u021f\2\u109f\u10a0\5\u043b\u021e\2\u10a0\u0320")
buf.write("\3\2\2\2\u10a1\u10a2\5\u045d\u022f\2\u10a2\u10a3\5\u044f")
buf.write("\u0228\2\u10a3\u10a4\5\u043b\u021e\2\u10a4\u10a5\5\u043d")
buf.write("\u021f\2\u10a5\u10a6\5\u0457\u022c\2\u10a6\u0322\3\2\2")
buf.write("\2\u10a7\u10a8\5\u045d\u022f\2\u10a8\u10a9\5\u044f\u0228")
buf.write("\2\u10a9\u10aa\5\u0445\u0223\2\u10aa\u10ab\5\u0451\u0229")
buf.write("\2\u10ab\u10ac\5\u044f\u0228\2\u10ac\u0324\3\2\2\2\u10ad")
buf.write("\u10ae\5\u045d\u022f\2\u10ae\u10af\5\u044f\u0228\2\u10af")
buf.write("\u10b0\5\u0445\u0223\2\u10b0\u10b1\5\u0455\u022b\2\u10b1")
buf.write("\u10b2\5\u045d\u022f\2\u10b2\u10b3\5\u043d\u021f\2\u10b3")
buf.write("\u0326\3\2\2\2\u10b4\u10b5\5\u045d\u022f\2\u10b5\u10b6")
buf.write("\5\u044f\u0228\2\u10b6\u10b7\5\u044b\u0226\2\u10b7\u10b8")
buf.write("\5\u0445\u0223\2\u10b8\u10b9\5\u044d\u0227\2\u10b9\u10ba")
buf.write("\5\u0445\u0223\2\u10ba\u10bb\5\u045b\u022e\2\u10bb\u10bc")
buf.write("\5\u043d\u021f\2\u10bc\u10bd\5\u043b\u021e\2\u10bd\u0328")
buf.write("\3\2\2\2\u10be\u10bf\5\u045d\u022f\2\u10bf\u10c0\5\u044f")
buf.write("\u0228\2\u10c0\u10c1\5\u0453\u022a\2\u10c1\u10c2\5\u0445")
buf.write("\u0223\2\u10c2\u10c3\5\u045f\u0230\2\u10c3\u10c4\5\u0451")
buf.write("\u0229\2\u10c4\u10c5\5\u045b\u022e\2\u10c5\u032a\3\2\2")
buf.write("\2\u10c6\u10c7\5\u045d\u022f\2\u10c7\u10c8\5\u044f\u0228")
buf.write("\2\u10c8\u10c9\5\u045b\u022e\2\u10c9\u10ca\5\u0445\u0223")
buf.write("\2\u10ca\u10cb\5\u044b\u0226\2\u10cb\u032c\3\2\2\2\u10cc")
buf.write("\u10cd\5\u045d\u022f\2\u10cd\u10ce\5\u0453\u022a\2\u10ce")
buf.write("\u10cf\5\u043b\u021e\2\u10cf\u10d0\5\u0435\u021b\2\u10d0")
buf.write("\u10d1\5\u045b\u022e\2\u10d1\u10d2\5\u043d\u021f\2\u10d2")
buf.write("\u032e\3\2\2\2\u10d3\u10d4\5\u045d\u022f\2\u10d4\u10d5")
buf.write("\5\u0453\u022a\2\u10d5\u10d6\5\u043b\u021e\2\u10d6\u10d7")
buf.write("\5\u0435\u021b\2\u10d7\u10d8\5\u045b\u022e\2\u10d8\u10d9")
buf.write("\5\u043d\u021f\2\u10d9\u10da\5\u043b\u021e\2\u10da\u0330")
buf.write("\3\2\2\2\u10db\u10dc\5\u045d\u022f\2\u10dc\u10dd\5\u0453")
buf.write("\u022a\2\u10dd\u10de\5\u0459\u022d\2\u10de\u10df\5\u043d")
buf.write("\u021f\2\u10df\u10e0\5\u0457\u022c\2\u10e0\u10e1\5\u045b")
buf.write("\u022e\2\u10e1\u0332\3\2\2\2\u10e2\u10e3\5\u045d\u022f")
buf.write("\2\u10e3\u10e4\5\u0457\u022c\2\u10e4\u10e5\5\u0451\u0229")
buf.write("\2\u10e5\u10e6\5\u0461\u0231\2\u10e6\u10e7\5\u0445\u0223")
buf.write("\2\u10e7\u10e8\5\u043b\u021e\2\u10e8\u0334\3\2\2\2\u10e9")
buf.write("\u10ea\5\u045d\u022f\2\u10ea\u10eb\5\u0459\u022d\2\u10eb")
buf.write("\u10ec\5\u043d\u021f\2\u10ec\u0336\3\2\2\2\u10ed\u10ee")
buf.write("\5\u045d\u022f\2\u10ee\u10ef\5\u0459\u022d\2\u10ef\u10f0")
buf.write("\5\u0445\u0223\2\u10f0\u10f1\5\u044f\u0228\2\u10f1\u10f2")
buf.write("\5\u0441\u0221\2\u10f2\u0338\3\2\2\2\u10f3\u10f4\5\u045f")
buf.write("\u0230\2\u10f4\u10f5\5\u0435\u021b\2\u10f5\u10f6\5\u044b")
buf.write("\u0226\2\u10f6\u10f7\5\u0445\u0223\2\u10f7\u10f8\5\u043b")
buf.write("\u021e\2\u10f8\u10f9\5\u0435\u021b\2\u10f9\u10fa\5\u045b")
buf.write("\u022e\2\u10fa\u10fb\5\u043d\u021f\2\u10fb\u033a\3\2\2")
buf.write("\2\u10fc\u10fd\5\u045f\u0230\2\u10fd\u10fe\5\u0435\u021b")
buf.write("\2\u10fe\u10ff\5\u044b\u0226\2\u10ff\u1100\5\u045d\u022f")
buf.write("\2\u1100\u1101\5\u043d\u021f\2\u1101\u033c\3\2\2\2\u1102")
buf.write("\u1103\5\u045f\u0230\2\u1103\u1104\5\u0435\u021b\2\u1104")
buf.write("\u1105\5\u044b\u0226\2\u1105\u1106\5\u045d\u022f\2\u1106")
buf.write("\u1107\5\u043d\u021f\2\u1107\u1108\5\u0459\u022d\2\u1108")
buf.write("\u033e\3\2\2\2\u1109\u110a\5\u045f\u0230\2\u110a\u110b")
buf.write("\5\u0435\u021b\2\u110b\u110c\5\u0457\u022c\2\u110c\u110d")
buf.write("\5\u0439\u021d\2\u110d\u110e\5\u0443\u0222\2\u110e\u110f")
buf.write("\5\u0435\u021b\2\u110f\u1110\5\u0457\u022c\2\u1110\u0340")
buf.write("\3\2\2\2\u1111\u1112\5\u045f\u0230\2\u1112\u1113\5\u0435")
buf.write("\u021b\2\u1113\u1114\5\u0457\u022c\2\u1114\u1115\5\u0439")
buf.write("\u021d\2\u1115\u1116\5\u0443\u0222\2\u1116\u1117\5\u0435")
buf.write("\u021b\2\u1117\u1118\5\u0457\u022c\2\u1118\u1119\7\64")
buf.write("\2\2\u1119\u0342\3\2\2\2\u111a\u111b\5\u045f\u0230\2\u111b")
buf.write("\u111c\5\u0435\u021b\2\u111c\u111d\5\u0457\u022c\2\u111d")
buf.write("\u111e\5\u0445\u0223\2\u111e\u111f\5\u0435\u021b\2\u111f")
buf.write("\u1120\5\u0437\u021c\2\u1120\u1121\5\u044b\u0226\2\u1121")
buf.write("\u1122\5\u043d\u021f\2\u1122\u0344\3\2\2\2\u1123\u1124")
buf.write("\5\u045f\u0230\2\u1124\u1125\5\u0435\u021b\2\u1125\u1126")
buf.write("\5\u0457\u022c\2\u1126\u1127\5\u0457\u022c\2\u1127\u1128")
buf.write("\5\u0435\u021b\2\u1128\u1129\5\u0465\u0233\2\u1129\u0346")
buf.write("\3\2\2\2\u112a\u112b\5\u045f\u0230\2\u112b\u112c\5\u0435")
buf.write("\u021b\2\u112c\u112d\5\u0457\u022c\2\u112d\u112e\5\u0465")
buf.write("\u0233\2\u112e\u112f\5\u0445\u0223\2\u112f\u1130\5\u044f")
buf.write("\u0228\2\u1130\u1131\5\u0441\u0221\2\u1131\u0348\3\2\2")
buf.write("\2\u1132\u1133\5\u045f\u0230\2\u1133\u1134\5\u043d\u021f")
buf.write("\2\u1134\u1135\5\u0457\u022c\2\u1135\u1136\5\u0459\u022d")
buf.write("\2\u1136\u1137\5\u0445\u0223\2\u1137\u1138\5\u0451\u0229")
buf.write("\2\u1138\u1139\5\u044f\u0228\2\u1139\u034a\3\2\2\2\u113a")
buf.write("\u113b\5\u045f\u0230\2\u113b\u113c\5\u043d\u021f\2\u113c")
buf.write("\u113d\5\u0457\u022c\2\u113d\u113e\5\u0459\u022d\2\u113e")
buf.write("\u113f\5\u0445\u0223\2\u113f\u1140\5\u0451\u0229\2\u1140")
buf.write("\u1141\5\u044f\u0228\2\u1141\u1142\5\u0459\u022d\2\u1142")
buf.write("\u034c\3\2\2\2\u1143\u1144\5\u0461\u0231\2\u1144\u1145")
buf.write("\5\u0435\u021b\2\u1145\u1146\5\u0445\u0223\2\u1146\u1147")
buf.write("\5\u045b\u022e\2\u1147\u034e\3\2\2\2\u1148\u1149\5\u0461")
buf.write("\u0231\2\u1149\u114a\5\u0435\u021b\2\u114a\u114b\5\u0457")
buf.write("\u022c\2\u114b\u114c\5\u044f\u0228\2\u114c\u114d\5\u0445")
buf.write("\u0223\2\u114d\u114e\5\u044f\u0228\2\u114e\u114f\5\u0441")
buf.write("\u0221\2\u114f\u0350\3\2\2\2\u1150\u1151\5\u0461\u0231")
buf.write("\2\u1151\u1152\5\u043d\u021f\2\u1152\u1153\5\u044b\u0226")
buf.write("\2\u1153\u1154\5\u044b\u0226\2\u1154\u1155\5\u043f\u0220")
buf.write("\2\u1155\u1156\5\u0451\u0229\2\u1156\u1157\5\u0457\u022c")
buf.write("\2\u1157\u1158\5\u044d\u0227\2\u1158\u1159\5\u043d\u021f")
buf.write("\2\u1159\u115a\5\u043b\u021e\2\u115a\u0352\3\2\2\2\u115b")
buf.write("\u115c\5\u0461\u0231\2\u115c\u115d\5\u0443\u0222\2\u115d")
buf.write("\u115e\5\u043d\u021f\2\u115e\u115f\5\u044f\u0228\2\u115f")
buf.write("\u0354\3\2\2\2\u1160\u1161\5\u0461\u0231\2\u1161\u1162")
buf.write("\5\u0443\u0222\2\u1162\u1163\5\u043d\u021f\2\u1163\u1164")
buf.write("\5\u044f\u0228\2\u1164\u1165\5\u043d\u021f\2\u1165\u1166")
buf.write("\5\u045f\u0230\2\u1166\u1167\5\u043d\u021f\2\u1167\u1168")
buf.write("\5\u0457\u022c\2\u1168\u0356\3\2\2\2\u1169\u116a\5\u0461")
buf.write("\u0231\2\u116a\u116b\5\u0443\u0222\2\u116b\u116c\5\u043d")
buf.write("\u021f\2\u116c\u116d\5\u0457\u022c\2\u116d\u116e\5\u043d")
buf.write("\u021f\2\u116e\u0358\3\2\2\2\u116f\u1170\5\u0461\u0231")
buf.write("\2\u1170\u1171\5\u0443\u0222\2\u1171\u1172\5\u0445\u0223")
buf.write("\2\u1172\u1173\5\u044b\u0226\2\u1173\u1174\5\u043d\u021f")
buf.write("\2\u1174\u035a\3\2\2\2\u1175\u1176\5\u0461\u0231\2\u1176")
buf.write("\u1177\5\u0445\u0223\2\u1177\u1178\5\u045b\u022e\2\u1178")
buf.write("\u1179\5\u0443\u0222\2\u1179\u035c\3\2\2\2\u117a\u117b")
buf.write("\5\u0461\u0231\2\u117b\u117c\5\u0445\u0223\2\u117c\u117d")
buf.write("\5\u045b\u022e\2\u117d\u117e\5\u0443\u0222\2\u117e\u117f")
buf.write("\5\u0445\u0223\2\u117f\u1180\5\u044f\u0228\2\u1180\u035e")
buf.write("\3\2\2\2\u1181\u1182\5\u0461\u0231\2\u1182\u1183\5\u0451")
buf.write("\u0229\2\u1183\u1184\5\u0457\u022c\2\u1184\u1185\5\u0449")
buf.write("\u0225\2\u1185\u0360\3\2\2\2\u1186\u1187\5\u0461\u0231")
buf.write("\2\u1187\u1188\5\u0457\u022c\2\u1188\u1189\5\u0445\u0223")
buf.write("\2\u1189\u118a\5\u045b\u022e\2\u118a\u118b\5\u043d\u021f")
buf.write("\2\u118b\u0362\3\2\2\2\u118c\u118d\5\u0463\u0232\2\u118d")
buf.write("\u118e\5\u044d\u0227\2\u118e\u118f\5\u044b\u0226\2\u118f")
buf.write("\u0364\3\2\2\2\u1190\u1191\5\u0463\u0232\2\u1191\u1192")
buf.write("\5\u044d\u0227\2\u1192\u1193\5\u044b\u0226\2\u1193\u1194")
buf.write("\5\u0435\u021b\2\u1194\u1195\5\u0441\u0221\2\u1195\u1196")
buf.write("\5\u0441\u0221\2\u1196\u0366\3\2\2\2\u1197\u1198\5\u0463")
buf.write("\u0232\2\u1198\u1199\5\u044d\u0227\2\u1199\u119a\5\u044b")
buf.write("\u0226\2\u119a\u119b\5\u0435\u021b\2\u119b\u119c\5\u045b")
buf.write("\u022e\2\u119c\u119d\5\u045b\u022e\2\u119d\u119e\5\u0457")
buf.write("\u022c\2\u119e\u119f\5\u0445\u0223\2\u119f\u11a0\5\u0437")
buf.write("\u021c\2\u11a0\u11a1\5\u045d\u022f\2\u11a1\u11a2\5\u045b")
buf.write("\u022e\2\u11a2\u11a3\5\u043d\u021f\2\u11a3\u11a4\5\u0459")
buf.write("\u022d\2\u11a4\u0368\3\2\2\2\u11a5\u11a6\5\u0463\u0232")
buf.write("\2\u11a6\u11a7\5\u044d\u0227\2\u11a7\u11a8\5\u044b\u0226")
buf.write("\2\u11a8\u11a9\5\u0439\u021d\2\u11a9\u11aa\5\u0435\u021b")
buf.write("\2\u11aa\u11ab\5\u0459\u022d\2\u11ab\u11ac\5\u045b\u022e")
buf.write("\2\u11ac\u036a\3\2\2\2\u11ad\u11ae\5\u0463\u0232\2\u11ae")
buf.write("\u11af\5\u044d\u0227\2\u11af\u11b0\5\u044b\u0226\2\u11b0")
buf.write("\u11b1\5\u0439\u021d\2\u11b1\u11b2\5\u0451\u0229\2\u11b2")
buf.write("\u11b3\5\u044b\u0226\2\u11b3\u11b4\5\u0435\u021b\2\u11b4")
buf.write("\u11b5\5\u045b\u022e\2\u11b5\u11b6\5\u045b\u022e\2\u11b6")
buf.write("\u11b7\5\u045f\u0230\2\u11b7\u11b8\5\u0435\u021b\2\u11b8")
buf.write("\u11b9\5\u044b\u0226\2\u11b9\u036c\3\2\2\2\u11ba\u11bb")
buf.write("\5\u0463\u0232\2\u11bb\u11bc\5\u044d\u0227\2\u11bc\u11bd")
buf.write("\5\u044b\u0226\2\u11bd\u11be\5\u043d\u021f\2\u11be\u11bf")
buf.write("\5\u044b\u0226\2\u11bf\u11c0\5\u043d\u021f\2\u11c0\u11c1")
buf.write("\5\u044d\u0227\2\u11c1\u11c2\5\u043d\u021f\2\u11c2\u11c3")
buf.write("\5\u044f\u0228\2\u11c3\u11c4\5\u045b\u022e\2\u11c4\u036e")
buf.write("\3\2\2\2\u11c5\u11c6\5\u0463\u0232\2\u11c6\u11c7\5\u044d")
buf.write("\u0227\2\u11c7\u11c8\5\u044b\u0226\2\u11c8\u11c9\5\u043d")
buf.write("\u021f\2\u11c9\u11ca\5\u0463\u0232\2\u11ca\u11cb\5\u0445")
buf.write("\u0223\2\u11cb\u11cc\5\u0459\u022d\2\u11cc\u11cd\5\u045b")
buf.write("\u022e\2\u11cd\u11ce\5\u0459\u022d\2\u11ce\u0370\3\2\2")
buf.write("\2\u11cf\u11d0\5\u0463\u0232\2\u11d0\u11d1\5\u044d\u0227")
buf.write("\2\u11d1\u11d2\5\u044b\u0226\2\u11d2\u11d3\5\u043f\u0220")
buf.write("\2\u11d3\u11d4\5\u0451\u0229\2\u11d4\u11d5\5\u0457\u022c")
buf.write("\2\u11d5\u11d6\5\u043d\u021f\2\u11d6\u11d7\5\u0459\u022d")
buf.write("\2\u11d7\u11d8\5\u045b\u022e\2\u11d8\u0372\3\2\2\2\u11d9")
buf.write("\u11da\5\u0463\u0232\2\u11da\u11db\5\u044d\u0227\2\u11db")
buf.write("\u11dc\5\u044b\u0226\2\u11dc\u11dd\5\u044f\u0228\2\u11dd")
buf.write("\u11de\5\u0435\u021b\2\u11de\u11df\5\u044d\u0227\2\u11df")
buf.write("\u11e0\5\u043d\u021f\2\u11e0\u11e1\5\u0459\u022d\2\u11e1")
buf.write("\u11e2\5\u0453\u022a\2\u11e2\u11e3\5\u0435\u021b\2\u11e3")
buf.write("\u11e4\5\u0439\u021d\2\u11e4\u11e5\5\u043d\u021f\2\u11e5")
buf.write("\u11e6\5\u0459\u022d\2\u11e6\u0374\3\2\2\2\u11e7\u11e8")
buf.write("\5\u0463\u0232\2\u11e8\u11e9\5\u044d\u0227\2\u11e9\u11ea")
buf.write("\5\u044b\u0226\2\u11ea\u11eb\5\u0453\u022a\2\u11eb\u11ec")
buf.write("\5\u0435\u021b\2\u11ec\u11ed\5\u0457\u022c\2\u11ed\u11ee")
buf.write("\5\u0459\u022d\2\u11ee\u11ef\5\u043d\u021f\2\u11ef\u0376")
buf.write("\3\2\2\2\u11f0\u11f1\5\u0463\u0232\2\u11f1\u11f2\5\u044d")
buf.write("\u0227\2\u11f2\u11f3\5\u044b\u0226\2\u11f3\u11f4\5\u0453")
buf.write("\u022a\2\u11f4\u11f5\5\u0445\u0223\2\u11f5\u0378\3\2\2")
buf.write("\2\u11f6\u11f7\5\u0463\u0232\2\u11f7\u11f8\5\u044d\u0227")
buf.write("\2\u11f8\u11f9\5\u044b\u0226\2\u11f9\u11fa\5\u0455\u022b")
buf.write("\2\u11fa\u11fb\5\u045d\u022f\2\u11fb\u11fc\5\u043d\u021f")
buf.write("\2\u11fc\u11fd\5\u0457\u022c\2\u11fd\u11fe\5\u0465\u0233")
buf.write("\2\u11fe\u037a\3\2\2\2\u11ff\u1200\5\u0463\u0232\2\u1200")
buf.write("\u1201\5\u044d\u0227\2\u1201\u1202\5\u044b\u0226\2\u1202")
buf.write("\u1203\5\u0457\u022c\2\u1203\u1204\5\u0451\u0229\2\u1204")
buf.write("\u1205\5\u0451\u0229\2\u1205\u1206\5\u045b\u022e\2\u1206")
buf.write("\u037c\3\2\2\2\u1207\u1208\5\u0463\u0232\2\u1208\u1209")
buf.write("\5\u044d\u0227\2\u1209\u120a\5\u044b\u0226\2\u120a\u120b")
buf.write("\5\u0459\u022d\2\u120b\u120c\5\u043d\u021f\2\u120c\u120d")
buf.write("\5\u0457\u022c\2\u120d\u120e\5\u0445\u0223\2\u120e\u120f")
buf.write("\5\u0435\u021b\2\u120f\u1210\5\u044b\u0226\2\u1210\u1211")
buf.write("\5\u0445\u0223\2\u1211\u1212\5\u0467\u0234\2\u1212\u1213")
buf.write("\5\u043d\u021f\2\u1213\u037e\3\2\2\2\u1214\u1215\5\u0463")
buf.write("\u0232\2\u1215\u1216\5\u044d\u0227\2\u1216\u1217\5\u044b")
buf.write("\u0226\2\u1217\u1218\5\u045b\u022e\2\u1218\u1219\5\u0435")
buf.write("\u021b\2\u1219\u121a\5\u0437\u021c\2\u121a\u121b\5\u044b")
buf.write("\u0226\2\u121b\u121c\5\u043d\u021f\2\u121c\u0380\3\2\2")
buf.write("\2\u121d\u121e\5\u0465\u0233\2\u121e\u121f\5\u043d\u021f")
buf.write("\2\u121f\u1220\5\u0435\u021b\2\u1220\u1221\5\u0457\u022c")
buf.write("\2\u1221\u0382\3\2\2\2\u1222\u1223\5\u0465\u0233\2\u1223")
buf.write("\u1224\5\u043d\u021f\2\u1224\u1225\5\u0459\u022d\2\u1225")
buf.write("\u0384\3\2\2\2\u1226\u1227\5\u0465\u0233\2\u1227\u1228")
buf.write("\5\u044d\u0227\2\u1228\u1229\5\u0445\u0223\2\u1229\u122a")
buf.write("\5\u044f\u0228\2\u122a\u122b\5\u045b\u022e\2\u122b\u122c")
buf.write("\5\u043d\u021f\2\u122c\u122d\5\u0457\u022c\2\u122d\u122e")
buf.write("\5\u045f\u0230\2\u122e\u122f\5\u0435\u021b\2\u122f\u1230")
buf.write("\5\u044b\u0226\2\u1230\u1231\7a\2\2\u1231\u1232\5\u045d")
buf.write("\u022f\2\u1232\u1233\5\u044f\u0228\2\u1233\u1234\5\u0439")
buf.write("\u021d\2\u1234\u1235\5\u0451\u0229\2\u1235\u1236\5\u044f")
buf.write("\u0228\2\u1236\u1237\5\u0459\u022d\2\u1237\u1238\5\u045b")
buf.write("\u022e\2\u1238\u1239\5\u0457\u022c\2\u1239\u123a\5\u0435")
buf.write("\u021b\2\u123a\u123b\5\u0445\u0223\2\u123b\u123c\5\u044f")
buf.write("\u0228\2\u123c\u123d\5\u043d\u021f\2\u123d\u123e\5\u043b")
buf.write("\u021e\2\u123e\u0386\3\2\2\2\u123f\u1240\5\u0467\u0234")
buf.write("\2\u1240\u1241\5\u0451\u0229\2\u1241\u1242\5\u044f\u0228")
buf.write("\2\u1242\u1243\5\u043d\u021f\2\u1243\u0388\3\2\2\2\u1244")
buf.write("\u1245\5\u0453\u022a\2\u1245\u1246\5\u0457\u022c\2\u1246")
buf.write("\u1247\5\u043d\u021f\2\u1247\u1248\5\u043b\u021e\2\u1248")
buf.write("\u1249\5\u0445\u0223\2\u1249\u124a\5\u0439\u021d\2\u124a")
buf.write("\u124b\5\u045b\u022e\2\u124b\u124c\5\u0445\u0223\2\u124c")
buf.write("\u124d\5\u0451\u0229\2\u124d\u124e\5\u044f\u0228\2\u124e")
buf.write("\u038a\3\2\2\2\u124f\u1250\5\u0453\u022a\2\u1250\u1251")
buf.write("\5\u0457\u022c\2\u1251\u1252\5\u043d\u021f\2\u1252\u1253")
buf.write("\5\u043b\u021e\2\u1253\u1254\5\u0445\u0223\2\u1254\u1255")
buf.write("\5\u0439\u021d\2\u1255\u1256\5\u045b\u022e\2\u1256\u1257")
buf.write("\5\u0445\u0223\2\u1257\u1258\5\u0451\u0229\2\u1258\u1259")
buf.write("\5\u044f\u0228\2\u1259\u125a\7a\2\2\u125a\u125b\5\u0437")
buf.write("\u021c\2\u125b\u125c\5\u0451\u0229\2\u125c\u125d\5\u045d")
buf.write("\u022f\2\u125d\u125e\5\u044f\u0228\2\u125e\u125f\5\u043b")
buf.write("\u021e\2\u125f\u1260\5\u0459\u022d\2\u1260\u038c\3\2\2")
buf.write("\2\u1261\u1262\5\u0453\u022a\2\u1262\u1263\5\u0457\u022c")
buf.write("\2\u1263\u1264\5\u043d\u021f\2\u1264\u1265\5\u043b\u021e")
buf.write("\2\u1265\u1266\5\u0445\u0223\2\u1266\u1267\5\u0439\u021d")
buf.write("\2\u1267\u1268\5\u045b\u022e\2\u1268\u1269\5\u0445\u0223")
buf.write("\2\u1269\u126a\5\u0451\u0229\2\u126a\u126b\5\u044f\u0228")
buf.write("\2\u126b\u126c\7a\2\2\u126c\u126d\5\u0439\u021d\2\u126d")
buf.write("\u126e\5\u0451\u0229\2\u126e\u126f\5\u0459\u022d\2\u126f")
buf.write("\u1270\5\u045b\u022e\2\u1270\u038e\3\2\2\2\u1271\u1272")
buf.write("\5\u0453\u022a\2\u1272\u1273\5\u0457\u022c\2\u1273\u1274")
buf.write("\5\u043d\u021f\2\u1274\u1275\5\u043b\u021e\2\u1275\u1276")
buf.write("\5\u0445\u0223\2\u1276\u1277\5\u0439\u021d\2\u1277\u1278")
buf.write("\5\u045b\u022e\2\u1278\u1279\5\u0445\u0223\2\u1279\u127a")
buf.write("\5\u0451\u0229\2\u127a\u127b\5\u044f\u0228\2\u127b\u127c")
buf.write("\7a\2\2\u127c\u127d\5\u043b\u021e\2\u127d\u127e\5\u043d")
buf.write("\u021f\2\u127e\u127f\5\u045b\u022e\2\u127f\u1280\5\u0435")
buf.write("\u021b\2\u1280\u1281\5\u0445\u0223\2\u1281\u1282\5\u044b")
buf.write("\u0226\2\u1282\u1283\5\u0459\u022d\2\u1283\u0390\3\2\2")
buf.write("\2\u1284\u1285\5\u0453\u022a\2\u1285\u1286\5\u0457\u022c")
buf.write("\2\u1286\u1287\5\u043d\u021f\2\u1287\u1288\5\u043b\u021e")
buf.write("\2\u1288\u1289\5\u0445\u0223\2\u1289\u128a\5\u0439\u021d")
buf.write("\2\u128a\u128b\5\u045b\u022e\2\u128b\u128c\5\u0445\u0223")
buf.write("\2\u128c\u128d\5\u0451\u0229\2\u128d\u128e\5\u044f\u0228")
buf.write("\2\u128e\u128f\7a\2\2\u128f\u1290\5\u0453\u022a\2\u1290")
buf.write("\u1291\5\u0457\u022c\2\u1291\u1292\5\u0451\u0229\2\u1292")
buf.write("\u1293\5\u0437\u021c\2\u1293\u1294\5\u0435\u021b\2\u1294")
buf.write("\u1295\5\u0437\u021c\2\u1295\u1296\5\u0445\u0223\2\u1296")
buf.write("\u1297\5\u044b\u0226\2\u1297\u1298\5\u0445\u0223\2\u1298")
buf.write("\u1299\5\u045b\u022e\2\u1299\u129a\5\u0465\u0233\2\u129a")
buf.write("\u0392\3\2\2\2\u129b\u129c\5\u0453\u022a\2\u129c\u129d")
buf.write("\5\u0457\u022c\2\u129d\u129e\5\u043d\u021f\2\u129e\u129f")
buf.write("\5\u043b\u021e\2\u129f\u12a0\5\u0445\u0223\2\u12a0\u12a1")
buf.write("\5\u0439\u021d\2\u12a1\u12a2\5\u045b\u022e\2\u12a2\u12a3")
buf.write("\5\u0445\u0223\2\u12a3\u12a4\5\u0451\u0229\2\u12a4\u12a5")
buf.write("\5\u044f\u0228\2\u12a5\u12a6\7a\2\2\u12a6\u12a7\5\u0459")
buf.write("\u022d\2\u12a7\u12a8\5\u043d\u021f\2\u12a8\u12a9\5\u045b")
buf.write("\u022e\2\u12a9\u0394\3\2\2\2\u12aa\u12ab\5\u0439\u021d")
buf.write("\2\u12ab\u12ac\5\u045d\u022f\2\u12ac\u12ad\5\u044d\u0227")
buf.write("\2\u12ad\u12ae\5\u043d\u021f\2\u12ae\u12af\7a\2\2\u12af")
buf.write("\u12b0\5\u043b\u021e\2\u12b0\u12b1\5\u0445\u0223\2\u12b1")
buf.write("\u12b2\5\u0459\u022d\2\u12b2\u12b3\5\u045b\u022e\2\u12b3")
buf.write("\u0396\3\2\2\2\u12b4\u12b5\5\u043b\u021e\2\u12b5\u12b6")
buf.write("\5\u043d\u021f\2\u12b6\u12b7\5\u044f\u0228\2\u12b7\u12b8")
buf.write("\5\u0459\u022d\2\u12b8\u12b9\5\u043d\u021f\2\u12b9\u12ba")
buf.write("\7a\2\2\u12ba\u12bb\5\u0457\u022c\2\u12bb\u12bc\5\u0435")
buf.write("\u021b\2\u12bc\u12bd\5\u044f\u0228\2\u12bd\u12be\5\u0449")
buf.write("\u0225\2\u12be\u0398\3\2\2\2\u12bf\u12c0\5\u044b\u0226")
buf.write("\2\u12c0\u12c1\5\u0445\u0223\2\u12c1\u12c2\5\u0459\u022d")
buf.write("\2\u12c2\u12c3\5\u045b\u022e\2\u12c3\u12c4\5\u0435\u021b")
buf.write("\2\u12c4\u12c5\5\u0441\u0221\2\u12c5\u12c6\5\u0441\u0221")
buf.write("\2\u12c6\u039a\3\2\2\2\u12c7\u12c8\5\u0453\u022a\2\u12c8")
buf.write("\u12c9\5\u043d\u021f\2\u12c9\u12ca\5\u0457\u022c\2\u12ca")
buf.write("\u12cb\5\u0439\u021d\2\u12cb\u12cc\5\u043d\u021f\2\u12cc")
buf.write("\u12cd\5\u044f\u0228\2\u12cd\u12ce\5\u045b\u022e\2\u12ce")
buf.write("\u12cf\7a\2\2\u12cf\u12d0\5\u0457\u022c\2\u12d0\u12d1")
buf.write("\5\u0435\u021b\2\u12d1\u12d2\5\u044f\u0228\2\u12d2\u12d3")
buf.write("\5\u0449\u0225\2\u12d3\u039c\3\2\2\2\u12d4\u12d5\5\u0453")
buf.write("\u022a\2\u12d5\u12d6\5\u043d\u021f\2\u12d6\u12d7\5\u0457")
buf.write("\u022c\2\u12d7\u12d8\5\u0439\u021d\2\u12d8\u12d9\5\u043d")
buf.write("\u021f\2\u12d9\u12da\5\u044f\u0228\2\u12da\u12db\5\u045b")
buf.write("\u022e\2\u12db\u12dc\5\u0445\u0223\2\u12dc\u12dd\5\u044b")
buf.write("\u0226\2\u12dd\u12de\5\u043d\u021f\2\u12de\u12df\7a\2")
buf.write("\2\u12df\u12e0\5\u0439\u021d\2\u12e0\u12e1\5\u0451\u0229")
buf.write("\2\u12e1\u12e2\5\u044f\u0228\2\u12e2\u12e3\5\u045b\u022e")
buf.write("\2\u12e3\u039e\3\2\2\2\u12e4\u12e5\5\u0453\u022a\2\u12e5")
buf.write("\u12e6\5\u043d\u021f\2\u12e6\u12e7\5\u0457\u022c\2\u12e7")
buf.write("\u12e8\5\u0439\u021d\2\u12e8\u12e9\5\u043d\u021f\2\u12e9")
buf.write("\u12ea\5\u044f\u0228\2\u12ea\u12eb\5\u045b\u022e\2\u12eb")
buf.write("\u12ec\5\u0445\u0223\2\u12ec\u12ed\5\u044b\u0226\2\u12ed")
buf.write("\u12ee\5\u043d\u021f\2\u12ee\u12ef\7a\2\2\u12ef\u12f0")
buf.write("\5\u043b\u021e\2\u12f0\u12f1\5\u0445\u0223\2\u12f1\u12f2")
buf.write("\5\u0459\u022d\2\u12f2\u12f3\5\u0439\u021d\2\u12f3\u03a0")
buf.write("\3\2\2\2\u12f4\u12f5\5\u0457\u022c\2\u12f5\u12f6\5\u0435")
buf.write("\u021b\2\u12f6\u12f7\5\u044f\u0228\2\u12f7\u12f8\5\u0449")
buf.write("\u0225\2\u12f8\u03a2\3\2\2\2\u12f9\u12fa\5\u0435\u021b")
buf.write("\2\u12fa\u12fb\5\u045f\u0230\2\u12fb\u12fc\5\u0441\u0221")
buf.write("\2\u12fc\u03a4\3\2\2\2\u12fd\u12fe\5\u0439\u021d\2\u12fe")
buf.write("\u12ff\5\u0451\u0229\2\u12ff\u1300\5\u0457\u022c\2\u1300")
buf.write("\u1301\5\u0457\u022c\2\u1301\u03a6\3\2\2\2\u1302\u1303")
buf.write("\5\u044b\u0226\2\u1303\u1304\5\u0435\u021b\2\u1304\u1305")
buf.write("\5\u0441\u0221\2\u1305\u03a8\3\2\2\2\u1306\u1307\5\u044b")
buf.write("\u0226\2\u1307\u1308\5\u043d\u021f\2\u1308\u1309\5\u0435")
buf.write("\u021b\2\u1309\u130a\5\u043b\u021e\2\u130a\u03aa\3\2\2")
buf.write("\2\u130b\u130c\5\u044d\u0227\2\u130c\u130d\5\u0435\u021b")
buf.write("\2\u130d\u130e\5\u0463\u0232\2\u130e\u03ac\3\2\2\2\u130f")
buf.write("\u1310\5\u044d\u0227\2\u1310\u1311\5\u043d\u021f\2\u1311")
buf.write("\u1312\5\u043b\u021e\2\u1312\u1313\5\u0445\u0223\2\u1313")
buf.write("\u1314\5\u0435\u021b\2\u1314\u1315\5\u044f\u0228\2\u1315")
buf.write("\u03ae\3\2\2\2\u1316\u1317\5\u044d\u0227\2\u1317\u1318")
buf.write("\5\u0445\u0223\2\u1318\u1319\5\u044f\u0228\2\u1319\u03b0")
buf.write("\3\2\2\2\u131a\u131b\5\u044f\u0228\2\u131b\u131c\5\u045b")
buf.write("\u022e\2\u131c\u131d\5\u0445\u0223\2\u131d\u131e\5\u044b")
buf.write("\u0226\2\u131e\u131f\5\u043d\u021f\2\u131f\u03b2\3\2\2")
buf.write("\2\u1320\u1321\5\u0457\u022c\2\u1321\u1322\5\u0435\u021b")
buf.write("\2\u1322\u1323\5\u045b\u022e\2\u1323\u1324\5\u0445\u0223")
buf.write("\2\u1324\u1325\5\u0451\u0229\2\u1325\u1326\7a\2\2\u1326")
buf.write("\u1327\5\u045b\u022e\2\u1327\u1328\5\u0451\u0229\2\u1328")
buf.write("\u1329\7a\2\2\u1329\u132a\5\u0457\u022c\2\u132a\u132b")
buf.write("\5\u043d\u021f\2\u132b\u132c\5\u0453\u022a\2\u132c\u132d")
buf.write("\5\u0451\u0229\2\u132d\u132e\5\u0457\u022c\2\u132e\u132f")
buf.write("\5\u045b\u022e\2\u132f\u03b4\3\2\2\2\u1330\u1331\5\u0457")
buf.write("\u022c\2\u1331\u1332\5\u0451\u0229\2\u1332\u1333\5\u0461")
buf.write("\u0231\2\u1333\u1334\7a\2\2\u1334\u1335\5\u044f\u0228")
buf.write("\2\u1335\u1336\5\u045d\u022f\2\u1336\u1337\5\u044d\u0227")
buf.write("\2\u1337\u1338\5\u0437\u021c\2\u1338\u1339\5\u043d\u021f")
buf.write("\2\u1339\u133a\5\u0457\u022c\2\u133a\u03b6\3\2\2\2\u133b")
buf.write("\u133c\5\u0459\u022d\2\u133c\u133d\5\u045d\u022f\2\u133d")
buf.write("\u133e\5\u044d\u0227\2\u133e\u03b8\3\2\2\2\u133f\u1340")
buf.write("\5\u045f\u0230\2\u1340\u1341\5\u0435\u021b\2\u1341\u1342")
buf.write("\5\u0457\u022c\2\u1342\u1343\5\u0445\u0223\2\u1343\u1344")
buf.write("\5\u0435\u021b\2\u1344\u1345\5\u044f\u0228\2\u1345\u1346")
buf.write("\5\u0439\u021d\2\u1346\u1347\5\u043d\u021f\2\u1347\u03ba")
buf.write("\3\2\2\2\u1348\u1349\5\u0457\u022c\2\u1349\u134a\5\u043d")
buf.write("\u021f\2\u134a\u134b\5\u0441\u0221\2\u134b\u134c\5\u0457")
buf.write("\u022c\2\u134c\u134d\7a\2\2\u134d\u03bc\3\2\2\2\u134e")
buf.write("\u134f\5\u0459\u022d\2\u134f\u1350\5\u045b\u022e\2\u1350")
buf.write("\u1351\5\u043b\u021e\2\u1351\u1352\5\u043b\u021e\2\u1352")
buf.write("\u1353\5\u043d\u021f\2\u1353\u1354\5\u045f\u0230\2\u1354")
buf.write("\u03be\3\2\2\2\u1355\u1356\5\u045f\u0230\2\u1356\u1357")
buf.write("\5\u0435\u021b\2\u1357\u1358\5\u0457\u022c\2\u1358\u1359")
buf.write("\7a\2\2\u1359\u03c0\3\2\2\2\u135a\u135b\5\u0439\u021d")
buf.write("\2\u135b\u135c\5\u0451\u0229\2\u135c\u135d\5\u045f\u0230")
buf.write("\2\u135d\u135e\5\u0435\u021b\2\u135e\u135f\5\u0457\u022c")
buf.write("\2\u135f\u1360\7a\2\2\u1360\u03c2\3\2\2\2\u1361\u1362")
buf.write("\5\u044f\u0228\2\u1362\u1369\7)\2\2\u1363\u1368\n\2\2")
buf.write("\2\u1364\u1365\7)\2\2\u1365\u1368\7)\2\2\u1366\u1368\5")
buf.write("\u042d\u0217\2\u1367\u1363\3\2\2\2\u1367\u1364\3\2\2\2")
buf.write("\u1367\u1366\3\2\2\2\u1368\u136b\3\2\2\2\u1369\u1367\3")
buf.write("\2\2\2\u1369\u136a\3\2\2\2\u136a\u136c\3\2\2\2\u136b\u1369")
buf.write("\3\2\2\2\u136c\u136d\7)\2\2\u136d\u03c4\3\2\2\2\u136e")
buf.write("\u1377\5\u0437\u021c\2\u136f\u1373\7)\2\2\u1370\u1372")
buf.write("\4\62\63\2\u1371\u1370\3\2\2\2\u1372\u1375\3\2\2\2\u1373")
buf.write("\u1371\3\2\2\2\u1373\u1374\3\2\2\2\u1374\u1376\3\2\2\2")
buf.write("\u1375\u1373\3\2\2\2\u1376\u1378\7)\2\2\u1377\u136f\3")
buf.write("\2\2\2\u1378\u1379\3\2\2\2\u1379\u1377\3\2\2\2\u1379\u137a")
buf.write("\3\2\2\2\u137a\u03c6\3\2\2\2\u137b\u1384\5\u0463\u0232")
buf.write("\2\u137c\u1380\7)\2\2\u137d\u137f\t\3\2\2\u137e\u137d")
buf.write("\3\2\2\2\u137f\u1382\3\2\2\2\u1380\u137e\3\2\2\2\u1380")
buf.write("\u1381\3\2\2\2\u1381\u1383\3\2\2\2\u1382\u1380\3\2\2\2")
buf.write("\u1383\u1385\7)\2\2\u1384\u137c\3\2\2\2\u1385\u1386\3")
buf.write("\2\2\2\u1386\u1384\3\2\2\2\u1386\u1387\3\2\2\2\u1387\u03c8")
buf.write("\3\2\2\2\u1388\u1389\7\60\2\2\u1389\u138a\7\60\2\2\u138a")
buf.write("\u03ca\3\2\2\2\u138b\u138c\7\60\2\2\u138c\u03cc\3\2\2")
buf.write("\2\u138d\u138e\5\u0423\u0212\2\u138e\u03ce\3\2\2\2\u138f")
buf.write("\u1398\5\u0425\u0213\2\u1390\u1392\t\4\2\2\u1391\u1393")
buf.write("\t\5\2\2\u1392\u1391\3\2\2\2\u1392\u1393\3\2\2\2\u1393")
buf.write("\u1396\3\2\2\2\u1394\u1397\5\u0425\u0213\2\u1395\u1397")
buf.write("\5\u0423\u0212\2\u1396\u1394\3\2\2\2\u1396\u1395\3\2\2")
buf.write("\2\u1397\u1399\3\2\2\2\u1398\u1390\3\2\2\2\u1398\u1399")
buf.write("\3\2\2\2\u1399\u139c\3\2\2\2\u139a\u139d\5\u043b\u021e")
buf.write("\2\u139b\u139d\5\u043f\u0220\2\u139c\u139a\3\2\2\2\u139c")
buf.write("\u139b\3\2\2\2\u139c\u139d\3\2\2\2\u139d\u03d0\3\2\2\2")
buf.write("\u139e\u13a5\7)\2\2\u139f\u13a4\n\2\2\2\u13a0\u13a1\7")
buf.write(")\2\2\u13a1\u13a4\7)\2\2\u13a2\u13a4\5\u042d\u0217\2\u13a3")
buf.write("\u139f\3\2\2\2\u13a3\u13a0\3\2\2\2\u13a3\u13a2\3\2\2\2")
buf.write("\u13a4\u13a7\3\2\2\2\u13a5\u13a3\3\2\2\2\u13a5\u13a6\3")
buf.write("\2\2\2\u13a6\u13a8\3\2\2\2\u13a7\u13a5\3\2\2\2\u13a8\u13a9")
buf.write("\7)\2\2\u13a9\u03d2\3\2\2\2\u13aa\u13af\5\u0455\u022b")
buf.write("\2\u13ab\u13b0\5\u03d7\u01ec\2\u13ac\u13b0\5\u03d9\u01ed")
buf.write("\2\u13ad\u13b0\5\u03db\u01ee\2\u13ae\u13b0\5\u03dd\u01ef")
buf.write("\2\u13af\u13ab\3\2\2\2\u13af\u13ac\3\2\2\2\u13af\u13ad")
buf.write("\3\2\2\2\u13af\u13ae\3\2\2\2\u13b0\u13b1\3\2\2\2\u13b1")
buf.write("\u13b2\b\u01ea\2\2\u13b2\u03d4\3\2\2\2\u13b3\u13b4\7)")
buf.write("\2\2\u13b4\u03d6\3\2\2\2\u13b5\u13b6\5\u03d5\u01eb\2\u13b6")
buf.write("\u13ba\7>\2\2\u13b7\u13b9\13\2\2\2\u13b8\u13b7\3\2\2\2")
buf.write("\u13b9\u13bc\3\2\2\2\u13ba\u13bb\3\2\2\2\u13ba\u13b8\3")
buf.write("\2\2\2\u13bb\u13bd\3\2\2\2\u13bc\u13ba\3\2\2\2\u13bd\u13be")
buf.write("\7@\2\2\u13be\u13bf\5\u03d5\u01eb\2\u13bf\u03d8\3\2\2")
buf.write("\2\u13c0\u13c1\5\u03d5\u01eb\2\u13c1\u13c5\7}\2\2\u13c2")
buf.write("\u13c4\13\2\2\2\u13c3\u13c2\3\2\2\2\u13c4\u13c7\3\2\2")
buf.write("\2\u13c5\u13c6\3\2\2\2\u13c5\u13c3\3\2\2\2\u13c6\u13c8")
buf.write("\3\2\2\2\u13c7\u13c5\3\2\2\2\u13c8\u13c9\7\177\2\2\u13c9")
buf.write("\u13ca\5\u03d5\u01eb\2\u13ca\u03da\3\2\2\2\u13cb\u13cc")
buf.write("\5\u03d5\u01eb\2\u13cc\u13d0\7]\2\2\u13cd\u13cf\13\2\2")
buf.write("\2\u13ce\u13cd\3\2\2\2\u13cf\u13d2\3\2\2\2\u13d0\u13d1")
buf.write("\3\2\2\2\u13d0\u13ce\3\2\2\2\u13d1\u13d3\3\2\2\2\u13d2")
buf.write("\u13d0\3\2\2\2\u13d3\u13d4\7_\2\2\u13d4\u13d5\5\u03d5")
buf.write("\u01eb\2\u13d5\u03dc\3\2\2\2\u13d6\u13d7\5\u03d5\u01eb")
buf.write("\2\u13d7\u13db\7*\2\2\u13d8\u13da\13\2\2\2\u13d9\u13d8")
buf.write("\3\2\2\2\u13da\u13dd\3\2\2\2\u13db\u13dc\3\2\2\2\u13db")
buf.write("\u13d9\3\2\2\2\u13dc\u13de\3\2\2\2\u13dd\u13db\3\2\2\2")
buf.write("\u13de\u13df\7+\2\2\u13df\u13e0\5\u03d5\u01eb\2\u13e0")
buf.write("\u03de\3\2\2\2\u13e1\u13e2\n\6\2\2\u13e2\u03e0\3\2\2\2")
buf.write("\u13e3\u13e7\7$\2\2\u13e4\u13e8\n\7\2\2\u13e5\u13e6\7")
buf.write("$\2\2\u13e6\u13e8\7$\2\2\u13e7\u13e4\3\2\2\2\u13e7\u13e5")
buf.write("\3\2\2\2\u13e8\u13e9\3\2\2\2\u13e9\u13e7\3\2\2\2\u13e9")
buf.write("\u13ea\3\2\2\2\u13ea\u13eb\3\2\2\2\u13eb\u13ec\7$\2\2")
buf.write("\u13ec\u03e2\3\2\2\2\u13ed\u13ee\7\'\2\2\u13ee\u03e4\3")
buf.write("\2\2\2\u13ef\u13f0\7(\2\2\u13f0\u03e6\3\2\2\2\u13f1\u13f2")
buf.write("\7*\2\2\u13f2\u03e8\3\2\2\2\u13f3\u13f4\7+\2\2\u13f4\u03ea")
buf.write("\3\2\2\2\u13f5\u13f6\7,\2\2\u13f6\u13f7\7,\2\2\u13f7\u03ec")
buf.write("\3\2\2\2\u13f8\u13f9\7,\2\2\u13f9\u03ee\3\2\2\2\u13fa")
buf.write("\u13fb\7-\2\2\u13fb\u03f0\3\2\2\2\u13fc\u13fd\7/\2\2\u13fd")
buf.write("\u03f2\3\2\2\2\u13fe\u13ff\7.\2\2\u13ff\u03f4\3\2\2\2")
buf.write("\u1400\u1401\7\61\2\2\u1401\u03f6\3\2\2\2\u1402\u1403")
buf.write("\7B\2\2\u1403\u03f8\3\2\2\2\u1404\u1405\7<\2\2\u1405\u1406")
buf.write("\7?\2\2\u1406\u03fa\3\2\2\2\u1407\u1408\7<\2\2\u1408\u140d")
buf.write("\5\u0421\u0211\2\u1409\u140c\5\u0421\u0211\2\u140a\u140c")
buf.write("\t\b\2\2\u140b\u1409\3\2\2\2\u140b\u140a\3\2\2\2\u140c")
buf.write("\u140f\3\2\2\2\u140d\u140b\3\2\2\2\u140d\u140e\3\2\2\2")
buf.write("\u140e\u1416\3\2\2\2\u140f\u140d\3\2\2\2\u1410\u1411\7")
buf.write("<\2\2\u1411\u1416\5\u03e1\u01f1\2\u1412\u1413\7<\2\2\u1413")
buf.write("\u1416\5\u03cd\u01e7\2\u1414\u1416\5\u0411\u0209\2\u1415")
buf.write("\u1407\3\2\2\2\u1415\u1410\3\2\2\2\u1415\u1412\3\2\2\2")
buf.write("\u1415\u1414\3\2\2\2\u1416\u03fc\3\2\2\2\u1417\u1418\7")
buf.write("<\2\2\u1418\u03fe\3\2\2\2\u1419\u141a\7=\2\2\u141a\u0400")
buf.write("\3\2\2\2\u141b\u141c\7>\2\2\u141c\u141d\7?\2\2\u141d\u0402")
buf.write("\3\2\2\2\u141e\u141f\7>\2\2\u141f\u0404\3\2\2\2\u1420")
buf.write("\u1421\7@\2\2\u1421\u1422\7?\2\2\u1422\u0406\3\2\2\2\u1423")
buf.write("\u1424\7#\2\2\u1424\u142c\7?\2\2\u1425\u1426\7>\2\2\u1426")
buf.write("\u142c\7@\2\2\u1427\u1428\7`\2\2\u1428\u142c\7?\2\2\u1429")
buf.write("\u142a\7\u0080\2\2\u142a\u142c\7?\2\2\u142b\u1423\3\2")
buf.write("\2\2\u142b\u1425\3\2\2\2\u142b\u1427\3\2\2\2\u142b\u1429")
buf.write("\3\2\2\2\u142c\u0408\3\2\2\2\u142d\u142e\7`\2\2\u142e")
buf.write("\u040a\3\2\2\2\u142f\u1430\7\u0080\2\2\u1430\u040c\3\2")
buf.write("\2\2\u1431\u1432\7#\2\2\u1432\u040e\3\2\2\2\u1433\u1434")
buf.write("\7@\2\2\u1434\u0410\3\2\2\2\u1435\u1436\7A\2\2\u1436\u0412")
buf.write("\3\2\2\2\u1437\u1438\7~\2\2\u1438\u1439\7~\2\2\u1439\u0414")
buf.write("\3\2\2\2\u143a\u143b\7~\2\2\u143b\u0416\3\2\2\2\u143c")
buf.write("\u143d\7?\2\2\u143d\u0418\3\2\2\2\u143e\u143f\7]\2\2\u143f")
buf.write("\u041a\3\2\2\2\u1440\u1441\7_\2\2\u1441\u041c\3\2\2\2")
buf.write("\u1442\u1443\7a\2\2\u1443\u041e\3\2\2\2\u1444\u1446\t")
buf.write("\t\2\2\u1445\u1444\3\2\2\2\u1446\u1447\3\2\2\2\u1447\u1445")
buf.write("\3\2\2\2\u1447\u1448\3\2\2\2\u1448\u1449\3\2\2\2\u1449")
buf.write("\u144a\b\u0210\3\2\u144a\u0420\3\2\2\2\u144b\u144c\t\n")
buf.write("\2\2\u144c\u0422\3\2\2\2\u144d\u144f\4\62;\2\u144e\u144d")
buf.write("\3\2\2\2\u144f\u1450\3\2\2\2\u1450\u144e\3\2\2\2\u1450")
buf.write("\u1451\3\2\2\2\u1451\u0424\3\2\2\2\u1452\u1454\5\u03cd")
buf.write("\u01e7\2\u1453\u1452\3\2\2\2\u1454\u1457\3\2\2\2\u1455")
buf.write("\u1453\3\2\2\2\u1455\u1456\3\2\2\2\u1456\u1459\3\2\2\2")
buf.write("\u1457\u1455\3\2\2\2\u1458\u145a\7\60\2\2\u1459\u1458")
buf.write("\3\2\2\2\u1459\u145a\3\2\2\2\u145a\u145c\3\2\2\2\u145b")
buf.write("\u145d\5\u03cd\u01e7\2\u145c\u145b\3\2\2\2\u145d\u145e")
buf.write("\3\2\2\2\u145e\u145c\3\2\2\2\u145e\u145f\3\2\2\2\u145f")
buf.write("\u0426\3\2\2\2\u1460\u1461\7/\2\2\u1461\u1462\7/\2\2\u1462")
buf.write("\u1466\3\2\2\2\u1463\u1465\n\13\2\2\u1464\u1463\3\2\2")
buf.write("\2\u1465\u1468\3\2\2\2\u1466\u1464\3\2\2\2\u1466\u1467")
buf.write("\3\2\2\2\u1467\u146b\3\2\2\2\u1468\u1466\3\2\2\2\u1469")
buf.write("\u146c\5\u042d\u0217\2\u146a\u146c\7\2\2\3\u146b\u1469")
buf.write("\3\2\2\2\u146b\u146a\3\2\2\2\u146c\u146d\3\2\2\2\u146d")
buf.write("\u146e\b\u0214\4\2\u146e\u0428\3\2\2\2\u146f\u1470\7\61")
buf.write("\2\2\u1470\u1471\7,\2\2\u1471\u1475\3\2\2\2\u1472\u1474")
buf.write("\13\2\2\2\u1473\u1472\3\2\2\2\u1474\u1477\3\2\2\2\u1475")
buf.write("\u1476\3\2\2\2\u1475\u1473\3\2\2\2\u1476\u1478\3\2\2\2")
buf.write("\u1477\u1475\3\2\2\2\u1478\u1479\7,\2\2\u1479\u147a\7")
buf.write("\61\2\2\u147a\u147b\3\2\2\2\u147b\u147c\b\u0215\4\2\u147c")
buf.write("\u042a\3\2\2\2\u147d\u147e\7r\2\2\u147e\u147f\7t\2\2\u147f")
buf.write("\u1480\7q\2\2\u1480\u1481\7o\2\2\u1481\u1482\7r\2\2\u1482")
buf.write("\u1483\7v\2\2\u1483\u1484\3\2\2\2\u1484\u1488\5\u042f")
buf.write("\u0218\2\u1485\u1487\n\13\2\2\u1486\u1485\3\2\2\2\u1487")
buf.write("\u148a\3\2\2\2\u1488\u1486\3\2\2\2\u1488\u1489\3\2\2\2")
buf.write("\u1489\u148d\3\2\2\2\u148a\u1488\3\2\2\2\u148b\u148e\5")
buf.write("\u042d\u0217\2\u148c\u148e\7\2\2\3\u148d\u148b\3\2\2\2")
buf.write("\u148d\u148c\3\2\2\2\u148e\u042c\3\2\2\2\u148f\u1491\7")
buf.write("\17\2\2\u1490\u148f\3\2\2\2\u1490\u1491\3\2\2\2\u1491")
buf.write("\u1492\3\2\2\2\u1492\u1493\7\f\2\2\u1493\u042e\3\2\2\2")
buf.write("\u1494\u1495\t\f\2\2\u1495\u0430\3\2\2\2\u1496\u149b\5")
buf.write("\u0421\u0211\2\u1497\u149a\5\u0421\u0211\2\u1498\u149a")
buf.write("\t\r\2\2\u1499\u1497\3\2\2\2\u1499\u1498\3\2\2\2\u149a")
buf.write("\u149d\3\2\2\2\u149b\u1499\3\2\2\2\u149b\u149c\3\2\2\2")
buf.write("\u149c\u0432\3\2\2\2\u149d\u149b\3\2\2\2\u149e\u149f\7")
buf.write("B\2\2\u149f\u14a0\7#\2\2\u14a0\u14a1\3\2\2\2\u14a1\u14a2")
buf.write("\b\u021a\4\2\u14a2\u0434\3\2\2\2\u14a3\u14a4\t\16\2\2")
buf.write("\u14a4\u0436\3\2\2\2\u14a5\u14a6\t\17\2\2\u14a6\u0438")
buf.write("\3\2\2\2\u14a7\u14a8\t\20\2\2\u14a8\u043a\3\2\2\2\u14a9")
buf.write("\u14aa\t\21\2\2\u14aa\u043c\3\2\2\2\u14ab\u14ac\t\4\2")
buf.write("\2\u14ac\u043e\3\2\2\2\u14ad\u14ae\t\22\2\2\u14ae\u0440")
buf.write("\3\2\2\2\u14af\u14b0\t\23\2\2\u14b0\u0442\3\2\2\2\u14b1")
buf.write("\u14b2\t\24\2\2\u14b2\u0444\3\2\2\2\u14b3\u14b4\t\25\2")
buf.write("\2\u14b4\u0446\3\2\2\2\u14b5\u14b6\t\26\2\2\u14b6\u0448")
buf.write("\3\2\2\2\u14b7\u14b8\t\27\2\2\u14b8\u044a\3\2\2\2\u14b9")
buf.write("\u14ba\t\30\2\2\u14ba\u044c\3\2\2\2\u14bb\u14bc\t\31\2")
buf.write("\2\u14bc\u044e\3\2\2\2\u14bd\u14be\t\32\2\2\u14be\u0450")
buf.write("\3\2\2\2\u14bf\u14c0\t\33\2\2\u14c0\u0452\3\2\2\2\u14c1")
buf.write("\u14c2\t\34\2\2\u14c2\u0454\3\2\2\2\u14c3\u14c4\t\35\2")
buf.write("\2\u14c4\u0456\3\2\2\2\u14c5\u14c6\t\36\2\2\u14c6\u0458")
buf.write("\3\2\2\2\u14c7\u14c8\t\37\2\2\u14c8\u045a\3\2\2\2\u14c9")
buf.write("\u14ca\t \2\2\u14ca\u045c\3\2\2\2\u14cb\u14cc\t!\2\2\u14cc")
buf.write("\u045e\3\2\2\2\u14cd\u14ce\t\"\2\2\u14ce\u0460\3\2\2\2")
buf.write("\u14cf\u14d0\t#\2\2\u14d0\u0462\3\2\2\2\u14d1\u14d2\t")
buf.write("$\2\2\u14d2\u0464\3\2\2\2\u14d3\u14d4\t%\2\2\u14d4\u0466")
buf.write("\3\2\2\2\u14d5\u14d6\t&\2\2\u14d6\u0468\3\2\2\2\'\2\u1367")
buf.write("\u1369\u1373\u1379\u1380\u1386\u1392\u1396\u1398\u139c")
buf.write("\u13a3\u13a5\u13af\u13ba\u13c5\u13d0\u13db\u13e7\u13e9")
buf.write("\u140b\u140d\u1415\u142b\u1447\u1450\u1455\u1459\u145e")
buf.write("\u1466\u146b\u1475\u1488\u148d\u1490\u1499\u149b\5\t\u01ea")
buf.write("\2\b\2\2\2\3\2")
return buf.getvalue()
class PlSqlLexer(Lexer):
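    # ANTLR boilerplate: the serialized ATN emitted above is deserialized once
    # into the state machine that drives this lexer's tokenization.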
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
A_LETTER = 2
ADD = 3
AFTER = 4
AGENT = 5
AGGREGATE = 6
ALL = 7
ALTER = 8
ANALYZE = 9
AND = 10
ANY = 11
ARRAY = 12
AS = 13
ASSUME = 14
ASSERT = 15
ASC = 16
ASSOCIATE = 17
AT = 18
ATTRIBUTE = 19
AUDIT = 20
AUTHID = 21
AUTO = 22
AUTOMATIC = 23
AUTONOMOUS_TRANSACTION = 24
BATCH = 25
BEFORE = 26
BEGIN = 27
BETWEEN = 28
BFILE = 29
BINARY_DOUBLE = 30
BINARY_FLOAT = 31
BINARY_INTEGER = 32
BLOB = 33
BLOCK = 34
BODY = 35
BOOLEAN = 36
BOTH = 37
BREADTH = 38
BULK = 39
BY = 40
BYTE = 41
C_LETTER = 42
CACHE = 43
CALL = 44
CANONICAL = 45
CASCADE = 46
CASE = 47
CAST = 48
CHAR = 49
CHAR_CS = 50
CHARACTER = 51
CHECK = 52
CHR = 53
CLOB = 54
CLOSE = 55
CLUSTER = 56
COLLECT = 57
COLUMNS = 58
COMMENT = 59
COMMIT = 60
COMMITTED = 61
COMPATIBILITY = 62
COMPILE = 63
COMPOUND = 64
CONNECT = 65
CONNECT_BY_ROOT = 66
CONSTANT = 67
CONSTRAINT = 68
CONSTRAINTS = 69
CONSTRUCTOR = 70
CONTENT = 71
CONTEXT = 72
CONTINUE = 73
CONVERT = 74
CORRUPT_XID = 75
CORRUPT_XID_ALL = 76
COST = 77
COUNT = 78
CREATE = 79
CROSS = 80
CUBE = 81
CURRENT = 82
CURRENT_USER = 83
CURSOR = 84
CUSTOMDATUM = 85
CYCLE = 86
DATA = 87
DATABASE = 88
DATE = 89
DAY = 90
DB_ROLE_CHANGE = 91
DBTIMEZONE = 92
DDL = 93
DEBUG = 94
DEC = 95
DECIMAL = 96
DECLARE = 97
DECOMPOSE = 98
DECREMENT = 99
DEFAULT = 100
DEFAULTS = 101
DEFERRED = 102
DEFINER = 103
DELETE = 104
DEPTH = 105
DESC = 106
DETERMINISTIC = 107
DIMENSION = 108
DISABLE = 109
DISASSOCIATE = 110
DISTINCT = 111
DOCUMENT = 112
DOUBLE = 113
DROP = 114
DSINTERVAL_UNCONSTRAINED = 115
EACH = 116
ELEMENT = 117
ELSE = 118
ELSIF = 119
EMPTY = 120
ENABLE = 121
ENCODING = 122
END = 123
ENTITYESCAPING = 124
ERR = 125
ERRORS = 126
ESCAPE = 127
EVALNAME = 128
EXCEPT = 129
EXCEPTION = 130
EXCEPTION_INIT = 131
EXCEPTIONS = 132
EXCLUDE = 133
EXCLUSIVE = 134
EXECUTE = 135
EXISTS = 136
EXIT = 137
EXPLAIN = 138
EXTERNAL = 139
EXTRACT = 140
FAILURE = 141
FALSE = 142
FETCH = 143
FINAL = 144
FIRST = 145
FIRST_VALUE = 146
FLOAT = 147
FOLLOWING = 148
FOLLOWS = 149
FOR = 150
FORALL = 151
FORCE = 152
FROM = 153
FULL = 154
FUNCTION = 155
GOTO = 156
GRANT = 157
GROUP = 158
GROUPING = 159
HASH = 160
HAVING = 161
HIDE = 162
HOUR = 163
IF = 164
IGNORE = 165
IMMEDIATE = 166
IN = 167
INCLUDE = 168
INCLUDING = 169
INCREMENT = 170
INDENT = 171
INDEX = 172
INDEXED = 173
INDICATOR = 174
INDICES = 175
INFINITE = 176
INLINE = 177
INNER = 178
INOUT = 179
INSERT = 180
INSTANTIABLE = 181
INSTEAD = 182
INT = 183
INTEGER = 184
INTERSECT = 185
INTERVAL = 186
INTO = 187
INVALIDATE = 188
IS = 189
ISOLATION = 190
ITERATE = 191
JAVA = 192
JOIN = 193
KEEP = 194
LANGUAGE = 195
LAST = 196
LAST_VALUE = 197
LEADING = 198
LEFT = 199
LEVEL = 200
LIBRARY = 201
LIKE = 202
LIKE2 = 203
LIKE4 = 204
LIKEC = 205
LIMIT = 206
LOCAL = 207
LOCK = 208
LOCKED = 209
LOG = 210
LOGOFF = 211
LOGON = 212
LONG = 213
LOOP = 214
MAIN = 215
MAP = 216
MATCHED = 217
MAXVALUE = 218
MEASURES = 219
MEMBER = 220
MERGE = 221
MINUS = 222
MINUTE = 223
MINVALUE = 224
MLSLABEL = 225
MODE = 226
MODEL = 227
MODIFY = 228
MONTH = 229
MULTISET = 230
NAME = 231
NAN = 232
NATURAL = 233
NATURALN = 234
NAV = 235
NCHAR = 236
NCHAR_CS = 237
NCLOB = 238
NESTED = 239
NEW = 240
NO = 241
NOAUDIT = 242
NOCACHE = 243
NOCOPY = 244
NOCYCLE = 245
NOENTITYESCAPING = 246
NOMAXVALUE = 247
NOMINVALUE = 248
NONE = 249
NOORDER = 250
NOSCHEMACHECK = 251
NOT = 252
NOWAIT = 253
NULL = 254
NULLS = 255
NUMBER = 256
NUMERIC = 257
NVARCHAR2 = 258
OBJECT = 259
OF = 260
OFF = 261
OID = 262
OLD = 263
ON = 264
ONLY = 265
OPEN = 266
OPTION = 267
OR = 268
ORADATA = 269
ORDER = 270
ORDINALITY = 271
OSERROR = 272
OUT = 273
OUTER = 274
OVER = 275
OVERRIDING = 276
PACKAGE = 277
PARALLEL_ENABLE = 278
PARAMETERS = 279
PARENT = 280
PARTITION = 281
PASSING = 282
PATH = 283
PERCENT_ROWTYPE = 284
PERCENT_TYPE = 285
PIPELINED = 286
PIVOT = 287
PLAN = 288
PLS_INTEGER = 289
POSITIVE = 290
POSITIVEN = 291
PRAGMA = 292
PRECEDING = 293
PRECISION = 294
PRESENT = 295
PRIOR = 296
PROCEDURE = 297
RAISE = 298
RANGE = 299
RAW = 300
READ = 301
REAL = 302
RECORD = 303
REF = 304
REFERENCE = 305
REFERENCING = 306
REJECT = 307
RELIES_ON = 308
RENAME = 309
REPLACE = 310
RESPECT = 311
RESTRICT_REFERENCES = 312
RESULT = 313
RESULT_CACHE = 314
RETURN = 315
RETURNING = 316
REUSE = 317
REVERSE = 318
REVOKE = 319
RIGHT = 320
ROLLBACK = 321
ROLLUP = 322
ROW = 323
ROWID = 324
ROWS = 325
RULES = 326
SAMPLE = 327
SAVE = 328
SAVEPOINT = 329
SCHEMA = 330
SCHEMACHECK = 331
SCN = 332
SEARCH = 333
SECOND = 334
SEED = 335
SEGMENT = 336
SELECT = 337
SELF = 338
SEQUENCE = 339
SEQUENTIAL = 340
SERIALIZABLE = 341
SERIALLY_REUSABLE = 342
SERVERERROR = 343
SESSIONTIMEZONE = 344
SET = 345
SETS = 346
SETTINGS = 347
SHARE = 348
SHOW = 349
SHUTDOWN = 350
SIBLINGS = 351
SIGNTYPE = 352
SIMPLE_INTEGER = 353
SINGLE = 354
SIZE = 355
SKIP_ = 356
SMALLINT = 357
SNAPSHOT = 358
SOME = 359
SPECIFICATION = 360
SQLDATA = 361
SQLERROR = 362
STANDALONE = 363
START = 364
STARTUP = 365
STATEMENT = 366
STATEMENT_ID = 367
STATIC = 368
STATISTICS = 369
STRING = 370
SUBMULTISET = 371
SUBPARTITION = 372
SUBSTITUTABLE = 373
SUBTYPE = 374
SUCCESS = 375
SUSPEND = 376
TABLE = 377
THE = 378
THEN = 379
TIME = 380
TIMESTAMP = 381
TIMESTAMP_LTZ_UNCONSTRAINED = 382
TIMESTAMP_TZ_UNCONSTRAINED = 383
TIMESTAMP_UNCONSTRAINED = 384
TIMEZONE_ABBR = 385
TIMEZONE_HOUR = 386
TIMEZONE_MINUTE = 387
TIMEZONE_REGION = 388
TO = 389
TRAILING = 390
TRANSACTION = 391
TRANSLATE = 392
TREAT = 393
TRIGGER = 394
TRIM = 395
TRUE = 396
TRUNCATE = 397
TYPE = 398
UNBOUNDED = 399
UNDER = 400
UNION = 401
UNIQUE = 402
UNLIMITED = 403
UNPIVOT = 404
UNTIL = 405
UPDATE = 406
UPDATED = 407
UPSERT = 408
UROWID = 409
USE = 410
USING = 411
VALIDATE = 412
VALUE = 413
VALUES = 414
VARCHAR = 415
VARCHAR2 = 416
VARIABLE = 417
VARRAY = 418
VARYING = 419
VERSION = 420
VERSIONS = 421
WAIT = 422
WARNING = 423
WELLFORMED = 424
WHEN = 425
WHENEVER = 426
WHERE = 427
WHILE = 428
WITH = 429
WITHIN = 430
WORK = 431
WRITE = 432
XML = 433
XMLAGG = 434
XMLATTRIBUTES = 435
XMLCAST = 436
XMLCOLATTVAL = 437
XMLELEMENT = 438
XMLEXISTS = 439
XMLFOREST = 440
XMLNAMESPACES = 441
XMLPARSE = 442
XMLPI = 443
XMLQUERY = 444
XMLROOT = 445
XMLSERIALIZE = 446
XMLTABLE = 447
YEAR = 448
YES = 449
YMINTERVAL_UNCONSTRAINED = 450
ZONE = 451
PREDICTION = 452
PREDICTION_BOUNDS = 453
PREDICTION_COST = 454
PREDICTION_DETAILS = 455
PREDICTION_PROBABILITY = 456
PREDICTION_SET = 457
CUME_DIST = 458
DENSE_RANK = 459
LISTAGG = 460
PERCENT_RANK = 461
PERCENTILE_CONT = 462
PERCENTILE_DISC = 463
RANK = 464
AVG = 465
CORR = 466
LAG = 467
LEAD = 468
MAX = 469
MEDIAN = 470
MIN = 471
NTILE = 472
RATIO_TO_REPORT = 473
ROW_NUMBER = 474
SUM = 475
VARIANCE = 476
REGR_ = 477
STDDEV = 478
VAR_ = 479
COVAR_ = 480
NATIONAL_CHAR_STRING_LIT = 481
BIT_STRING_LIT = 482
HEX_STRING_LIT = 483
DOUBLE_PERIOD = 484
PERIOD = 485
UNSIGNED_INTEGER = 486
APPROXIMATE_NUM_LIT = 487
CHAR_STRING = 488
DELIMITED_ID = 489
PERCENT = 490
AMPERSAND = 491
LEFT_PAREN = 492
RIGHT_PAREN = 493
DOUBLE_ASTERISK = 494
ASTERISK = 495
PLUS_SIGN = 496
MINUS_SIGN = 497
COMMA = 498
SOLIDUS = 499
AT_SIGN = 500
ASSIGN_OP = 501
BINDVAR = 502
COLON = 503
SEMICOLON = 504
LESS_THAN_OR_EQUALS_OP = 505
LESS_THAN_OP = 506
GREATER_THAN_OR_EQUALS_OP = 507
NOT_EQUAL_OP = 508
CARRET_OPERATOR_PART = 509
TILDE_OPERATOR_PART = 510
EXCLAMATION_OPERATOR_PART = 511
GREATER_THAN_OP = 512
CONCATENATION_OP = 513
VERTICAL_BAR = 514
EQUALS_OP = 515
LEFT_BRACKET = 516
RIGHT_BRACKET = 517
INTRODUCER = 518
SPACES = 519
SINGLE_LINE_COMMENT = 520
MULTI_LINE_COMMENT = 521
PROMPT = 522
REGULAR_ID = 523
ZV = 524
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'..'", "'.'", "'%'", "'&'", "'('", "')'", "'**'", "'*'", "'+'",
"'-'", "','", "'/'", "'@'", "':='", "':'", "';'", "'<='", "'<'",
"'>='", "'^'", "'~'", "'!'", "'>'", "'||'", "'|'", "'='", "'['",
"']'", "'_'", "'@!'" ]
symbolicNames = [ "<INVALID>",
"A_LETTER", "ADD", "AFTER", "AGENT", "AGGREGATE", "ALL", "ALTER",
"ANALYZE", "AND", "ANY", "ARRAY", "AS", "ASSUME", "ASSERT",
"ASC", "ASSOCIATE", "AT", "ATTRIBUTE", "AUDIT", "AUTHID", "AUTO",
"AUTOMATIC", "AUTONOMOUS_TRANSACTION", "BATCH", "BEFORE", "BEGIN",
"BETWEEN", "BFILE", "BINARY_DOUBLE", "BINARY_FLOAT", "BINARY_INTEGER",
"BLOB", "BLOCK", "BODY", "BOOLEAN", "BOTH", "BREADTH", "BULK",
"BY", "BYTE", "C_LETTER", "CACHE", "CALL", "CANONICAL", "CASCADE",
"CASE", "CAST", "CHAR", "CHAR_CS", "CHARACTER", "CHECK", "CHR",
"CLOB", "CLOSE", "CLUSTER", "COLLECT", "COLUMNS", "COMMENT",
"COMMIT", "COMMITTED", "COMPATIBILITY", "COMPILE", "COMPOUND",
"CONNECT", "CONNECT_BY_ROOT", "CONSTANT", "CONSTRAINT", "CONSTRAINTS",
"CONSTRUCTOR", "CONTENT", "CONTEXT", "CONTINUE", "CONVERT",
"CORRUPT_XID", "CORRUPT_XID_ALL", "COST", "COUNT", "CREATE",
"CROSS", "CUBE", "CURRENT", "CURRENT_USER", "CURSOR", "CUSTOMDATUM",
"CYCLE", "DATA", "DATABASE", "DATE", "DAY", "DB_ROLE_CHANGE",
"DBTIMEZONE", "DDL", "DEBUG", "DEC", "DECIMAL", "DECLARE", "DECOMPOSE",
"DECREMENT", "DEFAULT", "DEFAULTS", "DEFERRED", "DEFINER", "DELETE",
"DEPTH", "DESC", "DETERMINISTIC", "DIMENSION", "DISABLE", "DISASSOCIATE",
"DISTINCT", "DOCUMENT", "DOUBLE", "DROP", "DSINTERVAL_UNCONSTRAINED",
"EACH", "ELEMENT", "ELSE", "ELSIF", "EMPTY", "ENABLE", "ENCODING",
"END", "ENTITYESCAPING", "ERR", "ERRORS", "ESCAPE", "EVALNAME",
"EXCEPT", "EXCEPTION", "EXCEPTION_INIT", "EXCEPTIONS", "EXCLUDE",
"EXCLUSIVE", "EXECUTE", "EXISTS", "EXIT", "EXPLAIN", "EXTERNAL",
"EXTRACT", "FAILURE", "FALSE", "FETCH", "FINAL", "FIRST", "FIRST_VALUE",
"FLOAT", "FOLLOWING", "FOLLOWS", "FOR", "FORALL", "FORCE", "FROM",
"FULL", "FUNCTION", "GOTO", "GRANT", "GROUP", "GROUPING", "HASH",
"HAVING", "HIDE", "HOUR", "IF", "IGNORE", "IMMEDIATE", "IN",
"INCLUDE", "INCLUDING", "INCREMENT", "INDENT", "INDEX", "INDEXED",
"INDICATOR", "INDICES", "INFINITE", "INLINE", "INNER", "INOUT",
"INSERT", "INSTANTIABLE", "INSTEAD", "INT", "INTEGER", "INTERSECT",
"INTERVAL", "INTO", "INVALIDATE", "IS", "ISOLATION", "ITERATE",
"JAVA", "JOIN", "KEEP", "LANGUAGE", "LAST", "LAST_VALUE", "LEADING",
"LEFT", "LEVEL", "LIBRARY", "LIKE", "LIKE2", "LIKE4", "LIKEC",
"LIMIT", "LOCAL", "LOCK", "LOCKED", "LOG", "LOGOFF", "LOGON",
"LONG", "LOOP", "MAIN", "MAP", "MATCHED", "MAXVALUE", "MEASURES",
"MEMBER", "MERGE", "MINUS", "MINUTE", "MINVALUE", "MLSLABEL",
"MODE", "MODEL", "MODIFY", "MONTH", "MULTISET", "NAME", "NAN",
"NATURAL", "NATURALN", "NAV", "NCHAR", "NCHAR_CS", "NCLOB",
"NESTED", "NEW", "NO", "NOAUDIT", "NOCACHE", "NOCOPY", "NOCYCLE",
"NOENTITYESCAPING", "NOMAXVALUE", "NOMINVALUE", "NONE", "NOORDER",
"NOSCHEMACHECK", "NOT", "NOWAIT", "NULL", "NULLS", "NUMBER",
"NUMERIC", "NVARCHAR2", "OBJECT", "OF", "OFF", "OID", "OLD",
"ON", "ONLY", "OPEN", "OPTION", "OR", "ORADATA", "ORDER", "ORDINALITY",
"OSERROR", "OUT", "OUTER", "OVER", "OVERRIDING", "PACKAGE",
"PARALLEL_ENABLE", "PARAMETERS", "PARENT", "PARTITION", "PASSING",
"PATH", "PERCENT_ROWTYPE", "PERCENT_TYPE", "PIPELINED", "PIVOT",
"PLAN", "PLS_INTEGER", "POSITIVE", "POSITIVEN", "PRAGMA", "PRECEDING",
"PRECISION", "PRESENT", "PRIOR", "PROCEDURE", "RAISE", "RANGE",
"RAW", "READ", "REAL", "RECORD", "REF", "REFERENCE", "REFERENCING",
"REJECT", "RELIES_ON", "RENAME", "REPLACE", "RESPECT", "RESTRICT_REFERENCES",
"RESULT", "RESULT_CACHE", "RETURN", "RETURNING", "REUSE", "REVERSE",
"REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROWID", "ROWS",
"RULES", "SAMPLE", "SAVE", "SAVEPOINT", "SCHEMA", "SCHEMACHECK",
"SCN", "SEARCH", "SECOND", "SEED", "SEGMENT", "SELECT", "SELF",
"SEQUENCE", "SEQUENTIAL", "SERIALIZABLE", "SERIALLY_REUSABLE",
"SERVERERROR", "SESSIONTIMEZONE", "SET", "SETS", "SETTINGS",
"SHARE", "SHOW", "SHUTDOWN", "SIBLINGS", "SIGNTYPE", "SIMPLE_INTEGER",
"SINGLE", "SIZE", "SKIP_", "SMALLINT", "SNAPSHOT", "SOME", "SPECIFICATION",
"SQLDATA", "SQLERROR", "STANDALONE", "START", "STARTUP", "STATEMENT",
"STATEMENT_ID", "STATIC", "STATISTICS", "STRING", "SUBMULTISET",
"SUBPARTITION", "SUBSTITUTABLE", "SUBTYPE", "SUCCESS", "SUSPEND",
"TABLE", "THE", "THEN", "TIME", "TIMESTAMP", "TIMESTAMP_LTZ_UNCONSTRAINED",
"TIMESTAMP_TZ_UNCONSTRAINED", "TIMESTAMP_UNCONSTRAINED", "TIMEZONE_ABBR",
"TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TIMEZONE_REGION", "TO",
"TRAILING", "TRANSACTION", "TRANSLATE", "TREAT", "TRIGGER",
"TRIM", "TRUE", "TRUNCATE", "TYPE", "UNBOUNDED", "UNDER", "UNION",
"UNIQUE", "UNLIMITED", "UNPIVOT", "UNTIL", "UPDATE", "UPDATED",
"UPSERT", "UROWID", "USE", "USING", "VALIDATE", "VALUE", "VALUES",
"VARCHAR", "VARCHAR2", "VARIABLE", "VARRAY", "VARYING", "VERSION",
"VERSIONS", "WAIT", "WARNING", "WELLFORMED", "WHEN", "WHENEVER",
"WHERE", "WHILE", "WITH", "WITHIN", "WORK", "WRITE", "XML",
"XMLAGG", "XMLATTRIBUTES", "XMLCAST", "XMLCOLATTVAL", "XMLELEMENT",
"XMLEXISTS", "XMLFOREST", "XMLNAMESPACES", "XMLPARSE", "XMLPI",
"XMLQUERY", "XMLROOT", "XMLSERIALIZE", "XMLTABLE", "YEAR", "YES",
"YMINTERVAL_UNCONSTRAINED", "ZONE", "PREDICTION", "PREDICTION_BOUNDS",
"PREDICTION_COST", "PREDICTION_DETAILS", "PREDICTION_PROBABILITY",
"PREDICTION_SET", "CUME_DIST", "DENSE_RANK", "LISTAGG", "PERCENT_RANK",
"PERCENTILE_CONT", "PERCENTILE_DISC", "RANK", "AVG", "CORR",
"LAG", "LEAD", "MAX", "MEDIAN", "MIN", "NTILE", "RATIO_TO_REPORT",
"ROW_NUMBER", "SUM", "VARIANCE", "REGR_", "STDDEV", "VAR_",
"COVAR_", "NATIONAL_CHAR_STRING_LIT", "BIT_STRING_LIT", "HEX_STRING_LIT",
"DOUBLE_PERIOD", "PERIOD", "UNSIGNED_INTEGER", "APPROXIMATE_NUM_LIT",
"CHAR_STRING", "DELIMITED_ID", "PERCENT", "AMPERSAND", "LEFT_PAREN",
"RIGHT_PAREN", "DOUBLE_ASTERISK", "ASTERISK", "PLUS_SIGN", "MINUS_SIGN",
"COMMA", "SOLIDUS", "AT_SIGN", "ASSIGN_OP", "BINDVAR", "COLON",
"SEMICOLON", "LESS_THAN_OR_EQUALS_OP", "LESS_THAN_OP", "GREATER_THAN_OR_EQUALS_OP",
"NOT_EQUAL_OP", "CARRET_OPERATOR_PART", "TILDE_OPERATOR_PART",
"EXCLAMATION_OPERATOR_PART", "GREATER_THAN_OP", "CONCATENATION_OP",
"VERTICAL_BAR", "EQUALS_OP", "LEFT_BRACKET", "RIGHT_BRACKET",
"INTRODUCER", "SPACES", "SINGLE_LINE_COMMENT", "MULTI_LINE_COMMENT",
"PROMPT", "REGULAR_ID", "ZV" ]
ruleNames = [ "T__0", "A_LETTER", "ADD", "AFTER", "AGENT", "AGGREGATE",
"ALL", "ALTER", "ANALYZE", "AND", "ANY", "ARRAY", "AS",
"ASSUME", "ASSERT", "ASC", "ASSOCIATE", "AT", "ATTRIBUTE",
"AUDIT", "AUTHID", "AUTO", "AUTOMATIC", "AUTONOMOUS_TRANSACTION",
"BATCH", "BEFORE", "BEGIN", "BETWEEN", "BFILE", "BINARY_DOUBLE",
"BINARY_FLOAT", "BINARY_INTEGER", "BLOB", "BLOCK", "BODY",
"BOOLEAN", "BOTH", "BREADTH", "BULK", "BY", "BYTE", "C_LETTER",
"CACHE", "CALL", "CANONICAL", "CASCADE", "CASE", "CAST",
"CHAR", "CHAR_CS", "CHARACTER", "CHECK", "CHR", "CLOB",
"CLOSE", "CLUSTER", "COLLECT", "COLUMNS", "COMMENT", "COMMIT",
"COMMITTED", "COMPATIBILITY", "COMPILE", "COMPOUND", "CONNECT",
"CONNECT_BY_ROOT", "CONSTANT", "CONSTRAINT", "CONSTRAINTS",
"CONSTRUCTOR", "CONTENT", "CONTEXT", "CONTINUE", "CONVERT",
"CORRUPT_XID", "CORRUPT_XID_ALL", "COST", "COUNT", "CREATE",
"CROSS", "CUBE", "CURRENT", "CURRENT_USER", "CURSOR",
"CUSTOMDATUM", "CYCLE", "DATA", "DATABASE", "DATE", "DAY",
"DB_ROLE_CHANGE", "DBTIMEZONE", "DDL", "DEBUG", "DEC",
"DECIMAL", "DECLARE", "DECOMPOSE", "DECREMENT", "DEFAULT",
"DEFAULTS", "DEFERRED", "DEFINER", "DELETE", "DEPTH",
"DESC", "DETERMINISTIC", "DIMENSION", "DISABLE", "DISASSOCIATE",
"DISTINCT", "DOCUMENT", "DOUBLE", "DROP", "DSINTERVAL_UNCONSTRAINED",
"EACH", "ELEMENT", "ELSE", "ELSIF", "EMPTY", "ENABLE",
"ENCODING", "END", "ENTITYESCAPING", "ERR", "ERRORS",
"ESCAPE", "EVALNAME", "EXCEPT", "EXCEPTION", "EXCEPTION_INIT",
"EXCEPTIONS", "EXCLUDE", "EXCLUSIVE", "EXECUTE", "EXISTS",
"EXIT", "EXPLAIN", "EXTERNAL", "EXTRACT", "FAILURE", "FALSE",
"FETCH", "FINAL", "FIRST", "FIRST_VALUE", "FLOAT", "FOLLOWING",
"FOLLOWS", "FOR", "FORALL", "FORCE", "FROM", "FULL", "FUNCTION",
"GOTO", "GRANT", "GROUP", "GROUPING", "HASH", "HAVING",
"HIDE", "HOUR", "IF", "IGNORE", "IMMEDIATE", "IN", "INCLUDE",
"INCLUDING", "INCREMENT", "INDENT", "INDEX", "INDEXED",
"INDICATOR", "INDICES", "INFINITE", "INLINE", "INNER",
"INOUT", "INSERT", "INSTANTIABLE", "INSTEAD", "INT", "INTEGER",
"INTERSECT", "INTERVAL", "INTO", "INVALIDATE", "IS", "ISOLATION",
"ITERATE", "JAVA", "JOIN", "KEEP", "LANGUAGE", "LAST",
"LAST_VALUE", "LEADING", "LEFT", "LEVEL", "LIBRARY", "LIKE",
"LIKE2", "LIKE4", "LIKEC", "LIMIT", "LOCAL", "LOCK", "LOCKED",
"LOG", "LOGOFF", "LOGON", "LONG", "LOOP", "MAIN", "MAP",
"MATCHED", "MAXVALUE", "MEASURES", "MEMBER", "MERGE",
"MINUS", "MINUTE", "MINVALUE", "MLSLABEL", "MODE", "MODEL",
"MODIFY", "MONTH", "MULTISET", "NAME", "NAN", "NATURAL",
"NATURALN", "NAV", "NCHAR", "NCHAR_CS", "NCLOB", "NESTED",
"NEW", "NO", "NOAUDIT", "NOCACHE", "NOCOPY", "NOCYCLE",
"NOENTITYESCAPING", "NOMAXVALUE", "NOMINVALUE", "NONE",
"NOORDER", "NOSCHEMACHECK", "NOT", "NOWAIT", "NULL", "NULLS",
"NUMBER", "NUMERIC", "NVARCHAR2", "OBJECT", "OF", "OFF",
"OID", "OLD", "ON", "ONLY", "OPEN", "OPTION", "OR", "ORADATA",
"ORDER", "ORDINALITY", "OSERROR", "OUT", "OUTER", "OVER",
"OVERRIDING", "PACKAGE", "PARALLEL_ENABLE", "PARAMETERS",
"PARENT", "PARTITION", "PASSING", "PATH", "PERCENT_ROWTYPE",
"PERCENT_TYPE", "PIPELINED", "PIVOT", "PLAN", "PLS_INTEGER",
"POSITIVE", "POSITIVEN", "PRAGMA", "PRECEDING", "PRECISION",
"PRESENT", "PRIOR", "PROCEDURE", "RAISE", "RANGE", "RAW",
"READ", "REAL", "RECORD", "REF", "REFERENCE", "REFERENCING",
"REJECT", "RELIES_ON", "RENAME", "REPLACE", "RESPECT",
"RESTRICT_REFERENCES", "RESULT", "RESULT_CACHE", "RETURN",
"RETURNING", "REUSE", "REVERSE", "REVOKE", "RIGHT", "ROLLBACK",
"ROLLUP", "ROW", "ROWID", "ROWS", "RULES", "SAMPLE", "SAVE",
"SAVEPOINT", "SCHEMA", "SCHEMACHECK", "SCN", "SEARCH",
"SECOND", "SEED", "SEGMENT", "SELECT", "SELF", "SEQUENCE",
"SEQUENTIAL", "SERIALIZABLE", "SERIALLY_REUSABLE", "SERVERERROR",
"SESSIONTIMEZONE", "SET", "SETS", "SETTINGS", "SHARE",
"SHOW", "SHUTDOWN", "SIBLINGS", "SIGNTYPE", "SIMPLE_INTEGER",
"SINGLE", "SIZE", "SKIP_", "SMALLINT", "SNAPSHOT", "SOME",
"SPECIFICATION", "SQLDATA", "SQLERROR", "STANDALONE",
"START", "STARTUP", "STATEMENT", "STATEMENT_ID", "STATIC",
"STATISTICS", "STRING", "SUBMULTISET", "SUBPARTITION",
"SUBSTITUTABLE", "SUBTYPE", "SUCCESS", "SUSPEND", "TABLE",
"THE", "THEN", "TIME", "TIMESTAMP", "TIMESTAMP_LTZ_UNCONSTRAINED",
"TIMESTAMP_TZ_UNCONSTRAINED", "TIMESTAMP_UNCONSTRAINED",
"TIMEZONE_ABBR", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TIMEZONE_REGION",
"TO", "TRAILING", "TRANSACTION", "TRANSLATE", "TREAT",
"TRIGGER", "TRIM", "TRUE", "TRUNCATE", "TYPE", "UNBOUNDED",
"UNDER", "UNION", "UNIQUE", "UNLIMITED", "UNPIVOT", "UNTIL",
"UPDATE", "UPDATED", "UPSERT", "UROWID", "USE", "USING",
"VALIDATE", "VALUE", "VALUES", "VARCHAR", "VARCHAR2",
"VARIABLE", "VARRAY", "VARYING", "VERSION", "VERSIONS",
"WAIT", "WARNING", "WELLFORMED", "WHEN", "WHENEVER", "WHERE",
"WHILE", "WITH", "WITHIN", "WORK", "WRITE", "XML", "XMLAGG",
"XMLATTRIBUTES", "XMLCAST", "XMLCOLATTVAL", "XMLELEMENT",
"XMLEXISTS", "XMLFOREST", "XMLNAMESPACES", "XMLPARSE",
"XMLPI", "XMLQUERY", "XMLROOT", "XMLSERIALIZE", "XMLTABLE",
"YEAR", "YES", "YMINTERVAL_UNCONSTRAINED", "ZONE", "PREDICTION",
"PREDICTION_BOUNDS", "PREDICTION_COST", "PREDICTION_DETAILS",
"PREDICTION_PROBABILITY", "PREDICTION_SET", "CUME_DIST",
"DENSE_RANK", "LISTAGG", "PERCENT_RANK", "PERCENTILE_CONT",
"PERCENTILE_DISC", "RANK", "AVG", "CORR", "LAG", "LEAD",
"MAX", "MEDIAN", "MIN", "NTILE", "RATIO_TO_REPORT", "ROW_NUMBER",
"SUM", "VARIANCE", "REGR_", "STDDEV", "VAR_", "COVAR_",
"NATIONAL_CHAR_STRING_LIT", "BIT_STRING_LIT", "HEX_STRING_LIT",
"DOUBLE_PERIOD", "PERIOD", "UNSIGNED_INTEGER", "APPROXIMATE_NUM_LIT",
"CHAR_STRING", "CHAR_STRING_PERL", "QUOTE", "QS_ANGLE",
"QS_BRACE", "QS_BRACK", "QS_PAREN", "QS_OTHER_CH", "DELIMITED_ID",
"PERCENT", "AMPERSAND", "LEFT_PAREN", "RIGHT_PAREN", "DOUBLE_ASTERISK",
"ASTERISK", "PLUS_SIGN", "MINUS_SIGN", "COMMA", "SOLIDUS",
"AT_SIGN", "ASSIGN_OP", "BINDVAR", "COLON", "SEMICOLON",
"LESS_THAN_OR_EQUALS_OP", "LESS_THAN_OP", "GREATER_THAN_OR_EQUALS_OP",
"NOT_EQUAL_OP", "CARRET_OPERATOR_PART", "TILDE_OPERATOR_PART",
"EXCLAMATION_OPERATOR_PART", "GREATER_THAN_OP", "QUESTION_MARK",
"CONCATENATION_OP", "VERTICAL_BAR", "EQUALS_OP", "LEFT_BRACKET",
"RIGHT_BRACKET", "INTRODUCER", "SPACES", "SIMPLE_LETTER",
"UNSIGNED_INTEGER_FRAGMENT", "FLOAT_FRAGMENT", "SINGLE_LINE_COMMENT",
"MULTI_LINE_COMMENT", "PROMPT", "NEWLINE", "SPACE", "REGULAR_ID",
"ZV", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
"K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z" ]
grammarFileName = "PlSql.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| [
1,
3,
4,
5,
6
] |
1,553 | c6357e6e0656388fc3fd849879aa6000e0bee1ee | #
# o o
# 8
# .oPYo. .oPYo. odYo. o8P o8 .oPYo. odYo. .oPYo. .oPYo.
# Yb.. 8oooo8 8' `8 8 8 8oooo8 8' `8 8 ' 8oooo8
# 'Yb. 8. 8 8 8 8 8. 8 8 8 . 8.
# `YooP' `Yooo' 8 8 8 8 `Yooo' 8 8 `YooP' `Yooo'
# :.....::.....:..::..::..::..:.....:..::..:.....::.....:
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# Copyright Yazan Obeidi, 2017
#
# python.learning.learn - single interface for learning
#
from src.python.utils.log import init_log
from src.python.utils.config import init_config
from src.python.learning.models import Model
__author__ = 'yazan'
__version__ = '0.0.1'
__licence__ = 'Apache V2'
class Trainer(object):
"""Consumes data/dataset in streamable or batch format
and trains a single model in the available catalogue.
"""
def __init__(self, log, config, model_handle, model_schema):
""":params:
model_handle: a model object, i.e. a RandomForest clf handler
model_schema: reference to the library for that model, i.e. sklearn
"""
self.log = log
self.config = config
self.model = model_handle
self.schema = model_schema
def train(self):
pass
@property
def score(self):
pass
if __name__ == '__main__':
log = init_log()
config = init_config()
    trainer = Trainer(log=log, config=config,
                      model_handle=None, model_schema=None)  # placeholders: no model selected yet
0
] |
1,554 | 193dcf7bd658f88afe0a1f2fa28605f262e45bc2 | <mask token>
class Session(Destroyable):
def __init__(self, physical_device, queue_index=None):
super(Session, self).__init__()
self.instance = lava.instance()
if physical_device not in lava.devices():
raise RuntimeError('Provided invalid / outdated device object')
self.queue_index = queue_index or physical_device.get_queue_indices(
QueueType.COMPUTE)[0]
self.device = Device(physical_device, [(QueueType.COMPUTE, self.
queue_index)], validation_lvl=lava.VALIDATION_LEVEL)
self.buffers = set()
self.shaders = set()
self.stages = set()
sessions.add(self)
def _destroy(self):
for stage in self.stages:
stage.destroy()
for shader in self.shaders:
shader.destroy()
for buffer in self.buffers:
buffer.destroy()
self.device.destroy()
def register_buffer(self, buffer):
self.buffers.add(buffer)
<mask token>
def register_stage(self, stage):
self.stages.add(stage)
| <mask token>
class Session(Destroyable):
def __init__(self, physical_device, queue_index=None):
super(Session, self).__init__()
self.instance = lava.instance()
if physical_device not in lava.devices():
raise RuntimeError('Provided invalid / outdated device object')
self.queue_index = queue_index or physical_device.get_queue_indices(
QueueType.COMPUTE)[0]
self.device = Device(physical_device, [(QueueType.COMPUTE, self.
queue_index)], validation_lvl=lava.VALIDATION_LEVEL)
self.buffers = set()
self.shaders = set()
self.stages = set()
sessions.add(self)
def _destroy(self):
for stage in self.stages:
stage.destroy()
for shader in self.shaders:
shader.destroy()
for buffer in self.buffers:
buffer.destroy()
self.device.destroy()
def register_buffer(self, buffer):
self.buffers.add(buffer)
def register_shader(self, shader):
self.shaders.add(shader)
def register_stage(self, stage):
self.stages.add(stage)
| <mask token>
__all__ = ['Session']
sessions = set()
class Session(Destroyable):
def __init__(self, physical_device, queue_index=None):
super(Session, self).__init__()
self.instance = lava.instance()
if physical_device not in lava.devices():
raise RuntimeError('Provided invalid / outdated device object')
self.queue_index = queue_index or physical_device.get_queue_indices(
QueueType.COMPUTE)[0]
self.device = Device(physical_device, [(QueueType.COMPUTE, self.
queue_index)], validation_lvl=lava.VALIDATION_LEVEL)
self.buffers = set()
self.shaders = set()
self.stages = set()
sessions.add(self)
def _destroy(self):
for stage in self.stages:
stage.destroy()
for shader in self.shaders:
shader.destroy()
for buffer in self.buffers:
buffer.destroy()
self.device.destroy()
def register_buffer(self, buffer):
self.buffers.add(buffer)
def register_shader(self, shader):
self.shaders.add(shader)
def register_stage(self, stage):
self.stages.add(stage)
| import lava
from lava.api.constants.vk import QueueType
from lava.api.device import Device
from lava.api.util import Destroyable
__all__ = ['Session']
sessions = set()
class Session(Destroyable):
def __init__(self, physical_device, queue_index=None):
super(Session, self).__init__()
self.instance = lava.instance()
if physical_device not in lava.devices():
raise RuntimeError('Provided invalid / outdated device object')
self.queue_index = queue_index or physical_device.get_queue_indices(
QueueType.COMPUTE)[0]
self.device = Device(physical_device, [(QueueType.COMPUTE, self.
queue_index)], validation_lvl=lava.VALIDATION_LEVEL)
self.buffers = set()
self.shaders = set()
self.stages = set()
sessions.add(self)
def _destroy(self):
for stage in self.stages:
stage.destroy()
for shader in self.shaders:
shader.destroy()
for buffer in self.buffers:
buffer.destroy()
self.device.destroy()
def register_buffer(self, buffer):
self.buffers.add(buffer)
def register_shader(self, shader):
self.shaders.add(shader)
def register_stage(self, stage):
self.stages.add(stage)
| # -*- coding: UTF-8 -*-
import lava
from lava.api.constants.vk import QueueType
from lava.api.device import Device
from lava.api.util import Destroyable
__all__ = ["Session"]
sessions = set()
class Session(Destroyable):
def __init__(self, physical_device, queue_index=None):
super(Session, self).__init__()
self.instance = lava.instance() # validation level might has been changed
if physical_device not in lava.devices():
raise RuntimeError("Provided invalid / outdated device object")
self.queue_index = queue_index or physical_device.get_queue_indices(QueueType.COMPUTE)[0]
self.device = Device(physical_device, [(QueueType.COMPUTE, self.queue_index)],
validation_lvl=lava.VALIDATION_LEVEL)
self.buffers = set()
self.shaders = set()
self.stages = set()
sessions.add(self)
def _destroy(self):
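        # Tear down registered resources (stages, shaders, buffers) before
        # destroying the device that owns them.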
for stage in self.stages:
stage.destroy()
for shader in self.shaders:
shader.destroy()
for buffer in self.buffers:
buffer.destroy()
self.device.destroy()
def register_buffer(self, buffer):
self.buffers.add(buffer)
def register_shader(self, shader):
self.shaders.add(shader)
def register_stage(self, stage):
self.stages.add(stage)
| [
5,
6,
7,
8,
9
] |
1,555 | 38e616e35f165d458d774dd0b6837a733b8402d7 | # -*- coding: utf-8 -*-
import sys
#from Constants import *
# start
import CrudMatrixDao
class CrudAccessValue:
def __init__(self):
self.crudAccessValue = {}
self.__run()
def __run(self):
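        # Build the crudGubun -> accessValue lookup from the CRUD matrix table.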
aCrudMatrixDao = CrudMatrixDao.CrudMatrixDao()
# print aCrudMatrixDao.selectCrudAccessValueAction()
for row in aCrudMatrixDao.selectCrudAccessValueAction():
            crudGubun = row[0]
            accessValue = row[1]
self.crudAccessValue[crudGubun] = accessValue
def getAccessValue(self, crudGubun):
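        # Fall back to the raw CRUD code when no access value is mapped for it.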
try:
out = self.crudAccessValue[crudGubun]
except KeyError:
out = crudGubun
return out
if __name__ == "__main__":
aCrudAccessValue = CrudAccessValue()
    print(aCrudAccessValue.getAccessValue('C'))
| null | null | null | null | [
0
] |
1,556 | 385dccfab4d7c37d10d968658b51e231691a7b49 | <mask token>
| <mask token>
if __name__ == '__main__':
if len(sys.argv) != 5:
print('Usage: {0} model_file feat_dir feat_dim output_file'.format(
sys.argv[0]))
print('model_file -- path of the trained svm file')
print('feat_dir -- dir of feature files')
print('file_list_path -- path of list file (val.lst or test.lst)')
print('output_file -- path to save the prediction score')
exit(1)
model_file = sys.argv[1]
feat_dir = sys.argv[2]
file_list_path = sys.argv[3]
output_file = sys.argv[4]
file_list = []
with open(file_list_path) as f:
for line in f.readlines():
L = line.replace('\n', ' ').split()
file_list.append(L[0])
smodel = pickle.load(open(model_file, 'rb'))
possible_results = ['NULL', 'P001', 'P002', 'P003']
pred = []
conf = []
print('SVM_MODEL: {}'.format(model_file))
for file in file_list:
bow_file = feat_dir + 'bow' + file + '.pkl'
if os.path.isfile(bow_file):
with open(bow_file, 'rb') as f:
data = pickle.load(f)
pred.extend(smodel.predict([data]))
conf.extend(smodel.decision_function([data]))
else:
pred.extend(['NULL'])
conf.extend([[1, 0, 0, 0]])
print('NUM PREDICTION TO TEST: {}'.format(len(pred)))
with open(output_file, 'w') as f:
for i in range(0, len(file_list)):
video = file_list[i]
f.write(str(video) + ' ' + pred[i] + '\n')
for i in range(1, 4):
print(output_file[0:-4] + '_' + possible_results[i] + '_val_label')
with open(output_file[0:-4] + '_' + possible_results[i] +
'_val_label', 'w') as f:
for j in range(0, len(pred)):
video = file_list[j]
if j < len(pred) - 1:
f.write(str(conf[j][i]) + ' # confidence for video ' +
video + '\n')
else:
f.write(str(conf[j][i]) + ' # confidence for video ' +
video + '\n')
| import numpy as np
import os
from sklearn.svm.classes import SVC
import pickle
import sys
if __name__ == '__main__':
if len(sys.argv) != 5:
print('Usage: {0} model_file feat_dir feat_dim output_file'.format(
sys.argv[0]))
print('model_file -- path of the trained svm file')
print('feat_dir -- dir of feature files')
print('file_list_path -- path of list file (val.lst or test.lst)')
print('output_file -- path to save the prediction score')
exit(1)
model_file = sys.argv[1]
feat_dir = sys.argv[2]
file_list_path = sys.argv[3]
output_file = sys.argv[4]
file_list = []
with open(file_list_path) as f:
for line in f.readlines():
L = line.replace('\n', ' ').split()
file_list.append(L[0])
smodel = pickle.load(open(model_file, 'rb'))
possible_results = ['NULL', 'P001', 'P002', 'P003']
pred = []
conf = []
print('SVM_MODEL: {}'.format(model_file))
for file in file_list:
bow_file = feat_dir + 'bow' + file + '.pkl'
if os.path.isfile(bow_file):
with open(bow_file, 'rb') as f:
data = pickle.load(f)
pred.extend(smodel.predict([data]))
conf.extend(smodel.decision_function([data]))
else:
pred.extend(['NULL'])
conf.extend([[1, 0, 0, 0]])
print('NUM PREDICTION TO TEST: {}'.format(len(pred)))
with open(output_file, 'w') as f:
for i in range(0, len(file_list)):
video = file_list[i]
f.write(str(video) + ' ' + pred[i] + '\n')
for i in range(1, 4):
print(output_file[0:-4] + '_' + possible_results[i] + '_val_label')
with open(output_file[0:-4] + '_' + possible_results[i] +
'_val_label', 'w') as f:
for j in range(0, len(pred)):
video = file_list[j]
if j < len(pred) - 1:
f.write(str(conf[j][i]) + ' # confidence for video ' +
video + '\n')
else:
f.write(str(conf[j][i]) + ' # confidence for video ' +
video + '\n')
| #!/bin/python
import numpy as np
import os
from sklearn.svm.classes import SVC
import pickle
import sys
# Apply the SVM model to the testing videos; Output the score for each video
if __name__ == '__main__':
if len(sys.argv) != 5:
print("Usage: {0} model_file feat_dir feat_dim output_file".format(sys.argv[0]))
print("model_file -- path of the trained svm file")
print("feat_dir -- dir of feature files")
print("file_list_path -- path of list file (val.lst or test.lst)")
print("output_file -- path to save the prediction score")
exit(1)
model_file = sys.argv[1]
feat_dir = sys.argv[2]
file_list_path = sys.argv[3]
output_file = sys.argv[4]
file_list = []
with open(file_list_path) as f:
for line in f.readlines():
L = line.replace('\n', ' ').split()
file_list.append(L[0])
smodel = pickle.load(open(model_file,"rb"))
possible_results = ['NULL', 'P001','P002','P003']
pred = []
conf = []
print('SVM_MODEL: {}'.format(model_file))
for file in file_list:
bow_file = feat_dir + 'bow' + file + '.pkl'
if os.path.isfile(bow_file):
with open(bow_file,'rb') as f:
data = pickle.load(f)
pred.extend(smodel.predict([data]))
conf.extend(smodel.decision_function([data]))
else:
pred.extend(['NULL'])
conf.extend([[1, 0, 0, 0]])
print('NUM PREDICTION TO TEST: {}'.format(len(pred)))
with open(output_file,'w') as f:
for i in range(0, len(file_list)):
video = file_list[i]
f.write(str(video) + ' ' + pred[i] + '\n')
for i in range(1,4):
# tmp = np.asarray(pred)
# template = np.zeros(np.size(tmp))
# with open(possible_results[i] +'_val','w') as f:
# ind = np.where(tmp == possible_results[i])
# for j in range(0, len(ind)):
# template[ind[j]] = 1
# for j in range(0, len(template)):
# f.write(str(int(template[j])) +'\n')
print(output_file[0:-4]+'_'+possible_results[i] +'_val_label')
with open(output_file[0:-4]+'_'+possible_results[i] +'_val_label','w') as f:
for j in range(0, len(pred)):
video = file_list[j]
if j< len(pred)-1:
f.write(str(conf[j][i])+' # confidence for video ' + video + '\n')
else:
f.write(str(conf[j][i])+' # confidence for video ' + video + '\n')
| null | [
0,
1,
2,
3
] |
1,557 | cddd5deba0ddc59a604d2926bdc687716e08f226 | <mask token>
class Solution:
<mask token>
<mask token>
<mask token>
| <mask token>
class Solution:
<mask token>
<mask token>
def checkLand(self, grid, x, y):
print(f'current checkLand(x,y) are {x}, {y}')
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
return grid[x][y] == 1
| <mask token>
class Solution:
<mask token>
def checkValid(self, grid, visited, x, y):
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
if (x, y) in visited:
return False
return grid[x][y] == 1
def checkLand(self, grid, x, y):
print(f'current checkLand(x,y) are {x}, {y}')
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
return grid[x][y] == 1
| <mask token>
class Solution:
def minDays(self, grid: List[List[int]]) ->int:
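        # Iterative DFS over the first island found: count each land cell's
        # land neighbours and track the minimum; a second, unvisited island
        # means the grid is already disconnected (return 0).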
i, j = 0, 0
islandExists = False
visited = dict()
leastAdjacent = 4
while i < len(grid):
while j < len(grid[i]):
if grid[i][j] == 1 and (i, j) not in visited:
if islandExists == True:
return 0
islandExists = True
s = list()
s.append((i, j))
while s:
n = 0
x, y = s.pop()
print(f'current coords are {x}, {y}')
visited[x, y] = True
if self.checkLand(grid, x - 1, y):
n += 1
if self.checkLand(grid, x + 1, y):
n += 1
if self.checkLand(grid, x, y - 1):
n += 1
if self.checkLand(grid, x, y + 1):
n += 1
leastAdjacent = min(leastAdjacent, n)
if self.checkValid(grid, visited, x - 1, y):
s.append((x - 1, y))
if self.checkValid(grid, visited, x + 1, y):
s.append((x + 1, y))
if self.checkValid(grid, visited, x, y - 1):
s.append((x, y - 1))
if self.checkValid(grid, visited, x, y + 1):
s.append((x, y + 1))
j += 1
i += 1
if len(grid[0]) == 2:
return 2
return leastAdjacent
def checkValid(self, grid, visited, x, y):
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
if (x, y) in visited:
return False
return grid[x][y] == 1
def checkLand(self, grid, x, y):
print(f'current checkLand(x,y) are {x}, {y}')
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
return grid[x][y] == 1
| #!/usr/bin/python3
"""
@author : Chris Phibbs
@created : Sunday Aug 30, 2020 14:05:56 AEST
@file : q3
"""
class Solution:
def minDays(self, grid: List[List[int]]) -> int:
# bfs - find 1, run bfs. Then loop through - if any other ones found then disconnected
i, j = 0, 0
islandExists = False
visited = dict()
leastAdjacent = 4
while i < len(grid):
while j < len(grid[i]):
if grid[i][j] == 1 and (i,j) not in visited:
# new land - return 0 if already disconnected from already found land
if islandExists == True: return 0
islandExists = True
# run bfs
s = list()
s.append((i,j))
while s:
n = 0
x, y = s.pop()
print(f"current coords are {x}, {y}")
visited[(x,y)] = True
if self.checkLand(grid, x-1, y): n+=1
if self.checkLand(grid, x+1, y): n+=1
if self.checkLand(grid, x, y-1): n+=1
if self.checkLand(grid, x, y+1): n+=1
leastAdjacent = min(leastAdjacent, n)
if self.checkValid(grid, visited, x-1, y): s.append((x-1, y))
if self.checkValid(grid, visited, x+1, y): s.append((x+1, y))
if self.checkValid(grid, visited, x, y-1): s.append((x, y-1))
if self.checkValid(grid, visited, x, y+1): s.append((x, y+1))
# Did not handle the "bridge" case - i.e. element of n == 2 that when removed disconnects everything
# TL;DR If not in the corner and n==2 then answer is 1
                j += 1
            i += 1
            j = 0  # bug fix: reset the column index, otherwise only the first row is ever scanned
if len(grid[0]) == 2: return 2
return leastAdjacent
# if land and not visited, run bfs
# else do nothing
# returns True if valid land
def checkValid(self, grid, visited, x, y):
if x < 0 or x >= len(grid): return False
if y < 0 or y >= len(grid[0]): return False
if (x,y) in visited: return False
return grid[x][y] == 1
def checkLand(self, grid, x, y):
print(f"current checkLand(x,y) are {x}, {y}")
if x < 0 or x >= len(grid): return False
if y < 0 or y >= len(grid[0]): return False
return grid[x][y] == 1
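# --- Editor's usage sketch (not part of the original solution) ---
# Minimal driver, assuming the typing import added above; each cell of the
# 2x2 island below has two land neighbours, so after the checkLand debug
# traces this prints 2.
if __name__ == '__main__':
    demo_grid = [[0, 1, 1, 0],
                 [0, 1, 1, 0],
                 [0, 0, 0, 0]]
    print(Solution().minDays(demo_grid))  # -> 2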
| [
1,
2,
3,
4,
5
] |
1,558 | 06dd963b62c0a746438dcf01c67ef5de1a4c5e8f | <mask token>
def collect(yt, dir):
code = yt.thumbnail_url
urllib.request.urlretrieve(code, os.path.join(dir, yt.title + '.jpg'))
out = yt.streams.filter(only_audio=True, file_extension='mp4').order_by(
'abr').desc().first().download(dir)
def list_update(code):
link = 'https://www.youtube.com/playlist?list=' + code
dir = os.path.join(p, code)
list = Playlist(link)
files = [os.path.splitext(filename)[0] for filename in os.listdir(dir)]
for l in list:
yt = YouTube(l)
if yt.title not in files:
collect(yt, dir)
def add_music(code):
dir = os.path.join(p, 'all')
link = 'https://www.youtube.com/watch?v=' + code
yt = YouTube(link)
collect(yt, os.path.join(p, 'all'))
<mask token>
| <mask token>
def collect(yt, dir):
code = yt.thumbnail_url
urllib.request.urlretrieve(code, os.path.join(dir, yt.title + '.jpg'))
out = yt.streams.filter(only_audio=True, file_extension='mp4').order_by(
'abr').desc().first().download(dir)
def list_update(code):
link = 'https://www.youtube.com/playlist?list=' + code
dir = os.path.join(p, code)
list = Playlist(link)
files = [os.path.splitext(filename)[0] for filename in os.listdir(dir)]
for l in list:
yt = YouTube(l)
if yt.title not in files:
collect(yt, dir)
def add_music(code):
dir = os.path.join(p, 'all')
link = 'https://www.youtube.com/watch?v=' + code
yt = YouTube(link)
collect(yt, os.path.join(p, 'all'))
<mask token>
if query == 'addlist':
code = sys.argv[2]
list = os.listdir(p)
if code not in list:
os.mkdir(os.path.join(p, code))
list_update(code)
elif query == 'addmusic':
code = sys.argv[2]
add_music(code)
elif query == 'update':
with open(os.path.abspath('playlists.json'), 'r', encoding='utf-8') as f:
dic = json.load(f)
l = dic['dcodes']
for code in l:
list_update(code)
| <mask token>
p = os.path.abspath('appdata')
def collect(yt, dir):
code = yt.thumbnail_url
urllib.request.urlretrieve(code, os.path.join(dir, yt.title + '.jpg'))
out = yt.streams.filter(only_audio=True, file_extension='mp4').order_by(
'abr').desc().first().download(dir)
def list_update(code):
link = 'https://www.youtube.com/playlist?list=' + code
dir = os.path.join(p, code)
list = Playlist(link)
files = [os.path.splitext(filename)[0] for filename in os.listdir(dir)]
for l in list:
yt = YouTube(l)
if yt.title not in files:
collect(yt, dir)
def add_music(code):
dir = os.path.join(p, 'all')
link = 'https://www.youtube.com/watch?v=' + code
yt = YouTube(link)
collect(yt, os.path.join(p, 'all'))
query = sys.argv[1]
if query == 'addlist':
code = sys.argv[2]
list = os.listdir(p)
if code not in list:
os.mkdir(os.path.join(p, code))
list_update(code)
elif query == 'addmusic':
code = sys.argv[2]
add_music(code)
elif query == 'update':
with open(os.path.abspath('playlists.json'), 'r', encoding='utf-8') as f:
dic = json.load(f)
l = dic['dcodes']
for code in l:
list_update(code)
| from pytube import YouTube, Playlist
import json
import sys
import os
import urllib.request
p = os.path.abspath('appdata')
def collect(yt, dir):
code = yt.thumbnail_url
urllib.request.urlretrieve(code, os.path.join(dir, yt.title + '.jpg'))
out = yt.streams.filter(only_audio=True, file_extension='mp4').order_by(
'abr').desc().first().download(dir)
def list_update(code):
link = 'https://www.youtube.com/playlist?list=' + code
dir = os.path.join(p, code)
list = Playlist(link)
files = [os.path.splitext(filename)[0] for filename in os.listdir(dir)]
for l in list:
yt = YouTube(l)
if yt.title not in files:
collect(yt, dir)
def add_music(code):
dir = os.path.join(p, 'all')
link = 'https://www.youtube.com/watch?v=' + code
yt = YouTube(link)
collect(yt, os.path.join(p, 'all'))
query = sys.argv[1]
if query == 'addlist':
code = sys.argv[2]
list = os.listdir(p)
if code not in list:
os.mkdir(os.path.join(p, code))
list_update(code)
elif query == 'addmusic':
code = sys.argv[2]
add_music(code)
elif query == 'update':
with open(os.path.abspath('playlists.json'), 'r', encoding='utf-8') as f:
dic = json.load(f)
l = dic['dcodes']
for code in l:
list_update(code)
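# --- Editor's usage note (not part of the original blob) ---
# The script is driven entirely by sys.argv; the script name and codes below
# are hypothetical:
#   python script.py addlist PLxxxxxxxxxxx    # mirror a playlist into appdata/<code>
#   python script.py addmusic dQw4w9WgXcQ     # fetch one video into appdata/all
#   python script.py update                   # refresh every code under "dcodes" in playlists.json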
| null | [
3,
4,
5,
6
] |
1,559 | 314f6cc97f53fa5bd8bf0ec0e1e305ca6384f1a2 | <mask token>
| <mask token>
class DojoBookAppConfig(AppConfig):
<mask token>
<mask token>
| <mask token>
class DojoBookAppConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'dojo_book_app'
| from django.apps import AppConfig
class DojoBookAppConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'dojo_book_app'
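# Editor's note (not part of the original blob): Django picks this class up
# when 'dojo_book_app' (or 'dojo_book_app.apps.DojoBookAppConfig') is listed
# in the project's INSTALLED_APPS setting.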
| null | [
0,
1,
2,
3
] |
1,560 | 98bf0a332a6753e500b24bed2af16fe4a1cb9568 | <mask token>
class RealVelocityController(RegulatorBaseClass):
settings = {'kp': 10, 'ki': 0, 'kd': 1}
v_d = 4
emergency_break_constant = 0.4
emergency_break_safety_factor = 1
def __init__(self):
self.orientation_controller = PID(**self.settings, signed_error=
True, deadzone=0.05)
self.dt = 0
self.last_commanded_velocity = Position()
def execute(self, robot: Robot, dt):
self.dt = dt
speed_norm = self.get_next_speed(robot)
path_correction = self.following_path_vector(robot)
velocity = (robot.position_error * speed_norm / robot.
position_error.norm + path_correction * speed_norm / self.v_d)
velocity /= max(1.0, abs(velocity.norm) / speed_norm)
cmd_orientation = self.orientation_controller.execute(robot.
orientation_error)
cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)
self.last_commanded_velocity = velocity
return Pose(velocity, cmd_orientation)
def following_path_vector(self, robot):
direction_error = (self.last_commanded_velocity - robot.velocity.
position)
if direction_error.norm > 0:
return normalize(direction_error)
else:
return direction_error
def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):
acceleration_offset = 1
emergency_break_offset = self.emergency_break_constant / self.dt * (
robot.current_speed / 1000)
emergency_break_offset = max(1.0, emergency_break_offset)
if robot.target_speed > robot.current_speed:
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
elif self.is_distance_for_break(robot, acc, offset=1):
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
else:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.
target_speed ** 2) / acc
if (robot.position_error.norm < distance / self.
emergency_break_safety_factor):
next_speed = (robot.current_speed - acc * self.dt *
emergency_break_offset)
else:
next_speed = robot.current_speed - acc * self.dt
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)
@staticmethod
def is_distance_for_break(robot, acc, offset=1) ->bool:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2
) / acc
return robot.position_error.norm > distance * offset
def reset(self):
self.orientation_controller.reset()
class GrSimVelocityController(RealVelocityController):
settings = {'kp': 2, 'ki': 0.3, 'kd': 0}
v_d = 15
emergency_break_constant = 0
emergency_break_safety_factor = 1
<mask token>
| <mask token>
class RealVelocityController(RegulatorBaseClass):
settings = {'kp': 10, 'ki': 0, 'kd': 1}
v_d = 4
emergency_break_constant = 0.4
emergency_break_safety_factor = 1
def __init__(self):
self.orientation_controller = PID(**self.settings, signed_error=
True, deadzone=0.05)
self.dt = 0
self.last_commanded_velocity = Position()
def execute(self, robot: Robot, dt):
self.dt = dt
speed_norm = self.get_next_speed(robot)
path_correction = self.following_path_vector(robot)
velocity = (robot.position_error * speed_norm / robot.
position_error.norm + path_correction * speed_norm / self.v_d)
velocity /= max(1.0, abs(velocity.norm) / speed_norm)
cmd_orientation = self.orientation_controller.execute(robot.
orientation_error)
cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)
self.last_commanded_velocity = velocity
return Pose(velocity, cmd_orientation)
def following_path_vector(self, robot):
direction_error = (self.last_commanded_velocity - robot.velocity.
position)
if direction_error.norm > 0:
return normalize(direction_error)
else:
return direction_error
def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):
acceleration_offset = 1
emergency_break_offset = self.emergency_break_constant / self.dt * (
robot.current_speed / 1000)
emergency_break_offset = max(1.0, emergency_break_offset)
if robot.target_speed > robot.current_speed:
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
elif self.is_distance_for_break(robot, acc, offset=1):
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
else:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.
target_speed ** 2) / acc
if (robot.position_error.norm < distance / self.
emergency_break_safety_factor):
next_speed = (robot.current_speed - acc * self.dt *
emergency_break_offset)
else:
next_speed = robot.current_speed - acc * self.dt
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)
@staticmethod
def is_distance_for_break(robot, acc, offset=1) ->bool:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2
) / acc
return robot.position_error.norm > distance * offset
def reset(self):
self.orientation_controller.reset()
class GrSimVelocityController(RealVelocityController):
settings = {'kp': 2, 'ki': 0.3, 'kd': 0}
v_d = 15
emergency_break_constant = 0
emergency_break_safety_factor = 1
def is_time_to_break(robot, destination, cruise_speed, acceleration,
target_speed):
offset = 1.2
dist_to_target = (destination - robot.pose.position).norm
return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *
acceleration) * offset
<mask token>
| <mask token>
config = Config()
class RealVelocityController(RegulatorBaseClass):
settings = {'kp': 10, 'ki': 0, 'kd': 1}
v_d = 4
emergency_break_constant = 0.4
emergency_break_safety_factor = 1
def __init__(self):
self.orientation_controller = PID(**self.settings, signed_error=
True, deadzone=0.05)
self.dt = 0
self.last_commanded_velocity = Position()
def execute(self, robot: Robot, dt):
self.dt = dt
speed_norm = self.get_next_speed(robot)
path_correction = self.following_path_vector(robot)
velocity = (robot.position_error * speed_norm / robot.
position_error.norm + path_correction * speed_norm / self.v_d)
velocity /= max(1.0, abs(velocity.norm) / speed_norm)
cmd_orientation = self.orientation_controller.execute(robot.
orientation_error)
cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)
self.last_commanded_velocity = velocity
return Pose(velocity, cmd_orientation)
def following_path_vector(self, robot):
direction_error = (self.last_commanded_velocity - robot.velocity.
position)
if direction_error.norm > 0:
return normalize(direction_error)
else:
return direction_error
def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):
acceleration_offset = 1
emergency_break_offset = self.emergency_break_constant / self.dt * (
robot.current_speed / 1000)
emergency_break_offset = max(1.0, emergency_break_offset)
if robot.target_speed > robot.current_speed:
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
elif self.is_distance_for_break(robot, acc, offset=1):
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
else:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.
target_speed ** 2) / acc
if (robot.position_error.norm < distance / self.
emergency_break_safety_factor):
next_speed = (robot.current_speed - acc * self.dt *
emergency_break_offset)
else:
next_speed = robot.current_speed - acc * self.dt
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)
@staticmethod
def is_distance_for_break(robot, acc, offset=1) ->bool:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2
) / acc
return robot.position_error.norm > distance * offset
def reset(self):
self.orientation_controller.reset()
class GrSimVelocityController(RealVelocityController):
settings = {'kp': 2, 'ki': 0.3, 'kd': 0}
v_d = 15
emergency_break_constant = 0
emergency_break_safety_factor = 1
def is_time_to_break(robot, destination, cruise_speed, acceleration,
target_speed):
offset = 1.2
dist_to_target = (destination - robot.pose.position).norm
return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *
acceleration) * offset
def optimal_speed(robot, destination, cruise_speed, acceleration, target_speed
):
dist_to_target = (destination - robot.pose.position).norm
return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target -
target_speed ** 2)))
| from math import sqrt
from Engine.regulators.PID import PID
from Engine.regulators.regulator_base_class import RegulatorBaseClass
from Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED
from Util import Pose
from Util.geometry import clamp, normalize
from Util.pose import Position
from config.config import Config
config = Config()
class RealVelocityController(RegulatorBaseClass):
settings = {'kp': 10, 'ki': 0, 'kd': 1}
v_d = 4
emergency_break_constant = 0.4
emergency_break_safety_factor = 1
def __init__(self):
self.orientation_controller = PID(**self.settings, signed_error=
True, deadzone=0.05)
self.dt = 0
self.last_commanded_velocity = Position()
def execute(self, robot: Robot, dt):
self.dt = dt
speed_norm = self.get_next_speed(robot)
path_correction = self.following_path_vector(robot)
velocity = (robot.position_error * speed_norm / robot.
position_error.norm + path_correction * speed_norm / self.v_d)
velocity /= max(1.0, abs(velocity.norm) / speed_norm)
cmd_orientation = self.orientation_controller.execute(robot.
orientation_error)
cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)
self.last_commanded_velocity = velocity
return Pose(velocity, cmd_orientation)
def following_path_vector(self, robot):
direction_error = (self.last_commanded_velocity - robot.velocity.
position)
if direction_error.norm > 0:
return normalize(direction_error)
else:
return direction_error
def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):
acceleration_offset = 1
emergency_break_offset = self.emergency_break_constant / self.dt * (
robot.current_speed / 1000)
emergency_break_offset = max(1.0, emergency_break_offset)
if robot.target_speed > robot.current_speed:
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
elif self.is_distance_for_break(robot, acc, offset=1):
next_speed = (robot.current_speed + acc * self.dt *
acceleration_offset)
else:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.
target_speed ** 2) / acc
if (robot.position_error.norm < distance / self.
emergency_break_safety_factor):
next_speed = (robot.current_speed - acc * self.dt *
emergency_break_offset)
else:
next_speed = robot.current_speed - acc * self.dt
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)
@staticmethod
def is_distance_for_break(robot, acc, offset=1) ->bool:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2
) / acc
return robot.position_error.norm > distance * offset
def reset(self):
self.orientation_controller.reset()
class GrSimVelocityController(RealVelocityController):
settings = {'kp': 2, 'ki': 0.3, 'kd': 0}
v_d = 15
emergency_break_constant = 0
emergency_break_safety_factor = 1
def is_time_to_break(robot, destination, cruise_speed, acceleration,
target_speed):
offset = 1.2
dist_to_target = (destination - robot.pose.position).norm
return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *
acceleration) * offset
def optimal_speed(robot, destination, cruise_speed, acceleration, target_speed
):
dist_to_target = (destination - robot.pose.position).norm
return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target -
target_speed ** 2)))
| from math import sqrt
from Engine.regulators.PID import PID
from Engine.regulators.regulator_base_class import RegulatorBaseClass
from Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED
from Util import Pose
from Util.geometry import clamp, normalize
from Util.pose import Position
from config.config import Config
config = Config()
class RealVelocityController(RegulatorBaseClass):
settings = {'kp': 10, 'ki': 0, 'kd': 1}
v_d = 4 # lower = bigger path correction
emergency_break_constant = 0.4 # Higher = higher correction of trajectory
emergency_break_safety_factor = 1 # lower = bigger break distance
def __init__(self):
self.orientation_controller = PID(**self.settings, signed_error=True, deadzone=0.05)
self.dt = 0
self.last_commanded_velocity = Position()
def execute(self, robot: Robot, dt):
self.dt = dt
speed_norm = self.get_next_speed(robot)
path_correction = self.following_path_vector(robot)
velocity = robot.position_error * speed_norm / robot.position_error.norm + path_correction * speed_norm / self.v_d
velocity /= max(1.0, abs(velocity.norm) / speed_norm)
cmd_orientation = self.orientation_controller.execute(robot.orientation_error)
cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)
self.last_commanded_velocity = velocity
return Pose(velocity, cmd_orientation)
def following_path_vector(self, robot):
direction_error = self.last_commanded_velocity - robot.velocity.position
if direction_error.norm > 0:
return normalize(direction_error)
else:
return direction_error
def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):
        acceleration_offset = 1  # we want the robot to be more aggressive at the start of a trajectory
        emergency_break_offset = self.emergency_break_constant / self.dt * (robot.current_speed / 1000)  # we want the robot to brake as hard as it can when it closes on the target too fast
emergency_break_offset = max(1.0, emergency_break_offset)
if robot.target_speed > robot.current_speed:
next_speed = robot.current_speed + acc * self.dt * acceleration_offset
else:
if self.is_distance_for_break(robot, acc, offset=1):
next_speed = robot.current_speed + acc * self.dt * acceleration_offset
else:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc
if robot.position_error.norm < (distance/self.emergency_break_safety_factor):
next_speed = robot.current_speed - acc * self.dt * emergency_break_offset
else:
next_speed = robot.current_speed - acc * self.dt
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)
@staticmethod
def is_distance_for_break(robot, acc, offset=1) -> bool:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc
return robot.position_error.norm > (distance * offset)
def reset(self):
self.orientation_controller.reset()
class GrSimVelocityController(RealVelocityController):
settings = {'kp': 2, 'ki': 0.3, 'kd': 0}
v_d = 15
emergency_break_constant = 0
emergency_break_safety_factor = 1 # lower = bigger break distance
def is_time_to_break(robot, destination, cruise_speed, acceleration, target_speed):
    # physics formula: v_final ** 2 = v_init ** 2 - 2 * acceleration * displacement
    offset = 1.2  # small margin so braking starts before the point, since there is some latency
dist_to_target = (destination - robot.pose.position).norm
return dist_to_target < (abs(cruise_speed ** 2 - target_speed**2) / (2 * acceleration)) * offset
def optimal_speed(robot, destination, cruise_speed, acceleration, target_speed):
    # physics formula: v_final ** 2 = v_init ** 2 - 2 * acceleration * displacement
dist_to_target = (destination - robot.pose.position).norm
return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - target_speed**2)))
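# --- Editor's usage sketch (not part of the original blob) ---
# `robot` and `destination` are assumed to come from the engine, as elsewhere
# in this module; both helpers rearrange v_f**2 = v_i**2 - 2*a*d:
#   if is_time_to_break(robot, destination, cruise_speed=2.0,
#                       acceleration=MAX_LINEAR_ACCELERATION, target_speed=0.0):
#       speed = optimal_speed(robot, destination, cruise_speed=2.0,
#                             acceleration=MAX_LINEAR_ACCELERATION, target_speed=0.0)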
| [
10,
11,
13,
14,
15
] |
1,561 | a486ec6b27a6b84e454a1bed096be9fe22d91612 | <mask token>
def makeimage(text, point_size=100, width=30):
tw = textwrap.TextWrapper(width=width)
text = '\n'.join(a.replace('\\n', '\n') for a in tw.wrap(text))
filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or
c.isdigit() or c in ['-', '_'])
os.system(COMMAND.format(point_size, text, filename))
def main():
text = None
if len(sys.argv) > 1:
pt = int(sys.argv[1])
width = int(-0.3 * float(sys.argv[1]) + 60)
if width < 10:
print('Too large.')
sys.exit(2)
if len(sys.argv) > 2:
text = ' '.join(sys.argv[2:])
else:
pt = 100
width = 30
if not text:
text = input('Text: ')
makeimage(text, pt, width)
<mask token>
| <mask token>
def makeimage(text, point_size=100, width=30):
tw = textwrap.TextWrapper(width=width)
text = '\n'.join(a.replace('\\n', '\n') for a in tw.wrap(text))
filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or
c.isdigit() or c in ['-', '_'])
os.system(COMMAND.format(point_size, text, filename))
def main():
text = None
if len(sys.argv) > 1:
pt = int(sys.argv[1])
width = int(-0.3 * float(sys.argv[1]) + 60)
if width < 10:
print('Too large.')
sys.exit(2)
if len(sys.argv) > 2:
text = ' '.join(sys.argv[2:])
else:
pt = 100
width = 30
if not text:
text = input('Text: ')
makeimage(text, pt, width)
if __name__ == '__main__':
main()
| <mask token>
COMMAND = (
'convert -size 1920x1080 canvas:"rgb(149, 1, 1)" -font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none -fill white -annotate 0 "{1}" -size 1920x1080 "{2}.png"'
)
def makeimage(text, point_size=100, width=30):
tw = textwrap.TextWrapper(width=width)
text = '\n'.join(a.replace('\\n', '\n') for a in tw.wrap(text))
filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or
c.isdigit() or c in ['-', '_'])
os.system(COMMAND.format(point_size, text, filename))
def main():
text = None
if len(sys.argv) > 1:
pt = int(sys.argv[1])
width = int(-0.3 * float(sys.argv[1]) + 60)
if width < 10:
print('Too large.')
sys.exit(2)
if len(sys.argv) > 2:
text = ' '.join(sys.argv[2:])
else:
pt = 100
width = 30
if not text:
text = input('Text: ')
makeimage(text, pt, width)
if __name__ == '__main__':
main()
| import os
import sys
import textwrap
COMMAND = (
'convert -size 1920x1080 canvas:"rgb(149, 1, 1)" -font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none -fill white -annotate 0 "{1}" -size 1920x1080 "{2}.png"'
)
def makeimage(text, point_size=100, width=30):
tw = textwrap.TextWrapper(width=width)
text = '\n'.join(a.replace('\\n', '\n') for a in tw.wrap(text))
filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or
c.isdigit() or c in ['-', '_'])
os.system(COMMAND.format(point_size, text, filename))
def main():
text = None
if len(sys.argv) > 1:
pt = int(sys.argv[1])
width = int(-0.3 * float(sys.argv[1]) + 60)
if width < 10:
print('Too large.')
sys.exit(2)
if len(sys.argv) > 2:
text = ' '.join(sys.argv[2:])
else:
pt = 100
width = 30
if not text:
text = input('Text: ')
makeimage(text, pt, width)
if __name__ == '__main__':
main()
 | #!/usr/bin/env python3
import os
import sys
import textwrap
COMMAND = (
'convert -size 1920x1080 canvas:"rgb(149, 1, 1)" '
'-font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none '
'-fill white -annotate 0 "{1}" -size 1920x1080 "{2}.png"'
)
def makeimage(text, point_size=100, width=30):
tw = textwrap.TextWrapper(width=width)
text = "\n".join(
a.replace("\\n", "\n") for a in tw.wrap(text)
)
filename = "".join(
c
for c in text.replace(" ", "-")
if c.isalpha() or c.isdigit() or c in ["-", "_"]
)
os.system(COMMAND.format(point_size, text, filename))
def main():
text = None
if len(sys.argv) > 1:
pt = int(sys.argv[1])
width = int(-0.3 * float(sys.argv[1]) + 60)
if width < 10:
print("Too large.")
sys.exit(2)
if len(sys.argv) > 2:
text = " ".join(sys.argv[2:])
else:
pt = 100
width = 30
if not text:
text = input("Text: ")
makeimage(text, pt, width)
if __name__ == '__main__':
main()
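# --- Editor's usage note (not part of the original blob) ---
# Requires ImageMagick's `convert` binary on PATH; the script name is assumed:
#   python makeimage.py                  # prompts for text, renders at 100pt
#   python makeimage.py 72 Hello world   # 72pt; the wrap width shrinks as point size grows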
| [
2,
3,
4,
5,
6
] |
1,562 | 8cba57e3552e0072720fe42fa1949534f29d71b5 | <mask token>
def WriteToFile(f, output, condition=False, conditionID=''):
f.write(output if not condition else WrapInGuard(conditionID, output))
<mask token>
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
<mask token>
def WriteRequiredVariables(f):
variables = [dict(var='INCLUDES', value='""'), dict(var='SOURCES',
value='""'), dict(var='LIBS', value='""')]
for v in variables:
f.write(TMakeVariable.substitute(v))
def WriteDefinitions(f, sections):
for s in sections:
defs = s.data[':']
output = ''
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
<mask token>
def WriteProjectLibDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else '/' + d
d = rootDir + d
output = TLinkDirectory.substitute(dict(dir=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteLinkLibs(f, rootDir, sections):
for s in sections:
libs = s.data[':']
output = ''
for l in libs:
if '-framework' in l:
frameworkName = l.replace('-framework ', '')
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=
frameworkName)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-system' in l:
systemLibName = l.replace('-system ', '')
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=
systemLibName, framework_upper=systemLibName.upper())
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-object' in l:
objectLibName = l.replace('-object ', '')
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
else:
output = TAppendPythonVariable.substitute(dict(var='LIBS',
appendedval=l))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteOutputs(f, rootDir, sections):
for s in sections:
if 'Executable' in s.data:
runtime = s.data['Executable']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Runtime' in s.data:
runtime = s.data['Runtime']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Libs' in s.data:
print('LIBS OUTPUT BEING SET')
statics = s.data['Libs']
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else '/' + statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data['Name']
t = m.settings.data['Type']
if 'exe' in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'shared' in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'static' in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'object' in t:
f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
return None
<mask token>
| <mask token>
def WriteToFile(f, output, condition=False, conditionID=''):
f.write(output if not condition else WrapInGuard(conditionID, output))
<mask token>
def Strip(s):
chars = '${}'
for i in range(0, len(chars)):
s = s.replace(chars[i], '')
return s
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
if 'UseFolders' not in section.data:
section.data['UseFolders'] = 'OFF'
output = TProjectSettings.substitute(section.data)
f.write(output)
def WriteRequiredVariables(f):
variables = [dict(var='INCLUDES', value='""'), dict(var='SOURCES',
value='""'), dict(var='LIBS', value='""')]
for v in variables:
f.write(TMakeVariable.substitute(v))
def WriteDefinitions(f, sections):
for s in sections:
defs = s.data[':']
output = ''
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteIncludeDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
localDir = d if d.startswith('/') else '/' + d
headerID = Strip(localDir.replace('/', '_'))
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
output = TIncludeDirectory.substitute(dict(dir=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = TAppendVariable.substitute(dict(var='HEADERS',
appendedval=headerID))
WriteToFile(f, output, s.HasCondition(), s.condition)
localDir = Strip(localDir.replace('/', '\\\\'))
output = TSourceGroup.substitute(dict(folder='Header Files' +
localDir, files=headerID))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteSourceDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
localDir = d if d.startswith('/') else '/' + d
sourceID = Strip(localDir.replace('/', '_'))
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = TAppendVariable.substitute(dict(var='SOURCES',
appendedval=sourceID))
WriteToFile(f, output, s.HasCondition(), s.condition)
localDir = Strip(localDir.replace('/', '\\\\'))
output = TSourceGroup.substitute(dict(folder='Source Files' +
localDir, files=sourceID))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteProjectLibDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else '/' + d
d = rootDir + d
output = TLinkDirectory.substitute(dict(dir=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteLinkLibs(f, rootDir, sections):
for s in sections:
libs = s.data[':']
output = ''
for l in libs:
if '-framework' in l:
frameworkName = l.replace('-framework ', '')
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=
frameworkName)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-system' in l:
systemLibName = l.replace('-system ', '')
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=
systemLibName, framework_upper=systemLibName.upper())
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-object' in l:
objectLibName = l.replace('-object ', '')
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
else:
output = TAppendPythonVariable.substitute(dict(var='LIBS',
appendedval=l))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteOutputs(f, rootDir, sections):
for s in sections:
if 'Executable' in s.data:
runtime = s.data['Executable']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Runtime' in s.data:
runtime = s.data['Runtime']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Libs' in s.data:
print('LIBS OUTPUT BEING SET')
statics = s.data['Libs']
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else '/' + statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data['Name']
t = m.settings.data['Type']
if 'exe' in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'shared' in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'static' in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'object' in t:
f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
return None
<mask token>
| <mask token>
def WriteToFile(f, output, condition=False, conditionID=''):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return '$' in s
def Strip(s):
chars = '${}'
for i in range(0, len(chars)):
s = s.replace(chars[i], '')
return s
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
if 'UseFolders' not in section.data:
section.data['UseFolders'] = 'OFF'
output = TProjectSettings.substitute(section.data)
f.write(output)
def WriteRequiredVariables(f):
variables = [dict(var='INCLUDES', value='""'), dict(var='SOURCES',
value='""'), dict(var='LIBS', value='""')]
for v in variables:
f.write(TMakeVariable.substitute(v))
def WriteDefinitions(f, sections):
for s in sections:
defs = s.data[':']
output = ''
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteIncludeDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
localDir = d if d.startswith('/') else '/' + d
headerID = Strip(localDir.replace('/', '_'))
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
output = TIncludeDirectory.substitute(dict(dir=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = TAppendVariable.substitute(dict(var='HEADERS',
appendedval=headerID))
WriteToFile(f, output, s.HasCondition(), s.condition)
localDir = Strip(localDir.replace('/', '\\\\'))
output = TSourceGroup.substitute(dict(folder='Header Files' +
localDir, files=headerID))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteSourceDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
localDir = d if d.startswith('/') else '/' + d
sourceID = Strip(localDir.replace('/', '_'))
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = TAppendVariable.substitute(dict(var='SOURCES',
appendedval=sourceID))
WriteToFile(f, output, s.HasCondition(), s.condition)
localDir = Strip(localDir.replace('/', '\\\\'))
output = TSourceGroup.substitute(dict(folder='Source Files' +
localDir, files=sourceID))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteProjectLibDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else '/' + d
d = rootDir + d
output = TLinkDirectory.substitute(dict(dir=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteLinkLibs(f, rootDir, sections):
for s in sections:
libs = s.data[':']
output = ''
for l in libs:
if '-framework' in l:
frameworkName = l.replace('-framework ', '')
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=
frameworkName)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-system' in l:
systemLibName = l.replace('-system ', '')
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=
systemLibName, framework_upper=systemLibName.upper())
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-object' in l:
objectLibName = l.replace('-object ', '')
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
else:
output = TAppendPythonVariable.substitute(dict(var='LIBS',
appendedval=l))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteOutputs(f, rootDir, sections):
for s in sections:
if 'Executable' in s.data:
runtime = s.data['Executable']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Runtime' in s.data:
runtime = s.data['Runtime']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Libs' in s.data:
print('LIBS OUTPUT BEING SET')
statics = s.data['Libs']
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else '/' + statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data['Name']
t = m.settings.data['Type']
if 'exe' in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'shared' in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'static' in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'object' in t:
f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
return None
def WriteSubmoduleIncludes(f, rootDir, sections):
for s in sections:
submods = s.data[':']
for sm in submods:
sm = sm if sm.startswith('/') else '/' + sm
output = TSubmoduleInclude.substitute(dict(dir=rootDir + sm)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
| <mask token>
TIfGuard = Template("""if(${condition})
${innerbody}
endif()
""")
TProjectSettings = Template(
"""cmake_minimum_required (VERSION ${MinCmakeVer})
project(${Name})
set_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
"""
)
TDefinition = Template('add_definitions(-D${definition})')
TIncludeDirectory = Template('include_directories("${dir}")')
TSourceGlob = Template('FILE(GLOB ${source_id} "${dir}/*.c*")')
THeaderGlob = Template('FILE(GLOB ${header_id} "${dir}/*.h*")')
TSourceGroup = Template('source_group("${folder}" FILES $${${files}})\n')
TExecutable = Template('add_executable(${project} $${SOURCES} $${HEADERS})\n')
TSharedLib = Template(
'add_library(${project} SHARED $${SOURCES} $${HEADERS})\n')
TStaticLib = Template(
'add_library(${project} STATIC $${SOURCES} $${HEADERS})\n')
TObjectLib = Template('add_library(${project} OBJECT $${SOURCES})\n')
TAppendVariable = Template('set( ${var} $${${var}} $${${appendedval}})\n')
TAppendPythonVariable = Template('set( ${var} $${${var}} ${appendedval})\n')
TMakeVariable = Template('set (${var} ${value})\n')
TLinkDirectory = Template('link_directories("${dir}")')
TTargetLinkLibs = Template(
"""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
"""
)
TLinkFramework = Template(
"""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})"""
)
TLinkSystemLib = Template(
"""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})"""
)
TLinkObject = Template('set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)')
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
TLibraryoutput = Template(
"""set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")
set(LIBRARY_OUTPUT_PATH "${dir}")
"""
)
TSubmoduleInclude = Template('add_subdirectory(${dir})')
def WriteToFile(f, output, condition=False, conditionID=''):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return '$' in s
def Strip(s):
chars = '${}'
for i in range(0, len(chars)):
s = s.replace(chars[i], '')
return s
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
if 'UseFolders' not in section.data:
section.data['UseFolders'] = 'OFF'
output = TProjectSettings.substitute(section.data)
f.write(output)
def WriteRequiredVariables(f):
variables = [dict(var='INCLUDES', value='""'), dict(var='SOURCES',
value='""'), dict(var='LIBS', value='""')]
for v in variables:
f.write(TMakeVariable.substitute(v))
def WriteDefinitions(f, sections):
for s in sections:
defs = s.data[':']
output = ''
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteIncludeDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
localDir = d if d.startswith('/') else '/' + d
headerID = Strip(localDir.replace('/', '_'))
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
output = TIncludeDirectory.substitute(dict(dir=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = TAppendVariable.substitute(dict(var='HEADERS',
appendedval=headerID))
WriteToFile(f, output, s.HasCondition(), s.condition)
localDir = Strip(localDir.replace('/', '\\\\'))
output = TSourceGroup.substitute(dict(folder='Header Files' +
localDir, files=headerID))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteSourceDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
localDir = d if d.startswith('/') else '/' + d
sourceID = Strip(localDir.replace('/', '_'))
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
output = TAppendVariable.substitute(dict(var='SOURCES',
appendedval=sourceID))
WriteToFile(f, output, s.HasCondition(), s.condition)
localDir = Strip(localDir.replace('/', '\\\\'))
output = TSourceGroup.substitute(dict(folder='Source Files' +
localDir, files=sourceID))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteProjectLibDirectories(f, rootDir, sections):
for s in sections:
dirs = s.data[':']
output = ''
for d in dirs:
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else '/' + d
d = rootDir + d
output = TLinkDirectory.substitute(dict(dir=d)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteLinkLibs(f, rootDir, sections):
for s in sections:
libs = s.data[':']
output = ''
for l in libs:
if '-framework' in l:
frameworkName = l.replace('-framework ', '')
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=
frameworkName)) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-system' in l:
systemLibName = l.replace('-system ', '')
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=
systemLibName, framework_upper=systemLibName.upper())
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
elif '-object' in l:
objectLibName = l.replace('-object ', '')
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
else:
output = TAppendPythonVariable.substitute(dict(var='LIBS',
appendedval=l))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteOutputs(f, rootDir, sections):
for s in sections:
if 'Executable' in s.data:
runtime = s.data['Executable']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Runtime' in s.data:
runtime = s.data['Runtime']
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else '/' + runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f, output, s.HasCondition(), s.condition)
if 'Libs' in s.data:
print('LIBS OUTPUT BEING SET')
statics = s.data['Libs']
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else '/' + statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f, output, s.HasCondition(), s.condition)
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data['Name']
t = m.settings.data['Type']
if 'exe' in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'shared' in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'static' in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif 'object' in t:
f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
return None
def WriteSubmoduleIncludes(f, rootDir, sections):
for s in sections:
submods = s.data[':']
for sm in submods:
sm = sm if sm.startswith('/') else '/' + sm
output = TSubmoduleInclude.substitute(dict(dir=rootDir + sm)
) + '\n'
WriteToFile(f, output, s.HasCondition(), s.condition)
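# Editor's note (not part of the original blob): WriteRequiredVariables must
# run before the other writers, since templates such as TTargetLinkLibs test
# the LIBS variable it initializes.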
| from string import Template
import os
#-----template objects-----
#for putting a template inside an ifdef guard
TIfGuard = Template("""if(${condition})
${innerbody}
endif()\n""")
#For minimum cmake version and project name
TProjectSettings = Template("""cmake_minimum_required (VERSION ${MinCmakeVer})
project(${Name})
set_property(GLOBAL PROPERTY USE_FOLDERS ${UseFolders})
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n""")
#for including a definition
TDefinition = Template("add_definitions(-D${definition})")
#include directories
TIncludeDirectory = Template('include_directories("${dir}")')
#for globbing source files in a dir
TSourceGlob = Template('FILE(GLOB ${source_id} "${dir}/*.c*")')
#for globbing header files in a dir
THeaderGlob = Template('FILE(GLOB ${header_id} "${dir}/*.h*")')
#template for source group (so they appear in VS filters etc.
TSourceGroup = Template('source_group("${folder}" FILES $${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES}")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
TLinkObject = Template("set(LIBS $${LIBS} $<TARGET_OBJECTS>:${object})")
#template for executable output path
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output directory
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
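#Editor's example (not part of the original blob): how these templates expand.
#"$$" escapes to a literal "$", so CMake's own ${...} variables survive
#Python's substitution:
#  TIncludeDirectory.substitute(dict(dir="/proj/include"))
#    -> 'include_directories("/proj/include")'
#  TAppendVariable.substitute(dict(var="SOURCES", appendedval="src_core"))
#    -> 'set( SOURCES ${SOURCES} ${src_core})\n'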
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
chars = "${}"
for i in range(0,len(chars)):
s=s.replace(chars[i],"")
return s
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Runtime" in s.data:
runtime = s.data["Runtime"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Libs" in s.data:
print("LIBS OUTPUT BEING SET")
statics = s.data["Libs"]
#insert any environment variables
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else "/"+statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the module output section of the CmakeLists file
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data["Name"] #name of lib/exe
t = m.settings.data["Type"] #build type (lib/exe)
if "exe" in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "shared" in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "static" in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "object" in t:
f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
return None
#writes the include for a submodule
def WriteSubmoduleIncludes(f, rootDir, sections):
for s in sections:
submods = s.data[":"]
for sm in submods:
sm = sm if sm.startswith('/') else "/"+sm
output = TSubmoduleInclude.substitute(dict(dir=rootDir+sm)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition) | [
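# Every Write* helper above follows the same shape: normalize a path, fill a
# string.Template, hand the text to WriteToFile. A minimal self-contained
# sketch of that pattern (TExampleInclude is hypothetical; the real templates
# such as TSubmoduleInclude are defined elsewhere in this generator):
from string import Template

TExampleInclude = Template("add_subdirectory(${dir})")

def render_includes(root_dir, subdirs):
    lines = []
    for sm in subdirs:
        sm = sm if sm.startswith("/") else "/" + sm
        lines.append(TExampleInclude.substitute(dict(dir=root_dir + sm)))
    return "\n".join(lines)

# render_includes("proj", ["src", "/tests"])
# -> "add_subdirectory(proj/src)\nadd_subdirectory(proj/tests)"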
8,
12,
15,
16,
18
] |
1,563 | 602d2c545c6e3eabe5c6285d2ab0c7f4216a00f5 | <mask token>
class bcolors:
RED = '\x1b[31m'
GREEN = '\x1b[32m'
NORMAL = '\x1b[0m'
def check_result(title, map1, map2):
result = True
print(title)
for y in range(0, common.constants.MAP_HEIGHT):
v = ''
for x in range(0, common.constants.MAP_WIDTH):
if map1[y][x] == map2[y][x]:
v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL
else:
result = False
v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL
print(v)
if result:
print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)
else:
print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)
return result
<mask token>
| <mask token>
class bcolors:
RED = '\x1b[31m'
GREEN = '\x1b[32m'
NORMAL = '\x1b[0m'
def check_result(title, map1, map2):
result = True
print(title)
for y in range(0, common.constants.MAP_HEIGHT):
v = ''
for x in range(0, common.constants.MAP_WIDTH):
if map1[y][x] == map2[y][x]:
v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL
else:
result = False
v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL
print(v)
if result:
print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)
else:
print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)
return result
<mask token>
common.set_map(gold_dfmap1, gold_df1)
<mask token>
common.set_map(dfmap1, data1)
<mask token>
common.set_map(gold_dfmap2, gold_df2)
<mask token>
common.set_map(dfmap2, data2)
<mask token>
common.set_map(gold_dfmap3, gold_df3)
<mask token>
common.set_map(dfmap3, data3)
<mask token>
if all_passed:
exit(0)
else:
exit(1)
| <mask token>
class bcolors:
RED = '\x1b[31m'
GREEN = '\x1b[32m'
NORMAL = '\x1b[0m'
def check_result(title, map1, map2):
result = True
print(title)
for y in range(0, common.constants.MAP_HEIGHT):
v = ''
for x in range(0, common.constants.MAP_WIDTH):
if map1[y][x] == map2[y][x]:
v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL
else:
result = False
v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL
print(v)
if result:
print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)
else:
print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)
return result
data1 = '100000011110111011111111011110000003111111011111020000'
gold_df1 = '100000011110111011111111011110000555111111511111055540'
data2 = '200000011011111011000001011111011003111111011111000011111111011'
gold_df2 = '555555511411111511444441511111411555111111011111000011111111011'
data3 = '100000011111011011000011011111011003110011011111200011'
gold_df3 = '100000011111011011000011011111411555110411511111555511'
all_passed = True
gold_dfmap1 = common.init_map()
common.set_map(gold_dfmap1, gold_df1)
dfmap1 = common.init_map()
common.set_map(dfmap1, data1)
df1 = student_code.astar_search(dfmap1)
tdf1 = 'Reachable goal:'
cdf1 = check_result(tdf1, dfmap1, gold_dfmap1)
all_passed = all_passed and cdf1 and df1
gold_dfmap2 = common.init_map()
common.set_map(gold_dfmap2, gold_df2)
dfmap2 = common.init_map()
common.set_map(dfmap2, data2)
df2 = student_code.astar_search(dfmap2)
tdf2 = 'Reachable goal:'
cdf2 = check_result(tdf2, dfmap2, gold_dfmap2)
all_passed = all_passed and cdf2 and df2
gold_dfmap3 = common.init_map()
common.set_map(gold_dfmap3, gold_df3)
dfmap3 = common.init_map()
common.set_map(dfmap3, data3)
df3 = student_code.astar_search(dfmap3)
tdf3 = 'Reachable goal:'
cdf3 = check_result(tdf3, dfmap3, gold_dfmap3)
all_passed = all_passed and cdf3 and df3
if all_passed:
exit(0)
else:
exit(1)
| import common
import student_code
class bcolors:
RED = '\x1b[31m'
GREEN = '\x1b[32m'
NORMAL = '\x1b[0m'
def check_result(title, map1, map2):
result = True
print(title)
for y in range(0, common.constants.MAP_HEIGHT):
v = ''
for x in range(0, common.constants.MAP_WIDTH):
if map1[y][x] == map2[y][x]:
v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL
else:
result = False
v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL
print(v)
if result:
print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)
else:
print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)
return result
data1 = '100000011110111011111111011110000003111111011111020000'
gold_df1 = '100000011110111011111111011110000555111111511111055540'
data2 = '200000011011111011000001011111011003111111011111000011111111011'
gold_df2 = '555555511411111511444441511111411555111111011111000011111111011'
data3 = '100000011111011011000011011111011003110011011111200011'
gold_df3 = '100000011111011011000011011111411555110411511111555511'
all_passed = True
gold_dfmap1 = common.init_map()
common.set_map(gold_dfmap1, gold_df1)
dfmap1 = common.init_map()
common.set_map(dfmap1, data1)
df1 = student_code.astar_search(dfmap1)
tdf1 = 'Reachable goal:'
cdf1 = check_result(tdf1, dfmap1, gold_dfmap1)
all_passed = all_passed and cdf1 and df1
gold_dfmap2 = common.init_map()
common.set_map(gold_dfmap2, gold_df2)
dfmap2 = common.init_map()
common.set_map(dfmap2, data2)
df2 = student_code.astar_search(dfmap2)
tdf2 = 'Reachable goal:'
cdf2 = check_result(tdf2, dfmap2, gold_dfmap2)
all_passed = all_passed and cdf2 and df2
gold_dfmap3 = common.init_map()
common.set_map(gold_dfmap3, gold_df3)
dfmap3 = common.init_map()
common.set_map(dfmap3, data3)
df3 = student_code.astar_search(dfmap3)
tdf3 = 'Reachable goal:'
cdf3 = check_result(tdf3, dfmap3, gold_dfmap3)
all_passed = all_passed and cdf3 and df3
if all_passed:
exit(0)
else:
exit(1)
| import common
import student_code
class bcolors:
RED = "\x1b[31m"
GREEN = "\x1b[32m"
NORMAL = "\x1b[0m"
def check_result(title, map1, map2):
result=True
print(title)
for y in range(0,common.constants.MAP_HEIGHT):
v=""
for x in range(0,common.constants.MAP_WIDTH):
if (map1[y][x]==map2[y][x]):
v+=bcolors.GREEN+str(map1[y][x])+bcolors.NORMAL
else:
result = False
v+=bcolors.RED+str(map1[y][x])+bcolors.NORMAL
print(v)
if (result):
print("Test Result: " + bcolors.GREEN+"Passed"+bcolors.NORMAL)
else:
print("Test Result: " + bcolors.RED+"Failed"+bcolors.NORMAL)
return result
data1 = (
"100000011"
"110111011"
"111111011"
"110000003"
"111111011"
"111020000")
gold_df1 = ("100000011"
"110111011"
"111111011"
"110000555"
"111111511"
"111055540")
data2 = (
"200000011"
"011111011"
"000001011"
"111011003"
"111111011"
"111000011"
"111111011")
gold_df2 = ("555555511"
"411111511"
"444441511"
"111411555"
"111111011"
"111000011"
"111111011")
data3 = (
"100000011"
"111011011"
"000011011"
"111011003"
"110011011"
"111200011")
gold_df3 = (
"100000011"
"111011011"
"000011011"
"111411555"
"110411511"
"111555511")
all_passed = True
gold_dfmap1 = common.init_map();
common.set_map(gold_dfmap1, gold_df1)
dfmap1 = common.init_map()
common.set_map(dfmap1, data1)
df1 = student_code.astar_search(dfmap1)
tdf1 ="Reachable goal:"
cdf1 = check_result(tdf1,dfmap1,gold_dfmap1)
all_passed = all_passed and cdf1 and df1
gold_dfmap2 = common.init_map();
common.set_map(gold_dfmap2, gold_df2)
dfmap2 = common.init_map()
common.set_map(dfmap2, data2)
df2 = student_code.astar_search(dfmap2)
tdf2 ="Reachable goal:"
cdf2 = check_result(tdf2,dfmap2,gold_dfmap2)
all_passed = all_passed and cdf2 and df2
gold_dfmap3 = common.init_map();
common.set_map(gold_dfmap3, gold_df3)
dfmap3 = common.init_map()
common.set_map(dfmap3, data3)
df3 = student_code.astar_search(dfmap3)
tdf3 ="Reachable goal:"
cdf3 = check_result(tdf3,dfmap3,gold_dfmap3)
all_passed = all_passed and cdf3 and df3
if all_passed:
exit(0)
else:
exit(1)
| [
3,
4,
5,
6,
7
] |
1,564 | 27d5ff5b0253eea36d6b492e929c4220f4b4a5eb | <mask token>
class ModelIncrStateFlattener(BaseIncrStateFlattener):
<mask token>
def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,
torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = (self.module.
reorder_decoder_incremental_state(incremental_state=
structured_incr_state, inds=inds))
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) ->torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) ->List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return [
'*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
]
if text[idx] == "'":
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1:idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(
text) and not text[idx + 1].isspace():
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isalpha():
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
else:
while last_matching_idx + 1 < len(text) and not text[
last_matching_idx + 1].isspace() and not text[
last_matching_idx + 1].isalpha() and not text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
tokens.append(text[idx:last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
last_space_idx = idx + 1
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
if last_space_idx + 1 == len(text):
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
last_space_idx = idx
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
return tokens
def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],
byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str]):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) ->List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
return output
def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:
"""
Return the list of adjacent symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
list of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) ->List[str]:
"""
Convert token to BPE.
:param word:
list of tokens to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')
)
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1
] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
def helper_encode(self, text: str) ->List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) ->List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
The logic is simple: looking at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 128:
chars.append(s[i])
i += 1
else:
if byte < 224:
num_bytes = 2
elif byte < 240:
num_bytes = 3
elif byte < 248:
num_bytes = 4
elif byte < 252:
num_bytes = 5
elif byte < 254:
num_bytes = 6
elif byte < 255:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i:i + num_bytes])
i += num_bytes
return chars
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(self, null_token: str, end_token: str, unk_token: str,
start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],
ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:
Dict[str, str], bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
self._unk_token_idx = self.tok2ind[self.unk_token]
self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=
bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=
bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens)
def _word_lookup(self, key: str) ->int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) ->str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) ->List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) ->List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) ->List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) ->str:
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
| <mask token>
class BaseIncrStateFlattener(nn.Module):
<mask token>
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
<mask token>
def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,
Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:
"""
Flatten the input incremental state.
For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
in flat_incr_state['0__self_attn__prev_key'].
"""
flat_incr_state = {}
for layer_idx, dict1 in structured_incr_state.items():
for attn_type, dict2 in dict1.items():
for state_type, state in dict2.items():
key = f'{layer_idx:d}__{attn_type}__{state_type}'
flat_incr_state[key] = state
return flat_incr_state
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .forward().
"""
def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.
Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.
Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if flat_incr_state is not None:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
else:
structured_incr_state = None
tensor, new_structured_incr_state = self.module.forward(input=
input_, encoder_state=encoder_state, incr_state=
structured_incr_state)
new_flat_incr_state = self._flatten_incr_state(
new_structured_incr_state)
return tensor, new_flat_incr_state
class ModelIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .reorder_decoder_incremental_state(). We also support .output(), which is also
traced.
"""
def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,
torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = (self.module.
reorder_decoder_incremental_state(incremental_state=
structured_incr_state, inds=inds))
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) ->torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) ->List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return [
'*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
]
if text[idx] == "'":
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1:idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(
text) and not text[idx + 1].isspace():
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isalpha():
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
else:
while last_matching_idx + 1 < len(text) and not text[
last_matching_idx + 1].isspace() and not text[
last_matching_idx + 1].isalpha() and not text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
tokens.append(text[idx:last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
last_space_idx = idx + 1
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
if last_space_idx + 1 == len(text):
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
last_space_idx = idx
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
return tokens
def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],
byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str]):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) ->List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
return output
def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:
"""
Return the list of adjacent symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
list of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) ->List[str]:
"""
Convert token to BPE.
:param word:
list of tokens to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')
)
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1
] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
def helper_encode(self, text: str) ->List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) ->List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
The logic is simple: looking at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 128:
chars.append(s[i])
i += 1
else:
if byte < 224:
num_bytes = 2
elif byte < 240:
num_bytes = 3
elif byte < 248:
num_bytes = 4
elif byte < 252:
num_bytes = 5
elif byte < 254:
num_bytes = 6
elif byte < 255:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i:i + num_bytes])
i += num_bytes
return chars
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(self, null_token: str, end_token: str, unk_token: str,
start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],
ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:
Dict[str, str], bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
self._unk_token_idx = self.tok2ind[self.unk_token]
self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=
bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=
bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens)
def _word_lookup(self, key: str) ->int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) ->str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) ->List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) ->List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) ->List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) ->str:
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
| <mask token>
class BaseIncrStateFlattener(nn.Module):
<mask token>
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
def _unflatten_incr_state(self, flat_incr_state: Dict[str, torch.Tensor]
) ->Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
"""
Unflatten the input incremental state.
For instance, flat_incr_state['0__self_attn__prev_key'] will be stored in
structured_incr_state[0]['self_attn']['prev_key'].
"""
structured_incr_state = defaultdict(lambda : defaultdict(dict))
for key, state in flat_incr_state.items():
layer_idx_str, attn_type, state_type = key.split('__')
structured_incr_state[int(layer_idx_str)][attn_type][state_type
] = state
return dict({k: dict(v) for k, v in structured_incr_state.items()})
def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,
Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:
"""
Flatten the input incremental state.
For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
in flat_incr_state['0__self_attn__prev_key'].
"""
flat_incr_state = {}
for layer_idx, dict1 in structured_incr_state.items():
for attn_type, dict2 in dict1.items():
for state_type, state in dict2.items():
key = f'{layer_idx:d}__{attn_type}__{state_type}'
flat_incr_state[key] = state
return flat_incr_state
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .forward().
"""
def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.
Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.
Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if flat_incr_state is not None:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
else:
structured_incr_state = None
tensor, new_structured_incr_state = self.module.forward(input=
input_, encoder_state=encoder_state, incr_state=
structured_incr_state)
new_flat_incr_state = self._flatten_incr_state(
new_structured_incr_state)
return tensor, new_flat_incr_state
class ModelIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .reorder_decoder_incremental_state(). We also support .output(), which is also
traced.
"""
def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,
torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = (self.module.
reorder_decoder_incremental_state(incremental_state=
structured_incr_state, inds=inds))
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) ->torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) ->List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return [
'*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
]
if text[idx] == "'":
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1:idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(
text) and not text[idx + 1].isspace():
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isalpha():
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
else:
while last_matching_idx + 1 < len(text) and not text[
last_matching_idx + 1].isspace() and not text[
last_matching_idx + 1].isalpha() and not text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
tokens.append(text[idx:last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
last_space_idx = idx + 1
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
if last_space_idx + 1 == len(text):
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
last_space_idx = idx
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
return tokens
def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],
byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str]):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) ->List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
return output
def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:
"""
Return the list of adjacent symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
list of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) ->List[str]:
"""
Convert token to BPE.
:param word:
list of tokens to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')
)
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1
] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
def helper_encode(self, text: str) ->List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) ->List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
The logic is simple: looking at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 128:
chars.append(s[i])
i += 1
else:
if byte < 224:
num_bytes = 2
elif byte < 240:
num_bytes = 3
elif byte < 248:
num_bytes = 4
elif byte < 252:
num_bytes = 5
elif byte < 254:
num_bytes = 6
elif byte < 255:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i:i + num_bytes])
i += num_bytes
return chars
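# Worked example (eager mode; hypothetical input): in this codebase each
# character of `s` carries one raw byte value via byte_encoder, so the UTF-8
# bytes of 'é' (0xC3 0xA9) arrive as two code points. Walking 'caf\xc3\xa9':
# 'c', 'a', 'f' are < 128 (one byte each); 0xC3 = 195 is < 224, so two bytes
# are grouped into one character:
#   helper.utf8_chars('caf\xc3\xa9') -> ['c', 'a', 'f', '\xc3\xa9']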
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(self, null_token: str, end_token: str, unk_token: str,
start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],
ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:
Dict[str, str], bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
self._unk_token_idx = self.tok2ind[self.unk_token]
self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=
bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=
bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens)
def _word_lookup(self, key: str) ->int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) ->str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) ->List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) ->List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) ->List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) ->str:
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
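# Illustrative round trip through the agent above (a real instance needs the
# full GPT-2 vocab and merge tables passed to __init__; values here are
# hypothetical):
#   vec = dict_agent.txt2vec("hello world")   # e.g. [31373, 995]
#   dict_agent.vec2txt(vec)                   # -> "hello world"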
| <mask token>
class TorchScriptGreedySearch(nn.Module):
<mask token>
<mask token>
def __init__(self, agent: TorchAgent):
super().__init__()
self.is_bart = agent.opt['model'] == 'bart'
for key, val in self.CAIRAOKE_DICT_PARAMS.items():
assert agent.opt.get(key, val
) == val, f'The only currently supported value of "{key}" is {val}!'
orig_dict: DictionaryAgent = agent.dict
orig_bpe: Gpt2BpeHelper = orig_dict.bpe
assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())
assert not any(i for key in orig_bpe.bpe_ranks.keys() for i in key if
'\n' in i
), "We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!"
fused_key_bpe_ranks = {'\n'.join(key): float(val) for key, val in
orig_bpe.bpe_ranks.items()}
self.dict = ScriptableDictionaryAgent(null_token=orig_dict.
null_token, end_token=orig_dict.end_token, unk_token=orig_dict.
unk_token, start_token=orig_dict.start_token, freq=orig_dict.
freq, tok2ind=orig_dict.tok2ind, ind2tok=orig_dict.ind2tok,
bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],
bpe_encoder=orig_bpe.encoder, bpe_byte_encoder=orig_bpe.
byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=agent._get_special_tokens())
self.delimiter_tok = agent.history.delimiter_tok
self.history_size = agent.opt['history_size']
if agent.opt.get('history_add_global_end_token', None) is not None:
self.global_end_token = agent.dict[agent.dict.end_token]
else:
self.global_end_token = None
self.text_truncate = agent.opt.get('text_truncate') or agent.opt[
'truncate']
self.text_truncate = (self.text_truncate if self.text_truncate >= 0
else None)
self.start_idx = agent.model.START_IDX
self.end_idx = agent.model.END_IDX
self.null_idx = agent.model.NULL_IDX
if self.is_bart:
self.initial_decoder_input = [self.end_idx, self.start_idx]
else:
self.initial_decoder_input = [self.start_idx]
agent.model.eval()
wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)
wrapped_model = ModelIncrStateFlattener(agent.model)
sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)
encoder_states = agent.model.encoder(sample_tokens)
initial_generations = self._get_initial_decoder_input(sample_tokens)
latent, initial_incr_state = wrapped_decoder(initial_generations,
encoder_states)
logits = agent.model.output(latent[:, -1:, :])
_, preds = logits.max(dim=2)
incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}
incr_state = wrapped_model.reorder_decoder_incremental_state(incr_state
, torch.tensor([0], dtype=torch.long, device=sample_tokens.device))
generations = torch.cat([initial_generations, preds], dim=1)
self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)
self.decoder_first_pass = torch.jit.trace(wrapped_decoder, (
initial_generations, encoder_states), strict=False)
self.partially_traced_model = torch.jit.trace_module(wrapped_model,
{'output': latent[:, -1:, :],
'reorder_decoder_incremental_state': (initial_incr_state, torch
.tensor([0], dtype=torch.long, device=sample_tokens.device))},
strict=False)
self.decoder_later_pass = torch.jit.trace(wrapped_decoder, (
generations, encoder_states, incr_state), strict=False)
<mask token>
<mask token>
<mask token>
def forward(self, context: str, max_len: int=128) ->str:
history_vecs: List[List[int]] = []
context_lines = context.split('\n')
if self.history_size > 0:
context_lines = context_lines[-self.history_size:]
for line in context_lines:
history_vecs.append(self.parse(line))
text_vecs: List[List[int]] = []
for vec in history_vecs[:-1]:
text_vecs += [vec]
text_vecs += [self.delimiter_tok]
text_vecs += [history_vecs[-1]]
if self.global_end_token is not None:
text_vecs += [[self.global_end_token]]
flattened_text_vec: List[int] = []
for vec in text_vecs:
for token in vec:
flattened_text_vec.append(token)
if self.text_truncate is not None:
if self.is_bart:
truncate_length = self.text_truncate - 2
else:
truncate_length = self.text_truncate
if len(flattened_text_vec) > truncate_length:
flattened_text_vec = flattened_text_vec[-truncate_length:]
flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)
if self.is_bart:
flattened_text_vec = torch.cat([torch.tensor([self.start_idx],
dtype=torch.long), flattened_text_vec, torch.tensor([self.
end_idx], dtype=torch.long)], dim=0)
batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0)
encoder_states = self.encoder(batch_text_vec)
generations = self._get_initial_decoder_input(batch_text_vec)
seen_end = torch.zeros(batch_text_vec.size(0), device=
batch_text_vec.device, dtype=torch.bool)
incr_state: Dict[str, torch.Tensor] = {}
for token_idx in range(max_len):
if token_idx == 0:
latent, incr_state = self.decoder_first_pass(generations,
encoder_states)
else:
latent, incr_state = self.decoder_later_pass(generations,
encoder_states, incr_state)
logits = self.partially_traced_model.output(latent[:, -1:, :])
_, preds = logits.max(dim=2)
incr_state = (self.partially_traced_model.
reorder_decoder_incremental_state(incr_state, torch.tensor(
[0], dtype=torch.long, device=batch_text_vec.device)))
seen_end = seen_end + (preds == self.end_idx).squeeze(1)
generations = torch.cat([generations, preds], dim=1)
if torch.all(seen_end):
break
if self.is_bart:
assert generations[0, 0].item() == self.end_idx
generations = generations[:, 1:]
generation_tokens: List[int] = generations[0].tolist()
label = self._v2t(generation_tokens)
return label
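# End-to-end usage sketch (hypothetical setup; exporting needs a ParlAI agent
# that satisfies the asserts in __init__ above):
#   agent = create_agent(opt, requireModelExists=True)   # parlai.core.agents
#   scripted = torch.jit.script(TorchScriptGreedySearch(agent))
#   scripted.save('model_scripted.pt')
#   print(scripted("hello world\nhow are you?", max_len=32))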
class BaseIncrStateFlattener(nn.Module):
"""
Flatten/unflatten the incremental state for use with TorchScripting.
Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,
torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,
and previous key/value/mask, respectively. However, TorchScript expects dicts to be
of type Dict[str, torch.Tensor], and thus all input incremental states when
TorchScripting will have to be of that type. We thus unflatten the input incremental
state, already of type Dict[str, torch.Tensor], to pass it into whatever method
needs it, and we flatten it again after the updated incremental state is passed back
out.
This is a base class that provides methods for flattening/unflattening: subclasses
will call these methods as the incremental state is passed into and out of their own
methods.
"""
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
def _unflatten_incr_state(self, flat_incr_state: Dict[str, torch.Tensor]
) ->Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
"""
Unflatten the input incremental state.
For instance, flat_incr_state['0__self_attn__prev_key'] will be stored in
structured_incr_state[0]['self_attn']['prev_key'].
"""
structured_incr_state = defaultdict(lambda : defaultdict(dict))
for key, state in flat_incr_state.items():
layer_idx_str, attn_type, state_type = key.split('__')
structured_incr_state[int(layer_idx_str)][attn_type][state_type
] = state
return dict({k: dict(v) for k, v in structured_incr_state.items()})
def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,
Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:
"""
Flatten the input incremental state.
For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
in flat_incr_state['0__self_attn__prev_key'].
"""
flat_incr_state = {}
for layer_idx, dict1 in structured_incr_state.items():
for attn_type, dict2 in dict1.items():
for state_type, state in dict2.items():
key = f'{layer_idx:d}__{attn_type}__{state_type}'
flat_incr_state[key] = state
return flat_incr_state
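# Round trip of the key scheme documented above (a minimal sketch with
# hypothetical tensor shapes; any nn.Module can be wrapped):
_wrapper = BaseIncrStateFlattener(nn.Identity())
_nested = {0: {'self_attn': {'prev_key': torch.zeros(1, 2, 4)}}}
_flat = _wrapper._flatten_incr_state(_nested)
assert list(_flat) == ['0__self_attn__prev_key']
assert _wrapper._unflatten_incr_state(_flat)[0]['self_attn']['prev_key'].shape == (1, 2, 4)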
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .forward().
"""
def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.
Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.
Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if flat_incr_state is not None:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
else:
structured_incr_state = None
tensor, new_structured_incr_state = self.module.forward(input=
input_, encoder_state=encoder_state, incr_state=
structured_incr_state)
new_flat_incr_state = self._flatten_incr_state(
new_structured_incr_state)
return tensor, new_flat_incr_state
class ModelIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .reorder_decoder_incremental_state(). We also support .output(), which is also
traced.
"""
def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,
torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = (self.module.
reorder_decoder_incremental_state(incremental_state=
structured_incr_state, inds=inds))
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) ->torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) ->List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return [
'*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
]
if text[idx] == "'":
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1:idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(
text) and not text[idx + 1].isspace():
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isalpha():
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
else:
while last_matching_idx + 1 < len(text) and not text[
last_matching_idx + 1].isspace() and not text[
last_matching_idx + 1].isalpha() and not text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
tokens.append(text[idx:last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
last_space_idx = idx + 1
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
if last_space_idx + 1 == len(text):
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
last_space_idx = idx
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
return tokens
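# Worked example of the splitting rules above:
#   findall("Hello world's test") -> ['Hello', ' world', "'s", ' test']
# (a single leading space attaches to the following run; contraction endings
# such as 's split off as their own token).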
def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],
byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str]):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) ->List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
return output
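# Worked example of the special-token pass above: with
# special_tokens=['__end__'], encode("hi __end__ there") first splits into
#   [('hi ', SPLITABLE), ('__end__', FINAL), (' there', SPLITABLE)]
# so '__end__' passes through verbatim and only the surrounding text is
# BPE-encoded via helper_encode().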
def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:
"""
Return the list of adjacent symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
list of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) ->List[str]:
"""
Convert token to BPE.
:param word:
list of tokens token to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')
)
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1
] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
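    # Minimal worked example (hypothetical ranks, not from the real merge table):
    # with bpe_ranks = {"h\ne": 0.0, "he\nl": 1.0} and word = ["h", "e", "l"],
    # the lowest-ranked bigram ("h", "e") merges first, giving ["he", "l"],
    # and the next pass merges that remaining pair into ["hel"].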
def helper_encode(self, text: str) ->List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) ->List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
        The logic is simple: look at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 128:
chars.append(s[i])
i += 1
else:
if byte < 224:
num_bytes = 2
elif byte < 240:
num_bytes = 3
elif byte < 248:
num_bytes = 4
elif byte < 252:
num_bytes = 5
elif byte < 254:
num_bytes = 6
elif byte < 255:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i:i + num_bytes])
i += num_bytes
return chars
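    # The decimal thresholds above are the UTF-8 lead-byte boundaries written
    # without bitwise ops: 128 = 0b10000000, 224 = 0b11100000, 240 = 0b11110000,
    # 248 = 0b11111000, 252 = 0b11111100, 254 = 0b11111110. For example, the
    # first UTF-8 byte of 'é' is 0xC3 = 195 < 224, so it is read as a 2-byte
    # character (in TorchScript, where string indexing is byte-wise).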
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(self, null_token: str, end_token: str, unk_token: str,
start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],
ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:
Dict[str, str], bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
self._unk_token_idx = self.tok2ind[self.unk_token]
self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=
bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=
bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens)
def _word_lookup(self, key: str) ->int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) ->str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) ->List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) ->List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) ->List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) ->str:
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from typing import List, Dict, Optional, Tuple
import torch.jit
from torch import nn as nn
from parlai.core.dict import DictionaryAgent
from parlai.core.torch_agent import TorchAgent
from parlai.utils.bpe import Gpt2BpeHelper
class TorchScriptGreedySearch(nn.Module):
"""
A helper class for exporting simple greedy-search models via TorchScript.
Models with extra inputs will need to override to include more variables.
"""
# We currently only support these specific dictionary settings
CAIRAOKE_DICT_PARAMS = {
"dict_class": "parlai.core.dict:DictionaryAgent",
"dict_initpath": None,
"dict_language": "english",
"dict_max_ngram_size": -1,
"dict_minfreq": 0,
"dict_maxtokens": -1,
"dict_tokenizer": "gpt2",
"dict_lower": False,
"dict_textfields": "text,labels",
"dict_loaded": True,
'bpe_debug': False,
}
def __init__(self, agent: TorchAgent):
super().__init__()
self.is_bart = agent.opt['model'] == 'bart'
# Dictionary/tokenization setup
for key, val in self.CAIRAOKE_DICT_PARAMS.items():
assert (
agent.opt.get(key, val) == val
), f'The only currently supported value of "{key}" is {val}!'
orig_dict: DictionaryAgent = agent.dict
orig_bpe: Gpt2BpeHelper = orig_dict.bpe
assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())
assert not any(
i for key in orig_bpe.bpe_ranks.keys() for i in key if '\n' in i
), "We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!"
fused_key_bpe_ranks = {
'\n'.join(key): float(val) for key, val in orig_bpe.bpe_ranks.items()
}
# Cast the values as floats to be able to compare to float('inf') when doing BPE
# splitting
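        # Hypothetical illustration: an entry like ('h', 'e') -> 0 in the
        # original bpe_ranks becomes 'h\ne' -> 0.0 here, since TorchScript
        # dict arguments cannot be keyed on tuples.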
self.dict = ScriptableDictionaryAgent(
null_token=orig_dict.null_token,
end_token=orig_dict.end_token,
unk_token=orig_dict.unk_token,
start_token=orig_dict.start_token,
freq=orig_dict.freq,
tok2ind=orig_dict.tok2ind,
ind2tok=orig_dict.ind2tok,
bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],
bpe_encoder=orig_bpe.encoder,
bpe_byte_encoder=orig_bpe.byte_encoder,
fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=agent._get_special_tokens(),
)
# History tracking and start/end tokens
self.delimiter_tok = agent.history.delimiter_tok
self.history_size = agent.opt['history_size']
if agent.opt.get('history_add_global_end_token', None) is not None:
self.global_end_token = agent.dict[agent.dict.end_token]
else:
self.global_end_token = None
self.text_truncate = agent.opt.get('text_truncate') or agent.opt['truncate']
self.text_truncate = self.text_truncate if self.text_truncate >= 0 else None
self.start_idx = agent.model.START_IDX
self.end_idx = agent.model.END_IDX
self.null_idx = agent.model.NULL_IDX
if self.is_bart:
self.initial_decoder_input = [self.end_idx, self.start_idx]
else:
self.initial_decoder_input = [self.start_idx]
agent.model.eval()
# Create versions of the model and decoder that will flatten the incremental
# state dict, as required by TorchScript
wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)
wrapped_model = ModelIncrStateFlattener(agent.model)
# Create sample inputs for tracing
sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)
encoder_states = agent.model.encoder(sample_tokens)
initial_generations = self._get_initial_decoder_input(sample_tokens)
latent, initial_incr_state = wrapped_decoder(
initial_generations, encoder_states
)
logits = agent.model.output(latent[:, -1:, :])
_, preds = logits.max(dim=2)
        # Copy the initial incremental state, used when tracing the
        # .reorder_decoder_incremental_state() method below, to avoid having it be
        # mutated by the following line
        incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}
incr_state = wrapped_model.reorder_decoder_incremental_state(
incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device)
)
generations = torch.cat([initial_generations, preds], dim=1)
# Do tracing
self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)
self.decoder_first_pass = torch.jit.trace(
wrapped_decoder, (initial_generations, encoder_states), strict=False
)
# We do strict=False to avoid an error when passing a Dict out of
# decoder.forward()
self.partially_traced_model = torch.jit.trace_module(
wrapped_model,
{
'output': (latent[:, -1:, :]),
'reorder_decoder_incremental_state': (
initial_incr_state,
torch.tensor([0], dtype=torch.long, device=sample_tokens.device),
),
},
strict=False,
)
self.decoder_later_pass = torch.jit.trace(
wrapped_decoder, (generations, encoder_states, incr_state), strict=False
)
def _get_initial_decoder_input(self, x: torch.Tensor) -> torch.Tensor:
"""
Workaround because we can't use TGM._get_initial_decoder_input() directly.
When we try to call that function, we get a "RuntimeError: Type 'Tuple[int,
int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and
Tuples of Tensors can be traced" error.
"""
bsz = x.size(0)
return (
torch.tensor(self.initial_decoder_input, dtype=torch.long)
.expand(bsz, len(self.initial_decoder_input))
.to(x.device)
)
def parse(self, text: str) -> List[int]:
return self.dict.txt2vec(text)
def _v2t(self, vec: List[int]) -> str:
"""
Convert token indices to string of tokens.
"""
new_vec: List[int] = []
for i in vec:
if i == self.end_idx:
break
elif i != self.start_idx:
new_vec.append(i)
return self.dict.vec2txt(new_vec)
def forward(self, context: str, max_len: int = 128) -> str:
# Vectorize all lines of context
history_vecs: List[List[int]] = []
context_lines = context.split('\n')
if self.history_size > 0:
context_lines = context_lines[-self.history_size :]
for line in context_lines:
history_vecs.append(self.parse(line))
# Get full history vec
text_vecs: List[List[int]] = []
for vec in history_vecs[:-1]:
text_vecs += [vec]
text_vecs += [self.delimiter_tok]
text_vecs += [history_vecs[-1]]
if self.global_end_token is not None:
text_vecs += [[self.global_end_token]]
# Flatten text_vecs
flattened_text_vec: List[int] = []
for vec in text_vecs:
for token in vec:
flattened_text_vec.append(token)
# Format history vec given various logic
if self.text_truncate is not None:
if self.is_bart:
truncate_length = self.text_truncate - 2 # Start and end tokens
else:
truncate_length = self.text_truncate
if len(flattened_text_vec) > truncate_length:
flattened_text_vec = flattened_text_vec[-truncate_length:]
flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)
if self.is_bart:
flattened_text_vec = torch.cat(
[
torch.tensor([self.start_idx], dtype=torch.long),
flattened_text_vec,
torch.tensor([self.end_idx], dtype=torch.long),
],
dim=0,
)
# Pass through the encoder and decoder to generate tokens
batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0) # Add batch dim
encoder_states = self.encoder(batch_text_vec)
generations = self._get_initial_decoder_input(batch_text_vec)
# keep track of early stopping if all generations finish
seen_end = torch.zeros(
batch_text_vec.size(0), device=batch_text_vec.device, dtype=torch.bool
)
incr_state: Dict[str, torch.Tensor] = {}
for token_idx in range(max_len):
if token_idx == 0:
latent, incr_state = self.decoder_first_pass(
generations, encoder_states
)
else:
latent, incr_state = self.decoder_later_pass(
generations, encoder_states, incr_state
)
logits = self.partially_traced_model.output(latent[:, -1:, :])
_, preds = logits.max(dim=2)
incr_state = self.partially_traced_model.reorder_decoder_incremental_state(
incr_state,
torch.tensor([0], dtype=torch.long, device=batch_text_vec.device),
)
seen_end = seen_end + (preds == self.end_idx).squeeze(1)
generations = torch.cat([generations, preds], dim=1)
if torch.all(seen_end):
break
# Get the label from the generated tokens and update the history
if self.is_bart:
assert generations[0, 0].item() == self.end_idx
generations = generations[:, 1:]
# Hack: remove initial end token. I haven't found in the code where this is
# done, but it seems to happen early on during generation
generation_tokens: List[int] = generations[0].tolist()
label = self._v2t(generation_tokens)
return label
class BaseIncrStateFlattener(nn.Module):
"""
Flatten/unflatten the incremental state for use with TorchScripting.
Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,
torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,
and previous key/value/mask, respectively. However, TorchScript expects dicts to be
of type Dict[str, torch.Tensor], and thus all input incremental states when
TorchScripting will have to be of that type. We thus unflatten the input incremental
state, already of type Dict[str, torch.Tensor], to pass it into whatever method
needs it, and we flatten it again after the updated incremental state is passed back
out.
This is a base class that provides methods for flattening/unflattening: subclasses
will call these methods as the incremental state is passed into and out of their own
methods.
"""
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
def _unflatten_incr_state(
self, flat_incr_state: Dict[str, torch.Tensor]
) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
"""
Unflatten the input incremental state.
For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in
structured_incr_state[0]['self_attn']['prev_key'].
"""
structured_incr_state = defaultdict(lambda: defaultdict(dict))
for key, state in flat_incr_state.items():
layer_idx_str, attn_type, state_type = key.split('__')
structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state
        # Turn the nested defaultdicts back into regular dicts
        return dict({k: dict(v) for k, v in structured_incr_state.items()})
def _flatten_incr_state(
self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
"""
Flatten the input incremental state.
For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
in flat_incr_state['layer_0__self_attn__prev_key'].
"""
flat_incr_state = {}
for layer_idx, dict1 in structured_incr_state.items():
for attn_type, dict2 in dict1.items():
for state_type, state in dict2.items():
key = f'{layer_idx:d}__{attn_type}__{state_type}'
flat_incr_state[key] = state
return flat_incr_state
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .forward().
"""
def forward(
self,
input_: torch.LongTensor,
encoder_state: Tuple[torch.Tensor, torch.Tensor],
flat_incr_state: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if flat_incr_state is not None:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
else:
structured_incr_state = None
tensor, new_structured_incr_state = self.module.forward(
input=input_, encoder_state=encoder_state, incr_state=structured_incr_state
)
new_flat_incr_state = self._flatten_incr_state(new_structured_incr_state)
return tensor, new_flat_incr_state
class ModelIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .reorder_decoder_incremental_state(). We also support .output(), which is also
traced.
"""
def reorder_decoder_incremental_state(
self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor
) -> Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = self.module.reorder_decoder_incremental_state(
incremental_state=structured_incr_state, inds=inds
)
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) -> torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) -> List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return ['*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***']
if text[idx] == "'":
                # Capture contraction suffixes
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1 : idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or (
text[idx] == ' ' and idx + 1 < len(text) and not text[idx + 1].isspace()
):
# Capture runs of one type of character
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while (
last_matching_idx + 1 < len(text)
and text[last_matching_idx + 1].isalpha()
):
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while (
last_matching_idx + 1 < len(text)
and text[last_matching_idx + 1].isnumeric()
):
last_matching_idx += 1
else:
while (
last_matching_idx + 1 < len(text)
and not text[last_matching_idx + 1].isspace()
and not text[last_matching_idx + 1].isalpha()
and not text[last_matching_idx + 1].isnumeric()
):
last_matching_idx += 1
tokens.append(text[idx : last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
# Capture runs of space characters up until just before the final one
last_space_idx = idx + 1
while (
last_space_idx + 1 < len(text)
and text[last_space_idx + 1].isspace()
):
last_space_idx += 1
if last_space_idx + 1 == len(text):
# Include the last char, which is a space char
tokens.append(text[idx : last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
# Capture runs of space characters
last_space_idx = idx
while (
last_space_idx + 1 < len(text)
and text[last_space_idx + 1].isspace()
):
last_space_idx += 1
tokens.append(text[idx : last_space_idx + 1])
idx = last_space_idx + 1
return tokens
def __init__(
self,
add_prefix_space: bool,
encoder: Dict[str, str],
byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str],
):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
# special tokens
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) -> List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
# constants for readability
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
# special token detected, replace the chunk with small subchunks
# split by the special token
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
# add the special token as a delimiter
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
text = ''.join(output)
return output
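    # Illustrative sketch (hypothetical special token): with special_tokens =
    # ["__end__"], the loop above splits "hi __end__ there" into the pieces
    # ("hi ", SPLITABLE), ("__end__", FINAL), (" there", SPLITABLE), and only
    # the SPLITABLE pieces are passed through helper_encode().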
def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:
"""
Return set of symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
set of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) -> List[str]:
"""
Convert token to BPE.
:param word:
list of tokens token to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf'))
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
def helper_encode(self, text: str) -> List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) -> str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) -> str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
# We iterate over "char", which is supposed to be a single
# character, because the TorchScripted version of the code
# correctly splits a string into single characters in
# self.utf8_chars() but the non-TorchScripted version doesn't
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) -> List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
        The logic is simple: look at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 0b10000000:
chars.append(s[i])
i += 1
else:
if byte < 0b11100000:
num_bytes = 2
elif byte < 0b11110000:
num_bytes = 3
elif byte < 0b11111000:
num_bytes = 4
elif byte < 0b11111100:
num_bytes = 5
elif byte < 0b11111110:
num_bytes = 6
elif byte < 0b11111111:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i : i + num_bytes])
i += num_bytes
return chars
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(
self,
null_token: str,
end_token: str,
unk_token: str,
start_token: str,
freq: Dict[str, int],
tok2ind: Dict[str, int],
ind2tok: Dict[int, str],
bpe_add_prefix_space: bool,
bpe_encoder: Dict[str, str],
bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str],
):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
# cache unk token for later
self._unk_token_idx = self.tok2ind[self.unk_token]
# Initialize tokenizer
self.bpe = ScriptableGpt2BpeHelper(
add_prefix_space=bpe_add_prefix_space,
encoder=bpe_encoder,
byte_encoder=bpe_byte_encoder,
fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens,
)
def _word_lookup(self, key: str) -> int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) -> str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) -> List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
# calls the selected tokenizer function e.g. 're' => re_tokenize(text)
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) -> List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) -> List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) -> str:
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
| [
24,
31,
32,
36,
43
] |
1,565 | 74aa93bf3731d4e3ddb920bedc7daced50b4f2c3 | <mask token>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
<mask token>
| <mask token>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
@app.route('/portfolio', methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
portfolio0 = ['195930', '133690', '273130', '284430', '183700']
portfolio1 = ['195930', '133690', '239660', '284430', '183700']
portfolio2 = ['195930', '133690', '239660', '278620', '284430']
portfolio3 = ['195930', '278530', '133690', '239660', '284430']
portfolio4 = ['195930', '278530', '277630', '133690', '284430']
portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']
portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']
portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']
portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']
portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ''
if is_oversea == '0':
db = 'ETF_US'
else:
db = 'ETF_KR'
print(db)
with oracle_engine.connect() as conn:
try:
sql = 'select * from ' + db + ' where invest_type=:1'
results = conn.execute(sql, invest_type).fetchall()
name_list = []
risk_list = []
weight_list = []
returns_1y = []
returns_3y = []
returns_5y = []
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
sql = 'select * from RETURN'
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
if is_oversea == '0':
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
elif invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'
].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] ==
ticker]['return'].map(float).values)
date_list[i] = list(return_df[return_df['ticker'] ==
ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
if is_oversea == '0':
sql = 'select * from pf_us'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[46:]
else:
sql = 'select * from pf_kr'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
count_list = [0, 0, 0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price,
KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,
KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,
KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,
KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
| <mask token>
app = Flask(__name__)
oracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
@app.route('/portfolio', methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
portfolio0 = ['195930', '133690', '273130', '284430', '183700']
portfolio1 = ['195930', '133690', '239660', '284430', '183700']
portfolio2 = ['195930', '133690', '239660', '278620', '284430']
portfolio3 = ['195930', '278530', '133690', '239660', '284430']
portfolio4 = ['195930', '278530', '277630', '133690', '284430']
portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']
portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']
portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']
portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']
portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ''
if is_oversea == '0':
db = 'ETF_US'
else:
db = 'ETF_KR'
print(db)
with oracle_engine.connect() as conn:
try:
sql = 'select * from ' + db + ' where invest_type=:1'
results = conn.execute(sql, invest_type).fetchall()
name_list = []
risk_list = []
weight_list = []
returns_1y = []
returns_3y = []
returns_5y = []
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
sql = 'select * from RETURN'
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
if is_oversea == '0':
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
elif invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'
].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] ==
ticker]['return'].map(float).values)
date_list[i] = list(return_df[return_df['ticker'] ==
ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
if is_oversea == '0':
sql = 'select * from pf_us'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[46:]
else:
sql = 'select * from pf_kr'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
count_list = [0, 0, 0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price,
KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,
KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,
KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,
KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
| import json
import joblib
import numpy as np
import datetime
import sqlalchemy as sa
import cx_Oracle
import pandas as pd
from flask import Flask, render_template, session, request, redirect, url_for
app = Flask(__name__)
oracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
@app.route('/portfolio', methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
portfolio0 = ['195930', '133690', '273130', '284430', '183700']
portfolio1 = ['195930', '133690', '239660', '284430', '183700']
portfolio2 = ['195930', '133690', '239660', '278620', '284430']
portfolio3 = ['195930', '278530', '133690', '239660', '284430']
portfolio4 = ['195930', '278530', '277630', '133690', '284430']
portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']
portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']
portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']
portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']
portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ''
if is_oversea == '0':
db = 'ETF_US'
else:
db = 'ETF_KR'
print(db)
with oracle_engine.connect() as conn:
try:
sql = 'select * from ' + db + ' where invest_type=:1'
results = conn.execute(sql, invest_type).fetchall()
name_list = []
risk_list = []
weight_list = []
returns_1y = []
returns_3y = []
returns_5y = []
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
sql = 'select * from RETURN'
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
if is_oversea == '0':
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
elif invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'
].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] ==
ticker]['return'].map(float).values)
date_list[i] = list(return_df[return_df['ticker'] ==
ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
if is_oversea == '0':
sql = 'select * from pf_us'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[46:]
else:
sql = 'select * from pf_kr'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
count_list = [0, 0, 0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price,
KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,
KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,
KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,
KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
| import json
import joblib
import numpy as np
import datetime
import sqlalchemy as sa
import cx_Oracle
import pandas as pd
from flask import Flask, render_template, session, request, redirect, url_for
app = Flask(__name__)
oracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')
@app.route("/")
def index():
return render_template('index.html')
@app.route("/survey", methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2, s3, s4, s5]
i_list = list(map(int, i_list)) # str -> int
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = "안정추구형"
elif type_num == 1:
invest_type = "안정형"
elif type_num == 2:
invest_type = "적극투자형"
elif type_num == 3:
invest_type = "공격투자형"
else:
invest_type = "위험중립형"
return render_template('result.html', KEY_INVEST_TYPE=invest_type, IS_OVERSEA=is_oversea)
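# Note: the classifier input is a 13-dimensional vector (the 12 survey answers
# plus their sum); this assumes rf_model.pkl was trained on features in that
# same order, which is not shown in this file.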
@app.route("/portfolio", methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
        # Domestic (KR) portfolios
        portfolio0 = ['195930', '133690', '273130', '284430', '183700']  # conservative
        portfolio1 = ['195930', '133690', '239660', '284430', '183700']  # moderately conservative
        portfolio2 = ['195930', '133690', '239660', '278620', '284430']  # risk-neutral
        portfolio3 = ['195930', '278530', '133690', '239660', '284430']  # active
        portfolio4 = ['195930', '278530', '277630', '133690', '284430']  # aggressive
        # US portfolios
        portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']  # conservative
        portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']  # moderately conservative
        portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']  # risk-neutral
        portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']  # active
        portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']  # aggressive
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ""
        if is_oversea == '0':  # overseas ETF
db = "ETF_US"
        else:  # domestic ETF
db = "ETF_KR"
print(db)
with oracle_engine.connect() as conn:
try:
sql = "select * from " + db + " where invest_type=:1"
results = conn.execute(sql, (invest_type)).fetchall()
                name_list = []    # product names
                risk_list = []    # risk grades
                weight_list = []  # weights
                returns_1y = []   # 1-year returns
                returns_3y = []   # 3-year returns
                returns_5y = []   # 5-year returns
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
                # Fetch historical return data for each product of this investor type
sql = "select * from RETURN"
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
                if is_oversea == '0':  # overseas
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
else:
if invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] == ticker]['return'].map(float).values)
date_list[i] = list(
return_df[return_df['ticker'] == ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
                # Fetch portfolio return data
                if is_oversea == '0':  # overseas
sql = "select * from pf_us"
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[46:]
                else:  # domestic
sql = "select * from pf_kr"
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
                        bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
        # Count risk grades (used to show each grade's share in the pie chart)
count_list = [0,0,0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price, KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list, KEY_COUNT_LIST=count_list,
KEY_RETURN_1Y=returns_1y, KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list, KEY_DATE_LIST=date_list,
KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
| [
2,
4,
5,
6,
7
] |
1,566 | a6154c5d855dc53d73db08bbb5b5d7437056e156 | <mask token>
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
<mask token>
| <mask token>
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
def predict(tag):
test = getPIData(tag, '2019-11-05', '2019-11-06')
test_arg = addFeature(test)
test_norm = normalize(test_arg)
X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)
model = loadModel(tag)
return model.predict(X_test)
<mask token>
| <mask token>
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
def predict(tag):
test = getPIData(tag, '2019-11-05', '2019-11-06')
test_arg = addFeature(test)
test_norm = normalize(test_arg)
X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)
model = loadModel(tag)
return model.predict(X_test)
print(predict('USG60_eth0_ifInOctets'))
| from keras.models import load_model
from DataManager import *
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
def predict(tag):
test = getPIData(tag, '2019-11-05', '2019-11-06')
test_arg = addFeature(test)
test_norm = normalize(test_arg)
X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)
model = loadModel(tag)
return model.predict(X_test)
print(predict('USG60_eth0_ifInOctets'))
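# Sketch of the assumed data flow (DataManager is not shown here): getPIData()
# pulls one day of readings for the PI tag, addFeature()/normalize() prepare
# them, and buildTrain(test_norm, 12 * 12, 1) presumably builds sliding windows
# of 144 past samples to predict 1 step ahead, matching the window size the
# saved model was trained with.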
| null | [
1,
2,
3,
4
] |
1,567 | 09f2fabaf3c19aa0d4cb522c6dbf5fd8d720b4df | """
Problem Statement
You and Fredrick are good friends. Yesterday, Fredrick received N credit
cards from ABCD Bank. He wants to verify whether his credit card numbers are
valid or not. You happen to be great at regex so he is asking for your help!
A valid credit card from ABCD Bank has the following characteristics:
It must start with a 4, 5 or 6.
It must contain exactly 16 digits.
It must only consist of digits (0-9).
It may have digits in groups of 4, separated by one hyphen "-".
It must NOT use any other separator like ' ' , '_', etc.
It must NOT have 4 or more consecutive repeated digits.
Examples:
Valid Credit Card Numbers
---------------------------
4253625879615786
4424424424442444
5122-2368-7954-3214
Invalid Credit Card Numbers
---------------------------
42536258796157867 #17 digits in card number --> Invalid
4424444424442444 #Consecutive digits are repeating 4 or more times
--> Invalid
5122-2368-7954 - 3214 #Separators other than '-' are used --> Invalid
44244x4424442444 #Contains non digit characters --> Invalid
0525362587961578 #Doesn't start with 4, 5 or 6 --> Invalid
Input Format
The first line of input contains an integer N.
The next N lines contain credit card numbers.
Constraints
0<N<100
Output Format
Print 'Valid' if the credit card number is valid. Otherwise,
print 'Invalid'. Do not print the quotes.
Sample Input
------------
6
4123456789123456
5123-4567-8912-3456
61234-567-8912-3456
4123356789123456
5133-3367-8912-3456
5123 - 3567 - 8912 - 3456
Sample Output
------------
Valid
Valid
Invalid
Valid
Invalid
Invalid
Explanation
-----------
4123456789123456 : Valid
5123-4567-8912-3456 : Valid
61234-567-8912-3456 : Invalid, because the card number is not divided into
equal groups of 4.
4123356789123456 : Valid
5133-3367-8912-3456 : Invalid, because the digit 3 repeats 4 times consecutively.
5123 - 3567 - 8912 - 3456 : Invalid, because spaces are used around the '-'
separators.
"""
import re
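# Solution 1 (Python 2): validate the grouping by hand, then use one regex for
# the leading digit and another for four consecutive repeated digits.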
for _ in range(int(raw_input())):
credit_card_number = raw_input()
if len(credit_card_number) == 16 or len(credit_card_number) == 19:
if credit_card_number.count('-') == 3 and len(
credit_card_number) != 19:
print "Invalid"
continue
if credit_card_number.count('-') == 3:
cc_split = credit_card_number.split('-')
is_invalid = False
for cc in cc_split:
if len(cc) != 4:
is_invalid = True
break
if is_invalid:
print "Invalid"
continue
credit_card_number = credit_card_number.replace('-', '')
#print credit_card_number
start_pattern = r"[456]"
digit_pattern = r"\d*([0-9])\1\1\1"
start_match = re.match(start_pattern, credit_card_number)
digit_match = re.match(digit_pattern, credit_card_number)
#print start_match, digit_match
if start_match and not digit_match:
print "Valid"
else:
print "Invalid"
else:
print "Invalid"
for i in range(int(raw_input())):
S = raw_input().strip()
pre_match = re.search(r'^[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}$',S)
if pre_match:
processed_string = "".join(pre_match.group(0).split('-'))
final_match = re.search(r'(\d)\1{3,}',processed_string)
print 'Invalid' if final_match else 'Valid'
else:
print 'Invalid' | null | null | null | null | [
0
] |
1,568 | 3a88ff479e3b01518d79e9930c29514863f96f9b | <mask token>
class TestRandomSelectNode(unittest.TestCase):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 500
for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                                     citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250,
                                 number_seed_edges=5, seed=127)
self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 25
for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                                     citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250,
                                 number_seed_edges=5, seed=127)
        self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),
                         msg='since graph is too small, the subgraph should contain the whole thing')
| <mask token>
class TestRandomSelectNode(unittest.TestCase):
<mask token>
def setUp(self):
self.random_state = np.random.RandomState(seed=127)
self.trials = 30000
def test_randomly_select_node_1(self):
"""Tests that randomly selecting nodes works"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = set()
        node_counter = Counter(randomly_select_node(g, no_grow, self.random_state)
                               for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)
self.assertIn(c, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)
def test_randomly_select_node_2(self):
"""Tests that randomly selecting nodes works, but disallow C"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = {c}
        node_counter = Counter(randomly_select_node(g, no_grow, self.random_state)
                               for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)
self.assertNotIn(c, node_counter)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)
<mask token>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 500
for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                                     citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250,
                                 number_seed_edges=5, seed=127)
self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
        n_edges = 25
        for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
        self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),
            msg='since graph is too small, the subgraph should contain the whole thing')
| <mask token>
class TestRandomSelectNode(unittest.TestCase):
"""Test random node selection"""
def setUp(self):
self.random_state = np.random.RandomState(seed=127)
self.trials = 30000
def test_randomly_select_node_1(self):
"""Tests that randomly selecting nodes works"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = set()
        node_counter = Counter(randomly_select_node(g, no_grow, self.random_state)
                               for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)
self.assertIn(c, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)
def test_randomly_select_node_2(self):
"""Tests that randomly selecting nodes works, but disallow C"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = {c}
        node_counter = Counter(randomly_select_node(g, no_grow, self.random_state)
                               for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)
self.assertNotIn(c, node_counter)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)
<mask token>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
        n_edges = 500
        for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
        self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
        n_edges = 25
        for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
        self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),
            msg='since graph is too small, the subgraph should contain the whole thing')
| <mask token>
def n():
"""Generates a PyBEL node tuple
:rtype: tuple
"""
return PROTEIN, 'TEST', str(uuid4())
class TestRandomSelectNode(unittest.TestCase):
"""Test random node selection"""
def setUp(self):
self.random_state = np.random.RandomState(seed=127)
self.trials = 30000
def test_randomly_select_node_1(self):
"""Tests that randomly selecting nodes works"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = set()
        node_counter = Counter(randomly_select_node(g, no_grow, self.random_state)
                               for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)
self.assertIn(c, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)
def test_randomly_select_node_2(self):
"""Tests that randomly selecting nodes works, but disallow C"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = {c}
        node_counter = Counter(randomly_select_node(g, no_grow, self.random_state)
                               for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)
self.assertNotIn(c, node_counter)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)
<mask token>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
        n_edges = 500
        for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
        self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
        n_edges = 25
        for u, v in edges[:n_edges]:
            graph.add_qualified_edge(u, v, relation=INCREASES,
                citation=str(uuid4()), evidence=str(uuid4()))
        self.assertEqual(n_edges, graph.number_of_edges())
        sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
        self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),
            msg='since graph is too small, the subgraph should contain the whole thing')
| # -*- coding: utf-8 -*-
import itertools as itt
import random
import unittest
from collections import Counter
from uuid import uuid4
import numpy as np
from pybel import BELGraph
from pybel.constants import INCREASES, PROTEIN
from pybel.dsl import protein
from pybel_tools.selection import get_random_subgraph
from pybel_tools.selection.random_subgraph import randomly_select_node
def n():
"""Generates a PyBEL node tuple
:rtype: tuple
"""
return PROTEIN, 'TEST', str(uuid4())
class TestRandomSelectNode(unittest.TestCase):
"""Test random node selection"""
def setUp(self):
self.random_state = np.random.RandomState(seed=127)
self.trials = 30000
def test_randomly_select_node_1(self):
"""Tests that randomly selecting nodes works"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = set()
node_counter = Counter(
randomly_select_node(g, no_grow, self.random_state)
for _ in range(self.trials)
)
self.assertIn(a, node_counter)
self.assertAlmostEqual((1 / 6), node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual((3 / 6), node_counter[b] / self.trials, places=2)
self.assertIn(c, node_counter)
self.assertAlmostEqual((1 / 6), node_counter[c] / self.trials, places=2)
self.assertIn(d, node_counter)
self.assertAlmostEqual((1 / 6), node_counter[d] / self.trials, places=2)
def test_randomly_select_node_2(self):
"""Tests that randomly selecting nodes works, but disallow C"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = {c}
node_counter = Counter(
randomly_select_node(g, no_grow, self.random_state)
for _ in range(self.trials)
)
self.assertIn(a, node_counter)
self.assertAlmostEqual((1 / 5), node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual((3 / 5), node_counter[b] / self.trials, places=2)
self.assertNotIn(c, node_counter)
self.assertIn(d, node_counter)
self.assertAlmostEqual((1 / 5), node_counter[d] / self.trials, places=2)
def make_nodes(n):
"""Returns a list of PyBEL node data dictionaries
:param int n: number nodes
:rtype: list[protein]
"""
return [
protein(namespace='NS', name=str(i))
for i in range(1, n)
]
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 500
for u, v in edges[:n_edges]:
graph.add_qualified_edge(
u, v,
relation=INCREASES,
citation=str(uuid4()),
evidence=str(uuid4()),
)
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 25
for u, v in edges[:n_edges]:
graph.add_qualified_edge(
u, v,
relation=INCREASES,
citation=str(uuid4()),
evidence=str(uuid4()),
)
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),
msg='since graph is too small, the subgraph should contain the whole thing')
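# A minimal, self-contained sketch of the degree-weighted behaviour the
# selection tests above assert (randomly_select_node's internals are an
# assumption here, not quoted from pybel_tools): drawing a uniform random
# edge and then a uniform endpoint picks each node with probability
# deg(v) / (2 * |E|), which is where the 1/6 and 3/6 expectations come from.
import random as _random
from collections import Counter as _Counter

_edges = [('a', 'b'), ('b', 'c'), ('b', 'd')]
_counts = _Counter(_random.choice(_random.choice(_edges)) for _ in range(30000))
print({k: round(v / 30000.0, 2) for k, v in _counts.items()})  # roughly {'b': 0.5, others: 0.17}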
| [
5,
8,
9,
10,
13
] |
1,569 | b16c847912944e0563492d35768b5b5bf3a506c7 | <mask token>
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
                    read.attrib.update(NumCycles=index_cycles.get('index1',
                        DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
                f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
            raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
                index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
            if row['Lane'] not in barcodes_mask:
                barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
            elif (index != barcodes_mask[row['Lane']]['index']
                    or index1 != barcodes_mask[row['Lane']]['index1']):
                return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
        return '{}: {} {}'.format(self.__class__.__name__, self.path, self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
<mask token>
| <mask token>
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
                    read.attrib.update(NumCycles=index_cycles.get('index1',
                        DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
                f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
            raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
                index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
            if row['Lane'] not in barcodes_mask:
                barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
            elif (index != barcodes_mask[row['Lane']]['index']
                    or index1 != barcodes_mask[row['Lane']]['index1']):
                return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
        return '{}: {} {}'.format(self.__class__.__name__, self.path, self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
<mask token>
def format_dataset_filename(sample_label, lane=None, read=None, ext=None,
uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join([filename, lane, read]) if lane else '_'.join([
filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
<mask token>
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error('While touching {} file: {}'.format(path, e.strerror))
<mask token>
| <mask token>
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
                    read.attrib.update(NumCycles=index_cycles.get('index1',
                        DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
                f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
            raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
                index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
            if row['Lane'] not in barcodes_mask:
                barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
            elif (index != barcodes_mask[row['Lane']]['index']
                    or index1 != barcodes_mask[row['Lane']]['index1']):
                return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
        return '{}: {} {}'.format(self.__class__.__name__, self.path, self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
<mask token>
def format_dataset_filename(sample_label, lane=None, read=None, ext=None,
uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join([filename, lane, read]) if lane else '_'.join([
filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if does not exists, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug('config file paths: {}'.format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error('While touching {} file: {}'.format(path, e.strerror))
<mask token>
| <mask token>
SAMPLES_WITHOUT_BARCODES = [2, 8]
DEFAULT_INDEX_CYCLES = dict(index='8', index1='8')
PROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(index=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),
index1=next((item['NumCycles'] for item in indexed_reads if
item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index',
DEFAULT_INDEX_CYCLES['index']))
else:
                    read.attrib.update(NumCycles=index_cycles.get('index1',
                        DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
                f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get(
'start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
            raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip())
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = '_-'
return re.sub('[^\\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
elif f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
                index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
            if row['Lane'] not in barcodes_mask:
                barcodes_mask[row['Lane']] = dict(index=index, index1=index1)
            elif (index != barcodes_mask[row['Lane']]['index']
                    or index1 != barcodes_mask[row['Lane']]['index1']):
                return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
        return '{}: {} {}'.format(self.__class__.__name__, self.path, self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml', 'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exists".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(
path, logger, force)
def sanitize_filename(filename):
valid_chars = '-_.%s%s' % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None,
uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join([filename, lane, read]) if lane else '_'.join([
filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if does not exists, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug('config file paths: {}'.format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error('While touching {} file: {}'.format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
yield data
def get_md5(file_handle):
hasher = hashlib.md5()
for chunk in read_chunks(file_handle):
hasher.update(chunk)
return hasher.hexdigest()
def check_progress_status(root_path, started_file, completed_file):
localroot, dirnames, filenames = os.walk(root_path).next()
if started_file not in filenames:
return PROGRESS_STATUS.get('TODO')
elif completed_file not in filenames:
return PROGRESS_STATUS.get('STARTED')
else:
started_file = os.path.join(root_path, started_file)
completed_file = os.path.join(root_path, completed_file)
if os.path.getmtime(started_file) > os.path.getmtime(completed_file):
return PROGRESS_STATUS.get('STARTED')
return PROGRESS_STATUS.get('COMPLETED')
def runJob(cmd, logger):
try:
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
output = process.communicate()[0]
ret = process.wait()
return True
except subprocess.CalledProcessError as e:
logger.info(e)
if e.output:
logger.info('command output: %s', e.output)
else:
logger.info('no command output available')
return False
| """
Utilities used by other modules.
"""
import csv
import datetime
import hashlib
import json
import re
import string
import subprocess
import uuid
import xml.etree.ElementTree as ET
from alta import ConfigurationFromYamlFile
from pkg_resources import resource_filename
from ..__details__ import __appname__
from appdirs import *
from comoda import ensure_dir
from shutil import copyfile
SAMPLES_WITHOUT_BARCODES = [2, 8]
DEFAULT_INDEX_CYCLES = dict(index='8', index1='8')
PROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item["IsIndexedRead"] == "Y", reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(
index=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] == "2"), None),
index1=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] != "2"), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib["IsIndexedRead"] == "Y":
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))
else:
                    read.attrib.update(NumCycles=index_cycles.get('index1', DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item["IsIndexedRead"] == "N", reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
            raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip()) # ms-dos
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = "_-"
return re.sub(r'[^\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
else:
if f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(
index=index,
index1=index1,
)
else:
if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__,
self.path,
self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml',
'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
# Load YAML configuration file
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exists".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,
logger,
force)
def sanitize_filename(filename):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join(
[filename, lane, read]) if lane else '_'.join(
[filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if does not exists, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug("config file paths: {}".format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
yield data
def get_md5(file_handle):
hasher = hashlib.md5()
for chunk in read_chunks(file_handle):
hasher.update(chunk)
return hasher.hexdigest()
def check_progress_status(root_path, started_file, completed_file):
localroot, dirnames, filenames = os.walk(root_path).next()
if started_file not in filenames:
return PROGRESS_STATUS.get('TODO')
elif completed_file not in filenames:
return PROGRESS_STATUS.get('STARTED')
else:
started_file = os.path.join(root_path, started_file)
completed_file = os.path.join(root_path, completed_file)
if os.path.getmtime(started_file) > os.path.getmtime(completed_file):
return PROGRESS_STATUS.get('STARTED')
return PROGRESS_STATUS.get('COMPLETED')
def runJob(cmd, logger):
try:
# subprocess.check_output(cmd)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = process.communicate()[0]
ret = process.wait()
return True
except subprocess.CalledProcessError as e:
logger.info(e)
if e.output:
logger.info("command output: %s", e.output)
else:
logger.info("no command output available")
return False
| [
25,
27,
28,
36,
38
] |
1,570 | 9cb11c2bf032aa16abd3463ecdb8997addedc912 | <mask token>
class TestActor(Actor):
<mask token>
<mask token>
<mask token>
| <mask token>
class TestActor(Actor):
<mask token>
def act(self):
self.key_commands()
<mask token>
| <mask token>
class TestActor(Actor):
def __init__(self):
super(TestActor, self).__init__()
def act(self):
self.key_commands()
<mask token>
| <mask token>
class TestActor(Actor):
def __init__(self):
super(TestActor, self).__init__()
def act(self):
self.key_commands()
def key_commands(self):
if PlayerInput.is_key_down(pygame.K_LEFT):
self.set_location(self.x - 1, self.y)
if PlayerInput.is_key_down(pygame.K_RIGHT):
self.set_location(self.x + 1, self.y)
if PlayerInput.is_key_down(pygame.K_UP):
self.set_location(self.x, self.y - 1)
if PlayerInput.is_key_down(pygame.K_DOWN):
self.set_location(self.x, self.y + 1)
| import pygame
from Actor import Actor
import PlayerInput
class TestActor(Actor):
def __init__(self):
super(TestActor, self).__init__()
def act(self):
self.key_commands()
def key_commands(self):
if PlayerInput.is_key_down(pygame.K_LEFT):
self.set_location(self.x - 1, self.y)
if PlayerInput.is_key_down(pygame.K_RIGHT):
self.set_location(self.x + 1, self.y)
if PlayerInput.is_key_down(pygame.K_UP):
self.set_location(self.x, self.y - 1)
if PlayerInput.is_key_down(pygame.K_DOWN):
self.set_location(self.x, self.y + 1)
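# Hypothetical per-frame driver for the actor above (the Actor base class and
# PlayerInput wiring are assumed, not shown in this file):
#   actor = TestActor()
#   while running:
#       pygame.event.pump()   # refresh the key state PlayerInput reads
#       actor.act()           # moves one pixel per held arrow key per frame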
| [
1,
2,
3,
4,
5
] |
1,571 | fcc75550e1317a15c36bc8100c28af59b68e1381 | <mask token>
| <mask token>
my_logger.setLevel(logging.DEBUG)
<mask token>
handler.setFormatter(formatter)
my_logger.addHandler(handler)
<mask token>
while 1:
c.execute(
'SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);')
row = c.fetchone()
    my_logger.debug('Current temp: ' + str(row['temp']) + ' target temp: ' +
        str(row['tem_des']) + ' boiler state: ' + str(row['heating']))
read_date = row['day']
real_temp = row['temp']
desire_temp = row['tem_des']
heating_status = row['heating']
    table_thermostat_status.put_item(TableName='thermostat_status', Item={
        'id': 1, 'desire_temp': Decimal(desire_temp),
        'real_temp': Decimal(real_temp), 'status': heating_status,
        'status_date': str(datetime.datetime.now())})
alexa_order = table_thermostat_alexa_order.get_item(TableName=
'thermostat_alexa_order', Key={'id': 1})
if 'Item' in alexa_order:
        my_logger.debug('Alexa order present with temperature = ' + str(
            alexa_order['Item']['desire_temp']))
c.execute('UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=' + str(
alexa_order['Item']['desire_temp']))
conn.commit()
table_thermostat_alexa_order.delete_item(TableName=
'thermostat_alexa_order', Key={'id': 1})
        my_logger.debug('Alexa order deleted')
else:
        my_logger.debug('No Alexa order')
time.sleep(5)
| <mask token>
LOG_FILENAME = '/home/pi/Thermostat/alexaThermostat/logs/alexaThermostat.out'
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=25000,
backupCount=10)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
my_logger.addHandler(handler)
conn = sqlite3.connect('/home/pi/Thermostat/backThermostat/thermostat.db')
conn.row_factory = sqlite3.Row
c = conn.cursor()
client = boto3.resource('dynamodb')
table_thermostat_status = client.Table('thermostat_status')
table_thermostat_alexa_order = client.Table('thermostat_alexa_order')
while 1:
c.execute(
'SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);')
row = c.fetchone()
    my_logger.debug('Current temp: ' + str(row['temp']) + ' target temp: ' +
        str(row['tem_des']) + ' boiler state: ' + str(row['heating']))
read_date = row['day']
real_temp = row['temp']
desire_temp = row['tem_des']
heating_status = row['heating']
    table_thermostat_status.put_item(TableName='thermostat_status', Item={
        'id': 1, 'desire_temp': Decimal(desire_temp),
        'real_temp': Decimal(real_temp), 'status': heating_status,
        'status_date': str(datetime.datetime.now())})
alexa_order = table_thermostat_alexa_order.get_item(TableName=
'thermostat_alexa_order', Key={'id': 1})
if 'Item' in alexa_order:
        my_logger.debug('Alexa order present with temperature = ' + str(
            alexa_order['Item']['desire_temp']))
c.execute('UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=' + str(
alexa_order['Item']['desire_temp']))
conn.commit()
table_thermostat_alexa_order.delete_item(TableName=
'thermostat_alexa_order', Key={'id': 1})
        my_logger.debug('Alexa order deleted')
else:
        my_logger.debug('No Alexa order')
time.sleep(5)
| import boto3
import time
import datetime
from datetime import date
import sqlite3
import logging
import logging.handlers
from decimal import *
LOG_FILENAME = '/home/pi/Thermostat/alexaThermostat/logs/alexaThermostat.out'
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=25000,
backupCount=10)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
my_logger.addHandler(handler)
conn = sqlite3.connect('/home/pi/Thermostat/backThermostat/thermostat.db')
conn.row_factory = sqlite3.Row
c = conn.cursor()
client = boto3.resource('dynamodb')
table_thermostat_status = client.Table('thermostat_status')
table_thermostat_alexa_order = client.Table('thermostat_alexa_order')
while 1:
c.execute(
'SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);')
row = c.fetchone()
    my_logger.debug('Current temp: ' + str(row['temp']) + ' target temp: ' +
        str(row['tem_des']) + ' boiler state: ' + str(row['heating']))
read_date = row['day']
real_temp = row['temp']
desire_temp = row['tem_des']
heating_status = row['heating']
    table_thermostat_status.put_item(TableName='thermostat_status', Item={
        'id': 1, 'desire_temp': Decimal(desire_temp),
        'real_temp': Decimal(real_temp), 'status': heating_status,
        'status_date': str(datetime.datetime.now())})
alexa_order = table_thermostat_alexa_order.get_item(TableName=
'thermostat_alexa_order', Key={'id': 1})
if 'Item' in alexa_order:
        my_logger.debug('Alexa order present with temperature = ' + str(
            alexa_order['Item']['desire_temp']))
c.execute('UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=' + str(
alexa_order['Item']['desire_temp']))
conn.commit()
table_thermostat_alexa_order.delete_item(TableName=
'thermostat_alexa_order', Key={'id': 1})
        my_logger.debug('Alexa order deleted')
else:
        my_logger.debug('No Alexa order')
time.sleep(5)
| import boto3
import time
import datetime
from datetime import date
import sqlite3
import logging
import logging.handlers
from decimal import *
### LOGS CONFIGURATION ###
LOG_FILENAME = '/home/pi/Thermostat/alexaThermostat/logs/alexaThermostat.out'
# Set up a specific logger with our desired output level
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=25000, backupCount=10)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
my_logger.addHandler(handler)
### SQLITE3 CONNECTION ###
conn = sqlite3.connect('/home/pi/Thermostat/backThermostat/thermostat.db')
# So that columns can be accessed by name
conn.row_factory = sqlite3.Row
c = conn.cursor()
### CONNECT TO DYNAMODB IN AWS
client = boto3.resource('dynamodb')
table_thermostat_status = client.Table("thermostat_status")
table_thermostat_alexa_order = client.Table("thermostat_alexa_order")
while 1:
    ### READ DESIRED AND REAL TEMPERATURE
c.execute("SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);")
row=c.fetchone()
my_logger.debug("Temp actual: " + str(row["temp"]) + " temp des: "+ str(row["tem_des"]) + " Estado Caldera: " + str(row["heating"]))
read_date = row["day"]
real_temp = row["temp"]
desire_temp = row["tem_des"]
heating_status = row["heating"]
table_thermostat_status.put_item(TableName='thermostat_status', Item={'id' : 1, 'desire_temp' : Decimal(desire_temp) , 'real_temp' : Decimal(real_temp) , 'status' : heating_status , 'status_date':str(datetime.datetime.now())})
### SEARCH FOR ANY ALEXA ORDER IN AWS DYNAMODB ###
alexa_order = table_thermostat_alexa_order.get_item(TableName='thermostat_alexa_order' , Key={'id' : 1})
if 'Item' in (alexa_order):
my_logger.debug("Hay orden de Alexa con temperatura = " + str(alexa_order['Item']['desire_temp']))
c.execute("UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP="+str(alexa_order['Item']['desire_temp']))
conn.commit()
table_thermostat_alexa_order.delete_item(TableName='thermostat_alexa_order' , Key={'id' : 1})
my_logger.debug("Orden alexa eliminada")
else:
my_logger.debug("No hay orden de Alexa")
    ### DELAY 5 SEC
time.sleep(5)
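# Note on the Decimal(...) conversions above: DynamoDB rejects plain floats,
# and converting through str keeps the value readable (stdlib behaviour):
#   Decimal(str(0.1))  -> Decimal('0.1')
#   Decimal(0.1)       -> Decimal('0.1000000000000000055511151231257827021181583404541015625')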
| [
0,
1,
2,
3,
4
] |
1,572 | 50fab726b90f65a82c1206a8c7df955a8b76da99 | <mask token>
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
<mask token>
| <mask token>
print_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',
all_tensors=True, all_tensor_names=True)
<mask token>
for key in var_to_shape_map:
print('tensor_name: ', key)
n += 1
print('n:', n)
<mask token>
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
<mask token>
for tensor_name in tensor_name_list:
print('pd:', tensor_name, '\n')
m += 1
print('m:', m)
<mask token>
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
<mask token>
| <mask token>
checkpoint_path = '/your/path'
print_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',
all_tensors=True, all_tensor_names=True)
<mask token>
checkpoint_path = '/your/path'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
n = 0
for key in var_to_shape_map:
print('tensor_name: ', key)
n += 1
print('n:', n)
<mask token>
out_pb_path = '/your/path'
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
tensor_name_list = [tensor.name for tensor in tf.get_default_graph().as_graph_def().node]
m = 0
for tensor_name in tensor_name_list:
print('pd:', tensor_name, '\n')
m += 1
print('m:', m)
<mask token>
model = '/your/path'
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
summaryWriter = tf.summary.FileWriter('log/', graph)
| from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
checkpoint_path = '/your/path'
print_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',
all_tensors=True, all_tensor_names=True)
from tensorflow.python import pywrap_tensorflow
checkpoint_path = '/your/path'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
n = 0
for key in var_to_shape_map:
print('tensor_name: ', key)
n += 1
print('n:', n)
import tensorflow as tf
import os
out_pb_path = '/your/path'
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
tensor_name_list = [tensor.name for tensor in tf.get_default_graph().as_graph_def().node]
m = 0
for tensor_name in tensor_name_list:
print('pd:', tensor_name, '\n')
m += 1
print('m:', m)
import tensorflow as tf
from tensorflow.python.platform import gfile
model = '/your/path'
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
summaryWriter = tf.summary.FileWriter('log/', graph)
| # Print the tensors of a ckpt or pb model
# ckpt model
# Method 1:
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
checkpoint_path="/your/path"
print_tensors_in_checkpoint_file(checkpoint_path,tensor_name='', all_tensors=True, all_tensor_names=True)
# Method 2:
from tensorflow.python import pywrap_tensorflow
checkpoint_path = "/your/path"
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
n=0
for key in var_to_shape_map:
print("tensor_name: ", key)
#print("****",reader.get_tensor(key))
n+=1
print("n:",n)
# pb model
# Print the tensor names
import tensorflow as tf
import os
out_pb_path="/your/path"
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
tensor_name_list = [tensor.name for tensor in tf.get_default_graph().as_graph_def().node]
m=0
for tensor_name in tensor_name_list:
print("pd:",tensor_name,'\n')
m+=1
print("m:",m)
# Get the graph of a pb model
import tensorflow as tf
from tensorflow.python.platform import gfile
model = "/your/path"
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
summaryWriter = tf.summary.FileWriter('log/', graph)
#命令tensorboard --logdir=/opt/data/hyh/tboard/tusimple_lanenet/vgg
| [
1,
2,
3,
4,
5
] |
1,573 | 843df062702c9abf34cf14d911d927d786f1d912 | <mask token>
| <mask token>
print(numbers + new_numbers)
print(numbers * 5)
| numbers = [1, 1, 1, 1, 1]
new_numbers = [2, 2, 2, 3, 3]
print(numbers + new_numbers)
print(numbers * 5)
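# Expected output: '+' concatenates the lists and '*' builds a shallow repetition:
# [1, 1, 1, 1, 1, 2, 2, 2, 3, 3]
# [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]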
| null | null | [
0,
1,
2
] |
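For reference, the expected output of the list snippet above: + concatenates lists and * repeats them.

numbers = [1, 1, 1, 1, 1]
new_numbers = [2, 2, 2, 3, 3]
print(numbers + new_numbers)  # [1, 1, 1, 1, 1, 2, 2, 2, 3, 3]
print(numbers * 5)            # the five 1s repeated five times (25 elements)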
1,574 | a58949d25a719dc9ce0626948ab0397814e9ea0e | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('analysis', '0018_relatorioquedadeconsumo_justificado')]
operations = [migrations.RemoveField(model_name=
'relatoriocorrentezerada', name='expira'), migrations.RemoveField(
model_name='relatoriotensaozerada', name='expira'), migrations.
AddField(model_name='relatoriotensaozerada', name='data_expira',
field=models.DateTimeField(blank=True, null=True, verbose_name=
'data_expira'))]
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('analysis', '0018_relatorioquedadeconsumo_justificado')]
operations = [migrations.RemoveField(model_name=
'relatoriocorrentezerada', name='expira'), migrations.RemoveField(
model_name='relatoriotensaozerada', name='expira'), migrations.
AddField(model_name='relatoriotensaozerada', name='data_expira',
field=models.DateTimeField(blank=True, null=True, verbose_name=
'data_expira'))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-21 00:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analysis', '0018_relatorioquedadeconsumo_justificado'),
]
operations = [
migrations.RemoveField(
model_name='relatoriocorrentezerada',
name='expira',
),
migrations.RemoveField(
model_name='relatoriotensaozerada',
name='expira',
),
migrations.AddField(
model_name='relatoriotensaozerada',
name='data_expira',
field=models.DateTimeField(blank=True, null=True, verbose_name='data_expira'),
),
]
| [
0,
1,
2,
3,
4
] |
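Not in the source: a sketch of driving this migration from code rather than the shell, using Django's documented call_command API (assumes a configured Django project).

from django.core.management import call_command

call_command('migrate', 'analysis')  # apply this app's migrations forward
# Roll back to the previous migration named in the dependencies above:
# call_command('migrate', 'analysis', '0018_relatorioquedadeconsumo_justificado')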
1,575 | 1ef1dcc8fdf4d813dad70c860e33778715d51b0c | <mask token>
class TestWktEmpty:
def __init__(self, inString, expectedOutString):
self.inString = inString
self.expectedOutString = expectedOutString
def isEmpty(self, geom):
try:
ogr.Geometry.IsEmpty
except:
return 'skip'
if geom.IsEmpty() == False:
geom.Destroy()
gdaltest.post_reason(
'IsEmpty returning false for an empty geometry')
return 'fail'
return 'success'
def CheckIsEmpty(self):
geom = ogr.CreateGeometryFromWkt(self.inString)
wkt = geom.ExportToWkt()
if self.expectedOutString != 'POINT EMPTY':
if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(
) != wkt:
return 'fail'
if wkt == self.expectedOutString:
if self.isEmpty(geom) == 'fail':
return 'fail'
else:
return 'success'
else:
gdaltest.post_reason('WKT is wrong: ' + wkt +
'. Expected value is: ' + self.expectedOutString)
return 'fail'
<mask token>
def ogr_wktempty_test_partial_empty_geoms():
wkt = 'MULTIPOINT (1 1)'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))
wkt = 'MULTIPOINT (1 1)'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON EMPTY'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
ring = ogr.Geometry(type=ogr.wkbLinearRing)
ring.AddPoint_2D(0, 0)
ring.AddPoint_2D(10, 0)
ring.AddPoint_2D(10, 10)
ring.AddPoint_2D(0, 10)
ring.AddPoint_2D(0, 0)
geom.AddGeometry(ring)
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
geom.AddGeometry(ogr.CreateGeometryFromWkt(
'POLYGON ((100 0,100 10,110 10,100 0))'))
wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
return 'success'
<mask token>
| <mask token>
sys.path.append('../pymod')
<mask token>
class TestWktEmpty:
def __init__(self, inString, expectedOutString):
self.inString = inString
self.expectedOutString = expectedOutString
def isEmpty(self, geom):
try:
ogr.Geometry.IsEmpty
except:
return 'skip'
if geom.IsEmpty() == False:
geom.Destroy()
gdaltest.post_reason(
'IsEmpty returning false for an empty geometry')
return 'fail'
return 'success'
def CheckIsEmpty(self):
geom = ogr.CreateGeometryFromWkt(self.inString)
wkt = geom.ExportToWkt()
if self.expectedOutString != 'POINT EMPTY':
if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(
) != wkt:
return 'fail'
if wkt == self.expectedOutString:
if self.isEmpty(geom) == 'fail':
return 'fail'
else:
return 'success'
else:
gdaltest.post_reason('WKT is wrong: ' + wkt +
'. Expected value is: ' + self.expectedOutString)
return 'fail'
<mask token>
def ogr_wktempty_test_partial_empty_geoms():
wkt = 'MULTIPOINT (1 1)'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))
wkt = 'MULTIPOINT (1 1)'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON EMPTY'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
ring = ogr.Geometry(type=ogr.wkbLinearRing)
ring.AddPoint_2D(0, 0)
ring.AddPoint_2D(10, 0)
ring.AddPoint_2D(10, 10)
ring.AddPoint_2D(0, 10)
ring.AddPoint_2D(0, 0)
geom.AddGeometry(ring)
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
geom.AddGeometry(ogr.CreateGeometryFromWkt(
'POLYGON ((100 0,100 10,110 10,100 0))'))
wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
return 'success'
<mask token>
for item in empty_wkt_list:
ut = TestWktEmpty(item[0], item[1])
gdaltest_list.append((ut.CheckIsEmpty, item[0]))
gdaltest_list.append(ogr_wktempty_test_partial_empty_geoms)
if __name__ == '__main__':
gdaltest.setup_run('ogr_wktempty')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
| <mask token>
sys.path.append('../pymod')
<mask token>
class TestWktEmpty:
def __init__(self, inString, expectedOutString):
self.inString = inString
self.expectedOutString = expectedOutString
def isEmpty(self, geom):
try:
ogr.Geometry.IsEmpty
except:
return 'skip'
if geom.IsEmpty() == False:
geom.Destroy()
gdaltest.post_reason(
'IsEmpty returning false for an empty geometry')
return 'fail'
return 'success'
def CheckIsEmpty(self):
geom = ogr.CreateGeometryFromWkt(self.inString)
wkt = geom.ExportToWkt()
if self.expectedOutString != 'POINT EMPTY':
if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(
) != wkt:
return 'fail'
if wkt == self.expectedOutString:
if self.isEmpty(geom) == 'fail':
return 'fail'
else:
return 'success'
else:
gdaltest.post_reason('WKT is wrong: ' + wkt +
'. Expected value is: ' + self.expectedOutString)
return 'fail'
empty_wkt_list = [('GEOMETRYCOLLECTION(EMPTY)', 'GEOMETRYCOLLECTION EMPTY'),
('MULTIPOLYGON( EMPTY )', 'MULTIPOLYGON EMPTY'), (
'MULTILINESTRING(EMPTY)', 'MULTILINESTRING EMPTY'), (
'MULTIPOINT(EMPTY)', 'MULTIPOINT EMPTY'), ('POINT ( EMPTY )',
'POINT EMPTY'), ('LINESTRING(EMPTY)', 'LINESTRING EMPTY'), (
'POLYGON ( EMPTY )', 'POLYGON EMPTY'), ('GEOMETRYCOLLECTION EMPTY',
'GEOMETRYCOLLECTION EMPTY'), ('MULTIPOLYGON EMPTY',
'MULTIPOLYGON EMPTY'), ('MULTILINESTRING EMPTY',
'MULTILINESTRING EMPTY'), ('MULTIPOINT EMPTY', 'MULTIPOINT EMPTY'), (
'POINT EMPTY', 'POINT EMPTY'), ('LINESTRING EMPTY', 'LINESTRING EMPTY'),
('POLYGON EMPTY', 'POLYGON EMPTY')]
def ogr_wktempty_test_partial_empty_geoms():
wkt = 'MULTIPOINT (1 1)'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))
wkt = 'MULTIPOINT (1 1)'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON EMPTY'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
ring = ogr.Geometry(type=ogr.wkbLinearRing)
ring.AddPoint_2D(0, 0)
ring.AddPoint_2D(10, 0)
ring.AddPoint_2D(10, 10)
ring.AddPoint_2D(0, 10)
ring.AddPoint_2D(0, 0)
geom.AddGeometry(ring)
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
geom.AddGeometry(ogr.CreateGeometryFromWkt(
'POLYGON ((100 0,100 10,110 10,100 0))'))
wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
return 'success'
gdaltest_list = []
for item in empty_wkt_list:
ut = TestWktEmpty(item[0], item[1])
gdaltest_list.append((ut.CheckIsEmpty, item[0]))
gdaltest_list.append(ogr_wktempty_test_partial_empty_geoms)
if __name__ == '__main__':
gdaltest.setup_run('ogr_wktempty')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
| import os
import sys
import string
sys.path.append('../pymod')
import gdaltest
import ogrtest
from osgeo import ogr
from osgeo import gdal
class TestWktEmpty:
def __init__(self, inString, expectedOutString):
self.inString = inString
self.expectedOutString = expectedOutString
def isEmpty(self, geom):
try:
ogr.Geometry.IsEmpty
except:
return 'skip'
if geom.IsEmpty() == False:
geom.Destroy()
gdaltest.post_reason(
'IsEmpty returning false for an empty geometry')
return 'fail'
return 'success'
def CheckIsEmpty(self):
geom = ogr.CreateGeometryFromWkt(self.inString)
wkt = geom.ExportToWkt()
if self.expectedOutString != 'POINT EMPTY':
if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(
) != wkt:
return 'fail'
if wkt == self.expectedOutString:
if self.isEmpty(geom) == 'fail':
return 'fail'
else:
return 'success'
else:
gdaltest.post_reason('WKT is wrong: ' + wkt +
'. Expected value is: ' + self.expectedOutString)
return 'fail'
empty_wkt_list = [('GEOMETRYCOLLECTION(EMPTY)', 'GEOMETRYCOLLECTION EMPTY'),
('MULTIPOLYGON( EMPTY )', 'MULTIPOLYGON EMPTY'), (
'MULTILINESTRING(EMPTY)', 'MULTILINESTRING EMPTY'), (
'MULTIPOINT(EMPTY)', 'MULTIPOINT EMPTY'), ('POINT ( EMPTY )',
'POINT EMPTY'), ('LINESTRING(EMPTY)', 'LINESTRING EMPTY'), (
'POLYGON ( EMPTY )', 'POLYGON EMPTY'), ('GEOMETRYCOLLECTION EMPTY',
'GEOMETRYCOLLECTION EMPTY'), ('MULTIPOLYGON EMPTY',
'MULTIPOLYGON EMPTY'), ('MULTILINESTRING EMPTY',
'MULTILINESTRING EMPTY'), ('MULTIPOINT EMPTY', 'MULTIPOINT EMPTY'), (
'POINT EMPTY', 'POINT EMPTY'), ('LINESTRING EMPTY', 'LINESTRING EMPTY'),
('POLYGON EMPTY', 'POLYGON EMPTY')]
def ogr_wktempty_test_partial_empty_geoms():
wkt = 'MULTIPOINT (1 1)'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))
wkt = 'MULTIPOINT (1 1)'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'POLYGON EMPTY'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
ring = ogr.Geometry(type=ogr.wkbLinearRing)
ring.AddPoint_2D(0, 0)
ring.AddPoint_2D(10, 0)
ring.AddPoint_2D(10, 10)
ring.AddPoint_2D(0, 10)
ring.AddPoint_2D(0, 0)
geom.AddGeometry(ring)
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
geom.AddGeometry(ogr.CreateGeometryFromWkt(
'POLYGON ((100 0,100 10,110 10,100 0))'))
wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +
'. Expected value is: ' + wkt)
return 'fail'
return 'success'
gdaltest_list = []
for item in empty_wkt_list:
ut = TestWktEmpty(item[0], item[1])
gdaltest_list.append((ut.CheckIsEmpty, item[0]))
gdaltest_list.append(ogr_wktempty_test_partial_empty_geoms)
if __name__ == '__main__':
gdaltest.setup_run('ogr_wktempty')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
| #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test support for the various "EMPTY" WKT geometry representations.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2004, Frank Warmerdam <[email protected]>
# Copyright (c) 2008, Even Rouault <even dot rouault at mines-paris dot org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import os
import sys
import string
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
from osgeo import ogr
from osgeo import gdal
class TestWktEmpty:
def __init__( self, inString, expectedOutString ):
self.inString = inString
self.expectedOutString = expectedOutString
def isEmpty(self, geom):
try:
ogr.Geometry.IsEmpty
except:
return 'skip'
if (geom.IsEmpty() == False):
geom.Destroy()
gdaltest.post_reason ("IsEmpty returning false for an empty geometry")
return 'fail'
return 'success'
def CheckIsEmpty(self):
geom = ogr.CreateGeometryFromWkt( self.inString )
wkt = geom.ExportToWkt()
if self.expectedOutString != 'POINT EMPTY':
if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt() != wkt:
return 'fail'
if wkt == self.expectedOutString:
if self.isEmpty(geom) == 'fail':
return 'fail'
else:
return 'success'
else:
gdaltest.post_reason( 'WKT is wrong: ' + wkt + '. Expected value is: ' + self.expectedOutString )
return 'fail'
empty_wkt_list = [ \
('GEOMETRYCOLLECTION(EMPTY)', 'GEOMETRYCOLLECTION EMPTY'),
('MULTIPOLYGON( EMPTY )', 'MULTIPOLYGON EMPTY'),
('MULTILINESTRING(EMPTY)', 'MULTILINESTRING EMPTY'),
('MULTIPOINT(EMPTY)', 'MULTIPOINT EMPTY'),
('POINT ( EMPTY )', 'POINT EMPTY'),
('LINESTRING(EMPTY)', 'LINESTRING EMPTY'),
('POLYGON ( EMPTY )', 'POLYGON EMPTY'),
('GEOMETRYCOLLECTION EMPTY', 'GEOMETRYCOLLECTION EMPTY'),
('MULTIPOLYGON EMPTY', 'MULTIPOLYGON EMPTY'),
('MULTILINESTRING EMPTY', 'MULTILINESTRING EMPTY'),
('MULTIPOINT EMPTY', 'MULTIPOINT EMPTY'),
('POINT EMPTY', 'POINT EMPTY'),
('LINESTRING EMPTY', 'LINESTRING EMPTY'),
('POLYGON EMPTY', 'POLYGON EMPTY')
]
def ogr_wktempty_test_partial_empty_geoms():
# Multipoint with a valid point and an empty point
wkt = 'MULTIPOINT (1 1)'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry( type = ogr.wkbPoint ))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
# Multipoint with an empty point and a valid point
geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')
geom.AddGeometry(ogr.Geometry( type = ogr.wkbPoint ))
geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))
wkt = 'MULTIPOINT (1 1)'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
# Multilinestring with a valid string and an empty linestring
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry( type = ogr.wkbLineString ))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
# Multilinestring with an empty linestring and a valid linestring
geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')
geom.AddGeometry(ogr.Geometry( type = ogr.wkbLineString ))
geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))
wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
# Polygon with a valid external ring and an empty internal ring
wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry( type = ogr.wkbLinearRing ))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
# Polygon with an empty external ring and a valid internal ring
wkt = 'POLYGON EMPTY'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry( type = ogr.wkbLinearRing ))
ring = ogr.Geometry( type = ogr.wkbLinearRing )
ring.AddPoint_2D( 0, 0)
ring.AddPoint_2D( 10, 0)
ring.AddPoint_2D( 10, 10)
ring.AddPoint_2D( 0, 10)
ring.AddPoint_2D( 0, 0)
geom.AddGeometry(ring)
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
# Multipolygon with a valid polygon and an empty polygon
wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry( type = ogr.wkbPolygon ))
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
# Multipolygon with an empty polygon and a valid polygon
geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')
geom.AddGeometry(ogr.Geometry( type = ogr.wkbPolygon ))
geom.AddGeometry(ogr.CreateGeometryFromWkt('POLYGON ((100 0,100 10,110 10,100 0))'))
wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'
if geom.ExportToWkt() != wkt:
gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )
return 'fail'
return 'success'
gdaltest_list = []
for item in empty_wkt_list:
ut = TestWktEmpty( item[0], item[1] )
gdaltest_list.append( (ut.CheckIsEmpty, item[0]) )
gdaltest_list.append( ogr_wktempty_test_partial_empty_geoms )
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_wktempty' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| [
5,
6,
7,
8,
9
] |
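A minimal usage sketch (not part of the test suite) of the WKT round trip exercised above: parse an EMPTY geometry, confirm IsEmpty, and export it back.

from osgeo import ogr

geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON( EMPTY )')
assert geom.IsEmpty()
print(geom.ExportToWkt())  # expected: MULTIPOLYGON EMPTY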
1,576 | be9972d899a167a8ca2728960e55cda538793cc5 | <mask token>
| <mask token>
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
| import cgitb
import sys
from auth import is_admin
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
| #!/usr/bin/env python3
import cgitb
import sys
from auth import is_admin
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
| null | [
0,
1,
2,
3
] |
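The auth module is project-local and not shown; purely as an assumption, a minimal is_admin() consistent with the CGI script might check the server-authenticated user against an allow list.

# Hypothetical auth.py -- the real implementation is not in the source.
import os

ADMINS = {'alice'}  # placeholder allow list

def is_admin():
    # REMOTE_USER is set by the web server after HTTP authentication.
    return os.environ.get('REMOTE_USER') in ADMINS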
1,577 | 1e34087719f6fd0456d2722edbd0a7af68d37e4c | <mask token>
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
<mask token>
| <mask token>
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
<mask token>
print(dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
| <mask token>
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
df, col = read_atomic_data('unique_m.csv')
X_train, X_test, y_train, y_test = get_dataset(df, col)
<mask token>
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
results = {}
R = Regression(X_train, X_test, y_train, y_test)
dict = R.run()
print(dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
| import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
import os
import sys
import sklearn.metrics as mets
from review import set_metrics as set_metrics
from algo import Regression
import draw
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
df, col = read_atomic_data('unique_m.csv')
X_train, X_test, y_train, y_test = get_dataset(df, col)
from sklearn import preprocessing
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
results = {}
R = Regression(X_train, X_test, y_train, y_test)
dict = R.run()
print(dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
| import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
import os
import sys
import sklearn.metrics as mets
from review import set_metrics as set_metrics
from algo import Regression
import draw
#https://datascience.stackexchange.com/questions/989/svm-using-scikit-learn-runs-endlessly-and-never-completes-execution
#https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/
#https://datascienceplus.com/keras-regression-based-neural-networks/
#xgboost
#random forest
#lstm
#rnn
#dec tree
#logistic regression
#ann
#naive bayes
#monte carlo
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print("To begin with, your path to data should be proper!")
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist() # get the columns
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
return (X_train, X_test, y_train, y_test)
df, col = read_atomic_data("unique_m.csv")
(X_train, X_test, y_train, y_test) = get_dataset(df, col)
from sklearn import preprocessing
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
results = {}
R = Regression(X_train, X_test, y_train, y_test)
dict = R.run()
print (dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
| [
2,
3,
4,
5,
6
] |
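algo.Regression and draw.draw are project-local and not shown. As an assumption, run() presumably returns a per-model dict keyed by the metric names drawn above; a sketch using sklearn.metrics:

# Hypothetical sketch of the metrics dict consumed by draw.draw();
# names and structure are assumptions, not the source's algo.Regression.
import sklearn.metrics as mets

def score_model(model, X_train, X_test, y_train, y_test):
    model.fit(X_train, y_train)
    pred = model.predict(X_test)
    return {
        'r2_score': mets.r2_score(y_test, pred),
        'max_error': mets.max_error(y_test, pred),
        'explained_variance_score': mets.explained_variance_score(y_test, pred),
        'mean_absolute_error': mets.mean_absolute_error(y_test, pred),
        'mean_squared_error': mets.mean_squared_error(y_test, pred),
        'median_absolute_error': mets.median_absolute_error(y_test, pred),
        # mean_squared_log_error is omitted: it raises on negative predictions.
    }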
1,578 | 30aa8405ccf64ce8a05204f3f9fa2ffab436ad3b | <mask token>
| <mask token>
print(tf.__version__)
<mask token>
ninapro.splitImagesLabels()
print('ninapro.TrainImages shape: ', ninapro.TrainImages.shape)
print('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape)
print('ninapro.TestImages shape: ', ninapro.TestImages.shape)
print('ninapro.TestLabels shape: ', ninapro.TestLabels.shape)
print('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape)
print('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape)
print('Read successfully done...')
<mask token>
with tf.name_scope('Input'):
x = tf.placeholder(tf.float32, shape=[None, 16, 30], name='X')
y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')
if Debug:
print('input x shape: ', x.shape)
print('input y shape: ', y.shape)
<mask token>
if Debug:
print('x_image shape: ', x_image.shape)
<mask token>
with tf.name_scope('First'):
w1 = tf.Variable(tf.truncated_normal([1, 16, firstIn, firstOut], stddev
=0.1), name='W')
b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name='B')
s1 = 1
conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME')
act1 = tf.nn.relu(conv1 + b1)
tf.summary.histogram('weights', w1)
tf.summary.histogram('biases', b1)
tf.summary.histogram('activation', act1)
if Debug:
print('w1 shape: ', w1.shape)
print('b1 shape: ', b1.shape)
print('conv1 shape: ', conv1.shape)
print('act1 shape: ', act1.shape)
<mask token>
with tf.name_scope('Second'):
w2 = tf.Variable(tf.truncated_normal([3, 3, secondIn, secondOut],
stddev=0.1), name='W')
b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')
s2 = 1
conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')
act2 = tf.nn.relu(conv2 + b2)
k2 = 3
ms2 = 1
mp2 = tf.nn.max_pool(act2, ksize=[1, k2, k2, 1], strides=[1, ms2, ms2,
1], padding='SAME')
tf.summary.histogram('weights', w2)
tf.summary.histogram('biases', b2)
tf.summary.histogram('activation', act2)
tf.summary.histogram('maxpooling', mp2)
if Debug:
print('w2 shape: ', w2.shape)
print('b2 shape: ', b2.shape)
print('conv2 shape: ', conv2.shape)
print('act2 shape: ', act2.shape)
print('mp2 shape: ', mp2.shape)
<mask token>
with tf.name_scope('Third'):
w3 = tf.Variable(tf.truncated_normal([5, 5, thirdIn, thirdOut], stddev=
0.1), name='W')
b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')
s3 = 1
conv3 = tf.nn.conv2d(mp2, w3, strides=[1, s3, s3, 1], padding='SAME')
act3 = tf.nn.relu(conv3 + b3)
k3 = 3
ms3 = 1
mp3 = tf.nn.max_pool(act3, ksize=[1, k3, k3, 1], strides=[1, ms3, ms3,
1], padding='SAME')
tf.summary.histogram('weights', w3)
tf.summary.histogram('biases', b3)
tf.summary.histogram('activation', act3)
tf.summary.histogram('maxpooling', mp3)
if Debug:
print('w3 shape: ', w3.shape)
print('b3 shape: ', b3.shape)
print('conv3 shape: ', conv3.shape)
print('act3 shape: ', act3.shape)
print('mp3 shape: ', mp3.shape)
<mask token>
with tf.name_scope('Fourth'):
w4 = tf.Variable(tf.truncated_normal([6, 1, fourthIn, fourthOut],
stddev=0.1), name='W')
b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')
s4 = 1
conv4 = tf.nn.conv2d(mp3, w4, strides=[1, s4, s4, 1], padding='SAME')
act4 = tf.nn.relu(conv4 + b4)
tf.summary.histogram('weights', w4)
tf.summary.histogram('biases', b4)
tf.summary.histogram('activation', act4)
if Debug:
print('w4 shape: ', w4.shape)
print('b4 shape: ', b4.shape)
print('conv4 shape: ', conv4.shape)
print('act4 shape: ', act4.shape)
<mask token>
with tf.name_scope('Fifth'):
w5 = tf.Variable(tf.truncated_normal([1, 1, fifthIn, fifthOut], stddev=
0.1), name='W')
b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')
s5 = 1
conv5 = tf.nn.conv2d(act4, w5, strides=[1, s5, s5, 1], padding='SAME')
act5 = tf.nn.relu(conv5 + b5)
with tf.name_scope('Flatten'):
flatten5 = tf.reshape(act5, [-1, 16 * 30 * fifthOut])
with tf.name_scope('FullyCon'):
wfc5 = tf.Variable(tf.truncated_normal([16 * 30 * fifthOut, nMV],
stddev=0.1), name='W')
bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')
y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)
tf.summary.histogram('weights', w5)
tf.summary.histogram('biases', b5)
tf.summary.histogram('activation', act5)
tf.summary.histogram('flatten', flatten5)
tf.summary.histogram('weights_fc5', wfc5)
tf.summary.histogram('biases_fc5', bfc5)
if Debug:
print('w5 shape: ', w5.shape)
print('b5 shape: ', b5.shape)
print('conv5 shape: ', conv5.shape)
print('act5 shape: ', act5.shape)
print('flatten5 shape: ', flatten5.shape)
print('weights_fc5 shape: ', wfc5.shape)
print('biases_fc5 shape: ', bfc5.shape)
print('y_predict shape: ', y_.shape)
with tf.name_scope('Softmaxloss'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=y_, labels=y), name='Loss')
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('Accuracy'):
correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
<mask token>
usefulFcns.BuildNewlyDir(graph_dir)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(graph_dir)
writer.add_graph(sess.graph)
for i in range(2000):
x_batch, y_batch = ninapro.next_batch(30)
if i % 100 == 0:
[train_accuracy] = sess.run([accuracy], feed_dict={x: x_batch,
y: y_batch})
[test_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.
TestImages, y: ninapro.TestLabels})
[validate_accuracy] = sess.run([accuracy], feed_dict={x:
ninapro.ValidateImages, y: ninapro.ValidateLabels})
print('Step %d, training %g, testing %g, validate %g.' % (i,
train_accuracy, test_accuracy, validate_accuracy))
if i % 5 == 0:
s = sess.run(merged_summary, feed_dict={x: x_batch, y: y_batch})
writer.add_summary(s, i)
sess.run(train, feed_dict={x: x_batch, y: y_batch})
| <mask token>
print(tf.__version__)
Debug = True
ninapro = Ninapro()
ninapro.splitImagesLabels()
print('ninapro.TrainImages shape: ', ninapro.TrainImages.shape)
print('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape)
print('ninapro.TestImages shape: ', ninapro.TestImages.shape)
print('ninapro.TestLabels shape: ', ninapro.TestLabels.shape)
print('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape)
print('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape)
print('Read successfully done...')
nMV = ninapro.TrainLabels.shape[1]
with tf.name_scope('Input'):
x = tf.placeholder(tf.float32, shape=[None, 16, 30], name='X')
y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')
if Debug:
print('input x shape: ', x.shape)
print('input y shape: ', y.shape)
x_image = tf.reshape(x, [-1, 16, 30, 1])
if Debug:
print('x_image shape: ', x_image.shape)
firstIn = 1
firstOut = 32
with tf.name_scope('First'):
w1 = tf.Variable(tf.truncated_normal([1, 16, firstIn, firstOut], stddev
=0.1), name='W')
b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name='B')
s1 = 1
conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME')
act1 = tf.nn.relu(conv1 + b1)
tf.summary.histogram('weights', w1)
tf.summary.histogram('biases', b1)
tf.summary.histogram('activation', act1)
if Debug:
print('w1 shape: ', w1.shape)
print('b1 shape: ', b1.shape)
print('conv1 shape: ', conv1.shape)
print('act1 shape: ', act1.shape)
secondIn = firstOut
secondOut = 32
with tf.name_scope('Second'):
w2 = tf.Variable(tf.truncated_normal([3, 3, secondIn, secondOut],
stddev=0.1), name='W')
b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')
s2 = 1
conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')
act2 = tf.nn.relu(conv2 + b2)
k2 = 3
ms2 = 1
mp2 = tf.nn.max_pool(act2, ksize=[1, k2, k2, 1], strides=[1, ms2, ms2,
1], padding='SAME')
tf.summary.histogram('weights', w2)
tf.summary.histogram('biases', b2)
tf.summary.histogram('activation', act2)
tf.summary.histogram('maxpooling', mp2)
if Debug:
print('w2 shape: ', w2.shape)
print('b2 shape: ', b2.shape)
print('conv2 shape: ', conv2.shape)
print('act2 shape: ', act2.shape)
print('mp2 shape: ', mp2.shape)
thirdIn = secondOut
thirdOut = 64
with tf.name_scope('Third'):
w3 = tf.Variable(tf.truncated_normal([5, 5, thirdIn, thirdOut], stddev=
0.1), name='W')
b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')
s3 = 1
conv3 = tf.nn.conv2d(mp2, w3, strides=[1, s3, s3, 1], padding='SAME')
act3 = tf.nn.relu(conv3 + b3)
k3 = 3
ms3 = 1
mp3 = tf.nn.max_pool(act3, ksize=[1, k3, k3, 1], strides=[1, ms3, ms3,
1], padding='SAME')
tf.summary.histogram('weights', w3)
tf.summary.histogram('biases', b3)
tf.summary.histogram('activation', act3)
tf.summary.histogram('maxpooling', mp3)
if Debug:
print('w3 shape: ', w3.shape)
print('b3 shape: ', b3.shape)
print('conv3 shape: ', conv3.shape)
print('act3 shape: ', act3.shape)
print('mp3 shape: ', mp3.shape)
fourthIn = thirdOut
fourthOut = 64
with tf.name_scope('Fourth'):
w4 = tf.Variable(tf.truncated_normal([6, 1, fourthIn, fourthOut],
stddev=0.1), name='W')
b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')
s4 = 1
conv4 = tf.nn.conv2d(mp3, w4, strides=[1, s4, s4, 1], padding='SAME')
act4 = tf.nn.relu(conv4 + b4)
tf.summary.histogram('weights', w4)
tf.summary.histogram('biases', b4)
tf.summary.histogram('activation', act4)
if Debug:
print('w4 shape: ', w4.shape)
print('b4 shape: ', b4.shape)
print('conv4 shape: ', conv4.shape)
print('act4 shape: ', act4.shape)
fifthIn = fourthOut
fifthOut = 8
with tf.name_scope('Fifth'):
w5 = tf.Variable(tf.truncated_normal([1, 1, fifthIn, fifthOut], stddev=
0.1), name='W')
b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')
s5 = 1
conv5 = tf.nn.conv2d(act4, w5, strides=[1, s5, s5, 1], padding='SAME')
act5 = tf.nn.relu(conv5 + b5)
with tf.name_scope('Flatten'):
flatten5 = tf.reshape(act5, [-1, 16 * 30 * fifthOut])
with tf.name_scope('FullyCon'):
wfc5 = tf.Variable(tf.truncated_normal([16 * 30 * fifthOut, nMV],
stddev=0.1), name='W')
bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')
y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)
tf.summary.histogram('weights', w5)
tf.summary.histogram('biases', b5)
tf.summary.histogram('activation', act5)
tf.summary.histogram('flatten', flatten5)
tf.summary.histogram('weights_fc5', wfc5)
tf.summary.histogram('biases_fc5', bfc5)
if Debug:
print('w5 shape: ', w5.shape)
print('b5 shape: ', b5.shape)
print('conv5 shape: ', conv5.shape)
print('act5 shape: ', act5.shape)
print('flatten5 shape: ', flatten5.shape)
print('weights_fc5 shape: ', wfc5.shape)
print('biases_fc5 shape: ', bfc5.shape)
print('y_predict shape: ', y_.shape)
with tf.name_scope('Softmaxloss'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=y_, labels=y), name='Loss')
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('Accuracy'):
correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
train = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)
graph_dir = 'sEMGCNN'
<mask token>
usefulFcns.BuildNewlyDir(graph_dir)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(graph_dir)
writer.add_graph(sess.graph)
for i in range(2000):
x_batch, y_batch = ninapro.next_batch(30)
if i % 100 == 0:
[train_accuracy] = sess.run([accuracy], feed_dict={x: x_batch,
y: y_batch})
[test_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.
TestImages, y: ninapro.TestLabels})
[validate_accuracy] = sess.run([accuracy], feed_dict={x:
ninapro.ValidateImages, y: ninapro.ValidateLabels})
print('Step %d, training %g, testing %g, validate %g.' % (i,
train_accuracy, test_accuracy, validate_accuracy))
if i % 5 == 0:
s = sess.run(merged_summary, feed_dict={x: x_batch, y: y_batch})
writer.add_summary(s, i)
sess.run(train, feed_dict={x: x_batch, y: y_batch})
| from classNinapro import Ninapro
import numpy as np
import tensorflow as tf
print(tf.__version__)
Debug = True
ninapro = Ninapro()
ninapro.splitImagesLabels()
print('ninapro.TrainImages shape: ', ninapro.TrainImages.shape)
print('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape)
print('ninapro.TestImages shape: ', ninapro.TestImages.shape)
print('ninapro.TestLabels shape: ', ninapro.TestLabels.shape)
print('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape)
print('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape)
print('Read successfully done...')
nMV = ninapro.TrainLabels.shape[1]
with tf.name_scope('Input'):
x = tf.placeholder(tf.float32, shape=[None, 16, 30], name='X')
y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')
if Debug:
print('input x shape: ', x.shape)
print('input y shape: ', y.shape)
x_image = tf.reshape(x, [-1, 16, 30, 1])
if Debug:
print('x_image shape: ', x_image.shape)
firstIn = 1
firstOut = 32
with tf.name_scope('First'):
w1 = tf.Variable(tf.truncated_normal([1, 16, firstIn, firstOut], stddev
=0.1), name='W')
b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name='B')
s1 = 1
conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME')
act1 = tf.nn.relu(conv1 + b1)
tf.summary.histogram('weights', w1)
tf.summary.histogram('biases', b1)
tf.summary.histogram('activation', act1)
if Debug:
print('w1 shape: ', w1.shape)
print('b1 shape: ', b1.shape)
print('conv1 shape: ', conv1.shape)
print('act1 shape: ', act1.shape)
secondIn = firstOut
secondOut = 32
with tf.name_scope('Second'):
w2 = tf.Variable(tf.truncated_normal([3, 3, secondIn, secondOut],
stddev=0.1), name='W')
b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')
s2 = 1
conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')
act2 = tf.nn.relu(conv2 + b2)
k2 = 3
ms2 = 1
mp2 = tf.nn.max_pool(act2, ksize=[1, k2, k2, 1], strides=[1, ms2, ms2,
1], padding='SAME')
tf.summary.histogram('weights', w2)
tf.summary.histogram('biases', b2)
tf.summary.histogram('activation', act2)
tf.summary.histogram('maxpooling', mp2)
if Debug:
print('w2 shape: ', w2.shape)
print('b2 shape: ', b2.shape)
print('conv2 shape: ', conv2.shape)
print('act2 shape: ', act2.shape)
print('mp2 shape: ', mp2.shape)
thirdIn = secondOut
thirdOut = 64
with tf.name_scope('Third'):
w3 = tf.Variable(tf.truncated_normal([5, 5, thirdIn, thirdOut], stddev=
0.1), name='W')
b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')
s3 = 1
conv3 = tf.nn.conv2d(mp2, w3, strides=[1, s3, s3, 1], padding='SAME')
act3 = tf.nn.relu(conv3 + b3)
k3 = 3
ms3 = 1
mp3 = tf.nn.max_pool(act3, ksize=[1, k3, k3, 1], strides=[1, ms3, ms3,
1], padding='SAME')
tf.summary.histogram('weights', w3)
tf.summary.histogram('biases', b3)
tf.summary.histogram('activation', act3)
tf.summary.histogram('maxpooling', mp3)
if Debug:
print('w3 shape: ', w3.shape)
print('b3 shape: ', b3.shape)
print('conv3 shape: ', conv3.shape)
print('act3 shape: ', act3.shape)
print('mp3 shape: ', mp3.shape)
fourthIn = thirdOut
fourthOut = 64
with tf.name_scope('Fourth'):
w4 = tf.Variable(tf.truncated_normal([6, 1, fourthIn, fourthOut],
stddev=0.1), name='W')
b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')
s4 = 1
conv4 = tf.nn.conv2d(mp3, w4, strides=[1, s4, s4, 1], padding='SAME')
act4 = tf.nn.relu(conv4 + b4)
tf.summary.histogram('weights', w4)
tf.summary.histogram('biases', b4)
tf.summary.histogram('activation', act4)
if Debug:
print('w4 shape: ', w4.shape)
print('b4 shape: ', b4.shape)
print('conv4 shape: ', conv4.shape)
print('act4 shape: ', act4.shape)
fifthIn = fourthOut
fifthOut = 8
with tf.name_scope('Fifth'):
w5 = tf.Variable(tf.truncated_normal([1, 1, fifthIn, fifthOut], stddev=
0.1), name='W')
b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')
s5 = 1
conv5 = tf.nn.conv2d(act4, w5, strides=[1, s5, s5, 1], padding='SAME')
act5 = tf.nn.relu(conv5 + b5)
with tf.name_scope('Flatten'):
flatten5 = tf.reshape(act5, [-1, 16 * 30 * fifthOut])
with tf.name_scope('FullyCon'):
wfc5 = tf.Variable(tf.truncated_normal([16 * 30 * fifthOut, nMV],
stddev=0.1), name='W')
bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')
y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)
tf.summary.histogram('weights', w5)
tf.summary.histogram('biases', b5)
tf.summary.histogram('activation', act5)
tf.summary.histogram('flatten', flatten5)
tf.summary.histogram('weights_fc5', wfc5)
tf.summary.histogram('biases_fc5', bfc5)
if Debug:
print('w5 shape: ', w5.shape)
print('b5 shape: ', b5.shape)
print('conv5 shape: ', conv5.shape)
print('act5 shape: ', act5.shape)
print('flatten5 shape: ', flatten5.shape)
print('weights_fc5 shape: ', wfc5.shape)
print('biases_fc5 shape: ', bfc5.shape)
print('y_predict shape: ', y_.shape)
with tf.name_scope('Softmaxloss'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=y_, labels=y), name='Loss')
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('Accuracy'):
correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
train = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)
graph_dir = 'sEMGCNN'
import usefulFcns
usefulFcns.BuildNewlyDir(graph_dir)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(graph_dir)
writer.add_graph(sess.graph)
for i in range(2000):
x_batch, y_batch = ninapro.next_batch(30)
if i % 100 == 0:
[train_accuracy] = sess.run([accuracy], feed_dict={x: x_batch,
y: y_batch})
[test_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.
TestImages, y: ninapro.TestLabels})
[validate_accuracy] = sess.run([accuracy], feed_dict={x:
ninapro.ValidateImages, y: ninapro.ValidateLabels})
print('Step %d, training %g, testing %g, validate %g.' % (i,
train_accuracy, test_accuracy, validate_accuracy))
if i % 5 == 0:
s = sess.run(merged_summary, feed_dict={x: x_batch, y: y_batch})
writer.add_summary(s, i)
sess.run(train, feed_dict={x: x_batch, y: y_batch})
| from classNinapro import Ninapro
import numpy as np
import tensorflow as tf
print(tf.__version__)
Debug = True # for tensor dimensionality checking
ninapro = Ninapro()
ninapro.splitImagesLabels()
# Train
print('ninapro.TrainImages shape: ', ninapro.TrainImages.shape) # m x 16 x 30
print('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape) # m x 8
# Test
print('ninapro.TestImages shape: ', ninapro.TestImages.shape) # m x 16 x 30
print('ninapro.TestLabels shape: ', ninapro.TestLabels.shape) # m x 8
# Validate
print('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape) # m x 16 x 30
print('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape) # m x 8
print('Read successfully done...')
# number of total classes of movements, 8 for example.
nMV = ninapro.TrainLabels.shape[1]
# - build the Convolutional Neural Network
#-------------------------------------------------add Full+Dropout+Fully
# Setup placeholders for input data
with tf.name_scope('Input'):
x = tf.placeholder(tf.float32, shape=[None, 16,30], name='X')
y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')
if Debug:
print('input x shape: ', x.shape)
print('input y shape: ', y.shape)
# every sample with the dimensionality, 16x30
x_image = tf.reshape(x, [-1, 16, 30, 1])
if Debug:
print('x_image shape: ', x_image.shape)
# summary
#tf.summary.image('input', x, 4)
firstIn = 1
firstOut = 32
with tf.name_scope('First'):
# convolution
w1 = tf.Variable(tf.truncated_normal([1,16, firstIn, firstOut], stddev=0.1), name = 'W')
b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name = 'B' )
s1 = 1
conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME' )
act1 = tf.nn.relu(conv1 + b1)
# summary
tf.summary.histogram('weights', w1)
tf.summary.histogram('biases', b1)
tf.summary.histogram('activation', act1)
# dimensionality checking
if Debug:
print('w1 shape: ', w1.shape)
print('b1 shape: ', b1.shape)
print('conv1 shape: ', conv1.shape)
print('act1 shape: ', act1.shape)
secondIn = firstOut
secondOut = 32
with tf.name_scope('Second'):
# convolution
w2 = tf.Variable(tf.truncated_normal([3,3, secondIn, secondOut], stddev=0.1), name='W')
b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')
s2 = 1
conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')
# detector
act2 = tf.nn.relu(conv2 + b2)
# maxpooling
k2 = 3
ms2 = 1
mp2 = tf.nn.max_pool(act2, ksize=[1, k2,k2, 1], strides=[1,ms2,ms2,1], padding='SAME')
# summary
tf.summary.histogram('weights', w2)
tf.summary.histogram('biases', b2)
tf.summary.histogram('activation', act2)
tf.summary.histogram('maxpooling', mp2)
# dimensionality checking
if Debug:
print('w2 shape: ', w2.shape)
print('b2 shape: ', b2.shape)
print('conv2 shape: ', conv2.shape)
print('act2 shape: ', act2.shape)
print('mp2 shape: ', mp2.shape)
thirdIn = secondOut
thirdOut = 64
with tf.name_scope('Third'):
# convolution
w3 = tf.Variable(tf.truncated_normal([5,5, thirdIn, thirdOut], stddev=0.1), name='W')
b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')
s3 = 1
conv3 = tf.nn.conv2d(mp2, w3, strides=[1,s3,s3,1], padding='SAME')
# detector
act3 = tf.nn.relu(conv3 + b3)
# maxpooling
k3 = 3 # ksize of maxpooling
    ms3 = 1 # maxpooling stride
mp3 = tf.nn.max_pool(act3, ksize=[1,k3,k3,1], strides=[1, ms3, ms3, 1], padding='SAME')
# summary
tf.summary.histogram('weights', w3)
tf.summary.histogram('biases', b3)
tf.summary.histogram('activation', act3)
tf.summary.histogram('maxpooling', mp3)
# dimensionality checking
if Debug:
print('w3 shape: ', w3.shape)
print('b3 shape: ', b3.shape)
print('conv3 shape: ', conv3.shape)
print('act3 shape: ', act3.shape)
print('mp3 shape: ', mp3.shape)
fourthIn = thirdOut
fourthOut = 64
with tf.name_scope('Fourth'):
# convolution
w4 = tf.Variable(tf.truncated_normal([6,1, fourthIn, fourthOut], stddev=0.1), name='W')
b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')
s4 = 1
conv4 = tf.nn.conv2d(mp3, w4, strides=[1,s4,s4,1], padding='SAME')
# detector
act4 = tf.nn.relu(conv4 + b4)
# summary
tf.summary.histogram('weights', w4)
tf.summary.histogram('biases', b4)
tf.summary.histogram('activation', act4)
# dimensionality checking
if Debug:
print('w4 shape: ', w4.shape)
print('b4 shape: ', b4.shape)
print('conv4 shape: ', conv4.shape)
print('act4 shape: ', act4.shape)
fifthIn = fourthOut
fifthOut = 8
with tf.name_scope('Fifth'):
# convolution
w5 = tf.Variable(tf.truncated_normal([1,1, fifthIn, fifthOut], stddev=0.1), name='W')
b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')
s5 = 1
conv5 = tf.nn.conv2d(act4, w5, strides=[1,s5,s5,1], padding='SAME')
# detector
act5 = tf.nn.relu(conv5 + b5)
# flatten
with tf.name_scope('Flatten'):
flatten5 = tf.reshape(act5, [-1, 16*30*fifthOut])
# fully-connect layer
with tf.name_scope('FullyCon'):
wfc5 = tf.Variable(tf.truncated_normal( [16*30*fifthOut, nMV], stddev=0.1), name='W')
bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')
y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)
# summary
tf.summary.histogram('weights', w5)
tf.summary.histogram('biases', b5)
tf.summary.histogram('activation', act5)
tf.summary.histogram('flatten', flatten5)
tf.summary.histogram('weights_fc5', wfc5)
tf.summary.histogram('biases_fc5', bfc5)
# dimensionality checking
if Debug:
print('w5 shape: ', w5.shape)
print('b5 shape: ', b5.shape)
print('conv5 shape: ', conv5.shape)
print('act5 shape: ', act5.shape)
print('flatten5 shape: ', flatten5.shape)
print('weights_fc5 shape: ', wfc5.shape)
print('biases_fc5 shape: ', bfc5.shape)
print('y_predict shape: ', y_.shape)
with tf.name_scope('Softmaxloss'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y), name='Loss')
# summary
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('Accuracy'):
correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# summary
tf.summary.scalar('accuracy', accuracy)
# Use an AdamOptimizer to train the network
train = tf.train.AdamOptimizer(1e-1).minimize(cross_entropy)
# Visualization directory
graph_dir = 'sEMGCNN'
import usefulFcns
usefulFcns.BuildNewlyDir(graph_dir)
# Train the model
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(graph_dir)
writer.add_graph(sess.graph)
for i in range(2000):
x_batch, y_batch = ninapro.next_batch(30)
        # Occasionally report accuracy on [train] and [test]
if i%100==0:
[train_accuracy] = sess.run([accuracy], feed_dict={x:x_batch, y:y_batch})
[test_accuracy] = sess.run([accuracy], feed_dict={x:ninapro.TestImages, y:ninapro.TestLabels})
[validate_accuracy] = sess.run([accuracy], feed_dict={x:ninapro.ValidateImages, y:ninapro.ValidateLabels} )
print('Step %d, training %g, testing %g, validate %g.' % (i, train_accuracy, test_accuracy, validate_accuracy) )
        # Occasionally write a visualization summary to the disk file.
if i%5==0:
s = sess.run(merged_summary, feed_dict={x:x_batch, y:y_batch})
writer.add_summary(s,i)
# Training the model
sess.run(train, feed_dict={x:x_batch, y:y_batch})
| [
0,
1,
2,
3,
4
] |
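usefulFcns.BuildNewlyDir is project-local and not shown; a hypothetical sketch consistent with its use above would recreate the summary directory so each run starts with clean TensorBoard logs.

# Hypothetical usefulFcns.BuildNewlyDir -- the real module is not in the source.
import os
import shutil

def BuildNewlyDir(path):
    if os.path.isdir(path):
        shutil.rmtree(path)  # drop stale event files from earlier runs
    os.makedirs(path)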
1,579 | 47be41bd5838b828acdc90c3ef5abdeec9da1e85 | <mask token>
| <mask token>
with open('/Users/neeraj.joshi/Downloads/index.html') as html_file:
soup = BeautifulSoup(html_file, 'lxml')
<mask token>
for tree in soup.find_all('tr'):
data = []
for todd in tree.find_all('td'):
data.append(todd.text)
print(data)
csv_writer.writerow(data)
| <mask token>
with open('/Users/neeraj.joshi/Downloads/index.html') as html_file:
soup = BeautifulSoup(html_file, 'lxml')
filename = '/Users/neeraj.joshi/Downloads/test.csv'
csv_writer = csv.writer(open(filename, 'w'))
for tree in soup.find_all('tr'):
data = []
for todd in tree.find_all('td'):
data.append(todd.text)
print(data)
csv_writer.writerow(data)
| import csv
import os
import requests
from bs4 import BeautifulSoup
with open('/Users/neeraj.joshi/Downloads/index.html') as html_file:
soup = BeautifulSoup(html_file, 'lxml')
filename = '/Users/neeraj.joshi/Downloads/test.csv'
csv_writer = csv.writer(open(filename, 'w'))
for tree in soup.find_all('tr'):
data = []
for todd in tree.find_all('td'):
data.append(todd.text)
print(data)
csv_writer.writerow(data)
| import csv
import os
import requests
from bs4 import BeautifulSoup
# open the HTML file and parse it with the lxml parser
with open('/Users/neeraj.joshi/Downloads/index.html') as html_file:
soup = BeautifulSoup(html_file, 'lxml')
#row = soup.find_all('tr')
#column = row.find_all('td')
#print(soup)
# create the output file; 'w' opens it in write mode
filename = '/Users/neeraj.joshi/Downloads/test.csv'
csv_writer = csv.writer(open(filename, 'w', newline=''))
# collect each row's cell text in the data list
# each <tr> is one table row
for tree in soup.find_all('tr'):
data = []
# each <td> is one cell within the row
for todd in tree.find_all('td'):
#print(todd.text)  # append each cell's text to the data list built above
data.append(todd.text)
print(data)
csv_writer.writerow(data)
| [
0,
1,
2,
3,
4
] |
1,580 | e55fe845c18ff70ba12bb7c2db28ceded8ae9129 | <mask token>
| SSMDocumentName = 'AWS-RunPowerShellScript'
InstanceId = ['i-081a7260c79feb260']
Querytimeoutseconds = 3600
OutputS3BucketName = 'hccake'
OutputS3KeyPrefix = 'log_'
region_name = 'us-east-2'
aws_access_key_id = ''
aws_secret_access_key = ''
workingdirectory = ['c:\\']
executiontimeout = ['3600']
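# (the empty credential strings are placeholders; real keys should come from the environment or an AWS profile)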
| SSMDocumentName ='AWS-RunPowerShellScript'
InstanceId = ['i-081a7260c79feb260']
Querytimeoutseconds = 3600
OutputS3BucketName = 'hccake'
OutputS3KeyPrefix = 'log_'
region_name ='us-east-2'
aws_access_key_id =''
aws_secret_access_key =''
workingdirectory =["c:\\"]
executiontimeout =["3600"] | null | null | [
0,
1,
2
] |
1,581 | cd34f9ef100ae6d116f02258d22c114ec3f3e3e6 | <mask token>
| <mask token>
with os.scandir(os.path.abspath(beatmap_dir)) as it:
for entry in it:
if entry.is_dir():
try:
beatmap_id = int(str(entry.name).split(' ')[0])
except ValueError:
continue
beatmaps.append(entry.path)
<mask token>
for beatmap in beatmaps:
with os.scandir(os.path.abspath(beatmap)) as it:
bm = {'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),
'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(
beatmap)[1]).split(' ')[0]) + 1:], 'audio': None,
'audio_length': None, 'video': None}
print('{} {}'.format(bm['id'], bm['name']))
for entry in it:
if entry.is_file():
if entry.path.endswith('osu'):
with open(entry.path, 'r', encoding='utf-8') as f:
config_string = '[global]\n' + f.read()
a = ''
for x in config_string.split('\n')[:config_string.split
('\n').index('[Events]') - 1]:
a += x + '\n'
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(a)
bm['audio'] = os.path.abspath(os.path.dirname(entry.
path) + '\\' + config.get('General', 'AudioFilename'))
elif entry.path.endswith('mp4') or entry.path.endswith('avi'
) or entry.path.endswith('mpg'):
bm['video'] = entry.path
bm_osu.append(bm)
<mask token>
for bm in bm_osu:
if bm['audio']:
text_playlist += '#EXTINF:0,{0}\n{1}\n'.format(bm['name'], bm['audio'])
<mask token>
try:
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
except:
open('osu.m3u', 'x')
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
<mask token>
for bm in bm_osu:
if bm['name']:
text_type += '{0}\n'.format(bm['name'])
<mask token>
try:
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
except:
open('osu.txt', 'x')
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
for bm in bm_osu:
if bm['audio']:
print('{} {}'.format(bm['id'], bm['name']))
if os.path.basename(bm['audio']).split('.')[-1] != '':
shutil.copy2(bm['audio'], '{}\\osu music\\{}.{}'.format(os.
getcwd(), bm['name'], os.path.basename(bm['audio']).split(
'.')[-1]))
if bm['video']:
shutil.copy2(bm['video'], '{}\\osu music\\{}.{}'.format(os.getcwd(),
bm['name'], os.path.basename(bm['video']).split('.')[-1]))
print('done, ty for use')
| <mask token>
beatmap_dir = os.path.abspath(os.environ['LOCALAPPDATA'] + '\\osu!\\Songs\\')
beatmaps = []
bm_osu = []
with os.scandir(os.path.abspath(beatmap_dir)) as it:
for entry in it:
if entry.is_dir():
try:
beatmap_id = int(str(entry.name).split(' ')[0])
except ValueError:
continue
beatmaps.append(entry.path)
beatmap_type = {'id': 0, 'name': 'Author - Title', 'audio':
'.\\somefile.mp3', 'video': '.\\something.mp4'}
for beatmap in beatmaps:
with os.scandir(os.path.abspath(beatmap)) as it:
bm = {'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),
'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(
beatmap)[1]).split(' ')[0]) + 1:], 'audio': None,
'audio_length': None, 'video': None}
print('{} {}'.format(bm['id'], bm['name']))
for entry in it:
if entry.is_file():
if entry.path.endswith('osu'):
with open(entry.path, 'r', encoding='utf-8') as f:
config_string = '[global]\n' + f.read()
a = ''
for x in config_string.split('\n')[:config_string.split
('\n').index('[Events]') - 1]:
a += x + '\n'
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(a)
bm['audio'] = os.path.abspath(os.path.dirname(entry.
path) + '\\' + config.get('General', 'AudioFilename'))
elif entry.path.endswith('mp4') or entry.path.endswith('avi'
) or entry.path.endswith('mpg'):
bm['video'] = entry.path
bm_osu.append(bm)
text_playlist = ''
for bm in bm_osu:
if bm['audio']:
text_playlist += '#EXTINF:0,{0}\n{1}\n'.format(bm['name'], bm['audio'])
text_playlist = text_playlist[:-1]
try:
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
except:
open('osu.m3u', 'x')
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
text_type = ''
for bm in bm_osu:
if bm['name']:
text_type += '{0}\n'.format(bm['name'])
text_type = text_type[:-1]
try:
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
except:
open('osu.txt', 'x')
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
for bm in bm_osu:
if bm['audio']:
print('{} {}'.format(bm['id'], bm['name']))
if os.path.basename(bm['audio']).split('.')[-1] != '':
shutil.copy2(bm['audio'], '{}\\osu music\\{}.{}'.format(os.
getcwd(), bm['name'], os.path.basename(bm['audio']).split(
'.')[-1]))
if bm['video']:
shutil.copy2(bm['video'], '{}\\osu music\\{}.{}'.format(os.getcwd(),
bm['name'], os.path.basename(bm['video']).split('.')[-1]))
print('done, ty for use')
| import os
import shutil
import configparser
beatmap_dir = os.path.abspath(os.environ['LOCALAPPDATA'] + '\\osu!\\Songs\\')
beatmaps = []
bm_osu = []
with os.scandir(os.path.abspath(beatmap_dir)) as it:
for entry in it:
if entry.is_dir():
try:
beatmap_id = int(str(entry.name).split(' ')[0])
except ValueError:
continue
beatmaps.append(entry.path)
beatmap_type = {'id': 0, 'name': 'Author - Title', 'audio':
'.\\somefile.mp3', 'video': '.\\something.mp4'}
for beatmap in beatmaps:
with os.scandir(os.path.abspath(beatmap)) as it:
bm = {'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),
'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(
beatmap)[1]).split(' ')[0]) + 1:], 'audio': None,
'audio_length': None, 'video': None}
print('{} {}'.format(bm['id'], bm['name']))
for entry in it:
if entry.is_file():
if entry.path.endswith('osu'):
with open(entry.path, 'r', encoding='utf-8') as f:
config_string = '[global]\n' + f.read()
a = ''
for x in config_string.split('\n')[:config_string.split
('\n').index('[Events]') - 1]:
a += x + '\n'
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(a)
bm['audio'] = os.path.abspath(os.path.dirname(entry.
path) + '\\' + config.get('General', 'AudioFilename'))
elif entry.path.endswith('mp4') or entry.path.endswith('avi'
) or entry.path.endswith('mpg'):
bm['video'] = entry.path
bm_osu.append(bm)
text_playlist = ''
for bm in bm_osu:
if bm['audio']:
text_playlist += '#EXTINF:0,{0}\n{1}\n'.format(bm['name'], bm['audio'])
text_playlist = text_playlist[:-1]
try:
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
except:
open('osu.m3u', 'x')
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
text_type = ''
for bm in bm_osu:
if bm['name']:
text_type += '{0}\n'.format(bm['name'])
text_type = text_type[:-1]
try:
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
except:
open('osu.txt', 'x')
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
for bm in bm_osu:
if bm['audio']:
print('{} {}'.format(bm['id'], bm['name']))
if os.path.basename(bm['audio']).split('.')[-1] != '':
shutil.copy2(bm['audio'], '{}\\osu music\\{}.{}'.format(os.
getcwd(), bm['name'], os.path.basename(bm['audio']).split(
'.')[-1]))
if bm['video']:
shutil.copy2(bm['video'], '{}\\osu music\\{}.{}'.format(os.getcwd(),
bm['name'], os.path.basename(bm['video']).split('.')[-1]))
print('done, ty for use')
| import os
import shutil
import configparser
beatmap_dir = os.path.abspath(os.environ['LOCALAPPDATA']+'\\osu!\\Songs\\')
beatmaps = []
bm_osu = []
with os.scandir(os.path.abspath(beatmap_dir)) as it:
for entry in it:
if entry.is_dir():
try:
beatmap_id = int(str(entry.name).split(' ')[0])
except ValueError:
# I'm not sure what to do about unranked maps right now, we will exclude them
continue
beatmaps.append(entry.path)
beatmap_type = {
"id": 0, # You may parse for "[Metadata]\n\nBeatmapSetID:{sid}" (WARN: Earlier maps will lack this parameter (osu file format v3 < osu file format v14)) or use the one provided with path
"name": 'Author - Title', # I should get it from osu files rather than directory, but that's how it happens
"audio": ".\\somefile.mp3", # Parse for "[General]\n\nAudioFilename: {filename}" | DONE
"video": ".\\something.mp4" # Parse for "[Events]\n\nVideo,{timestamp},{filename}" (found mp4,avi,mpg) | plz check, TODO
}
for beatmap in beatmaps:
with os.scandir(os.path.abspath(beatmap)) as it:
bm = {
'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),
'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(beatmap)[1]).split(' ')[0])+1:],
'audio': None,
'audio_length': None,
'video': None
}
print('{} {}'.format(bm['id'], bm['name']))
for entry in it:
if entry.is_file():
if entry.path.endswith('osu'):
# ConfigParser is actually an overkill solution, although I set it up to work
# FixMe: This solution does not account for multiple (via diff) maps in one
# Although, ranked maps should never have this.
with open(entry.path, 'r', encoding="utf-8") as f:
config_string = '[global]\n' + f.read()
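# prepending a [global] section lets ConfigParser swallow the sectionless "osu file format vNN" line at the top of .osu files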
a = ''
for x in config_string.split('\n')[:config_string.split('\n').index('[Events]')-1]:
a += x+'\n'
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(a)
# TODO: Rewrite to simple checks and add video checking.
bm['audio'] = os.path.abspath(os.path.dirname(entry.path)+'\\'+config.get('General', 'AudioFilename'))
elif entry.path.endswith('mp4') or entry.path.endswith('avi') or entry.path.endswith('mpg'):
bm['video'] = entry.path
bm_osu.append(bm)
text_playlist = ""
for bm in bm_osu:
if bm['audio']:
text_playlist += "#EXTINF:0,{0}\n{1}\n".format(bm['name'], bm['audio'])
text_playlist = text_playlist[:-1]
try:
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
except:
open('osu.m3u', 'x')
with open('osu.m3u', 'w', encoding='utf-8') as file:
file.write(text_playlist)
text_type = ""
for bm in bm_osu:
if bm['name']:
text_type += "{0}\n".format(bm['name'])
text_type = text_type[:-1]
try:
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
except:
open('osu.txt', 'x')
with open('osu.txt', 'w', encoding='utf-8') as file:
file.write(text_type)
for bm in bm_osu:
if bm['audio']:
print('{} {}'.format(bm['id'], bm['name']))
if os.path.basename(bm['audio']).split('.')[-1] != '':
shutil.copy2(bm['audio'], "{}\\osu music\\{}.{}".format(os.getcwd(), bm['name'], os.path.basename(bm['audio']).split('.')[-1]))
if bm['video']:
shutil.copy2(bm['video'], "{}\\osu music\\{}.{}".format(os.getcwd(), bm['name'], os.path.basename(bm['video']).split('.')[-1]))
print('done, ty for use') | [
0,
1,
2,
3,
4
] |
1,582 | 6b138dabf57166ec971052fff7df89ae0346e083 | <mask token>
class Video_Server(threading.Thread):
<mask token>
<mask token>
def run(self):
detector, predictor = face_capture_edit.face_init(self.
face_shape_predictor)
print('face_capture_init is ready')
print('VIDEO server starts ...')
self.sock.bind(self.ADDR)
self.sock.listen(1)
conn, addr = self.sock.accept()
print('remote VIDEO client success connected ...')
data = ''.encode('utf-8')
payload_size = struct.calcsize('L')
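# length-prefix framing: every frame arrives as a struct-packed size header followed by a zlib-compressed pickle payload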
cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
while True:
while len(data) < payload_size:
data += conn.recv(81920)
packed_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack('L', packed_size)[0]
while len(data) < msg_size:
data += conn.recv(89120)
zframe_data = data[:msg_size]
data = data[msg_size:]
frame_data = zlib.decompress(zframe_data)
frame = pickle.loads(frame_data)
if self.face_cap == 1:
frame_face = face_capture_edit.face_capture_e(frame.copy(),
detector, predictor)
cv2.imshow('Face_capture', frame_face)
if self.view_version == 0:
frame = frame
elif self.view_version == 1:
frame = cartoon_edit.cartoon_e(frame)
elif self.view_version == 2:
frame = pencil_edit.rgb_to_sketch(frame)
cv2.namedWindow('Remote', 0)
cv2.resizeWindow('Remote', 640, 480)
cv2.imshow('Remote', frame)
if cv2.waitKey(1) & 255 == ord('q'):
file_aip = open(self.break_audio_aip, 'w')
file_audio = open(self.break_audio, 'w')
break
| <mask token>
class Video_Server(threading.Thread):
<mask token>
def __del__(self):
self.sock.close()
try:
cv2.destroyAllWindows()
except:
pass
print('video close')
def run(self):
detector, predictor = face_capture_edit.face_init(self.
face_shape_predictor)
print('face_capture_init is ready')
print('VIDEO server starts ...')
self.sock.bind(self.ADDR)
self.sock.listen(1)
conn, addr = self.sock.accept()
print('remote VIDEO client success connected ...')
data = ''.encode('utf-8')
payload_size = struct.calcsize('L')
cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
while True:
while len(data) < payload_size:
data += conn.recv(81920)
packed_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack('L', packed_size)[0]
while len(data) < msg_size:
data += conn.recv(89120)
zframe_data = data[:msg_size]
data = data[msg_size:]
frame_data = zlib.decompress(zframe_data)
frame = pickle.loads(frame_data)
if self.face_cap == 1:
frame_face = face_capture_edit.face_capture_e(frame.copy(),
detector, predictor)
cv2.imshow('Face_capture', frame_face)
if self.view_version == 0:
frame = frame
elif self.view_version == 1:
frame = cartoon_edit.cartoon_e(frame)
elif self.view_version == 2:
frame = pencil_edit.rgb_to_sketch(frame)
cv2.namedWindow('Remote', 0)
cv2.resizeWindow('Remote', 640, 480)
cv2.imshow('Remote', frame)
if cv2.waitKey(1) & 255 == ord('q'):
file_aip = open(self.break_audio_aip, 'w')
file_audio = open(self.break_audio, 'w')
break
| <mask token>
class Video_Server(threading.Thread):
def __init__(self, port, version, face_cap, view_version,
face_shape_predictor, break_audio_aip, break_audio):
threading.Thread.__init__(self)
self.setDaemon(True)
self.ADDR = '', port
self.face_cap = face_cap
self.view_version = view_version
self.face_shape_predictor = face_shape_predictor
self.break_audio = break_audio
self.break_audio_aip = break_audio_aip
if version == 4:
self.sock = socket(AF_INET, SOCK_STREAM)
else:
self.sock = socket(AF_INET6, SOCK_STREAM)
def __del__(self):
self.sock.close()
try:
cv2.destroyAllWindows()
except:
pass
print('video close')
def run(self):
detector, predictor = face_capture_edit.face_init(self.
face_shape_predictor)
print('face_capture_init is ready')
print('VIDEO server starts ...')
self.sock.bind(self.ADDR)
self.sock.listen(1)
conn, addr = self.sock.accept()
print('remote VIDEO client success connected ...')
data = ''.encode('utf-8')
payload_size = struct.calcsize('L')
cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
while True:
while len(data) < payload_size:
data += conn.recv(81920)
packed_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack('L', packed_size)[0]
while len(data) < msg_size:
data += conn.recv(89120)
zframe_data = data[:msg_size]
data = data[msg_size:]
frame_data = zlib.decompress(zframe_data)
frame = pickle.loads(frame_data)
if self.face_cap == 1:
frame_face = face_capture_edit.face_capture_e(frame.copy(),
detector, predictor)
cv2.imshow('Face_capture', frame_face)
if self.view_version == 0:
frame = frame
elif self.view_version == 1:
frame = cartoon_edit.cartoon_e(frame)
elif self.view_version == 2:
frame = pencil_edit.rgb_to_sketch(frame)
cv2.namedWindow('Remote', 0)
cv2.resizeWindow('Remote', 640, 480)
cv2.imshow('Remote', frame)
if cv2.waitKey(1) & 255 == ord('q'):
file_aip = open(self.break_audio_aip, 'w')
file_audio = open(self.break_audio, 'w')
break
| <mask token>
from socket import *
import threading
import time
import cv2
import struct
import pickle
import zlib
import cartoon_edit
import face_capture_edit
import pencil_edit
class Video_Server(threading.Thread):
def __init__(self, port, version, face_cap, view_version,
face_shape_predictor, break_audio_aip, break_audio):
threading.Thread.__init__(self)
self.setDaemon(True)
self.ADDR = '', port
self.face_cap = face_cap
self.view_version = view_version
self.face_shape_predictor = face_shape_predictor
self.break_audio = break_audio
self.break_audio_aip = break_audio_aip
if version == 4:
self.sock = socket(AF_INET, SOCK_STREAM)
else:
self.sock = socket(AF_INET6, SOCK_STREAM)
def __del__(self):
self.sock.close()
try:
cv2.destroyAllWindows()
except:
pass
print('video close')
def run(self):
detector, predictor = face_capture_edit.face_init(self.
face_shape_predictor)
print('face_capture_init is ready')
print('VIDEO server starts ...')
self.sock.bind(self.ADDR)
self.sock.listen(1)
conn, addr = self.sock.accept()
print('remote VIDEO client success connected ...')
data = ''.encode('utf-8')
payload_size = struct.calcsize('L')
cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
while True:
while len(data) < payload_size:
data += conn.recv(81920)
packed_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack('L', packed_size)[0]
while len(data) < msg_size:
data += conn.recv(89120)
zframe_data = data[:msg_size]
data = data[msg_size:]
frame_data = zlib.decompress(zframe_data)
frame = pickle.loads(frame_data)
if self.face_cap == 1:
frame_face = face_capture_edit.face_capture_e(frame.copy(),
detector, predictor)
cv2.imshow('Face_capture', frame_face)
if self.view_version == 0:
frame = frame
elif self.view_version == 1:
frame = cartoon_edit.cartoon_e(frame)
elif self.view_version == 2:
frame = pencil_edit.rgb_to_sketch(frame)
cv2.namedWindow('Remote', 0)
cv2.resizeWindow('Remote', 640, 480)
cv2.imshow('Remote', frame)
if cv2.waitKey(1) & 255 == ord('q'):
file_aip = open(self.break_audio_aip, 'w')
file_audio = open(self.break_audio, 'w')
break
| # -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 19:16:16 2019
@author: pc
"""
from socket import *
import threading
import time
import cv2
import struct
import pickle
import zlib
import cartoon_edit
import face_capture_edit
import pencil_edit
class Video_Server(threading.Thread):
def __init__ (self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):
threading.Thread.__init__(self)
self.setDaemon(True)  # daemon threads exit automatically when the main thread ends, so stray threads cannot keep a dead program alive
self.ADDR = ('', port)  # the address/port the socket will bind to
self.face_cap = face_cap
self.view_version = view_version
self.face_shape_predictor = face_shape_predictor
self.break_audio = break_audio
self.break_audio_aip = break_audio_aip
if version == 4:  # IPv4, otherwise IPv6
self.sock = socket(AF_INET, SOCK_STREAM)
else:
self.sock = socket(AF_INET6,SOCK_STREAM)
def __del__(self):
self.sock.close()
try:
cv2.destroyAllWindows()
except:
pass
print("video close")
def run(self):
detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
print("face_capture_init is ready")
print("VIDEO server starts ...")
self.sock.bind(self.ADDR)  # bind to the configured port
self.sock.listen(1)  # listen for one client
conn, addr = self.sock.accept()  # the server gets a new socket connected to the client
print("remote VIDEO client success connected ...")
data = "".encode("utf-8")  # receive buffer
payload_size = struct.calcsize("L")  # size of the length header, used to extract each frame exactly
cv2.namedWindow('Remote',cv2.WINDOW_NORMAL)
while True:
while len(data) < payload_size:  # bytes beyond one frame are kept and merged with the next read; if short, keep reading
data += conn.recv(81920)
packed_size = data[:payload_size]  # slice the length header off the front of the buffer
data = data[payload_size:]  # keep the remainder of the buffer
msg_size = struct.unpack("L", packed_size)[0]  # unpack the frame-size header
while len(data) < msg_size:
data += conn.recv(89120)
zframe_data = data[:msg_size]
data = data[msg_size:]
frame_data = zlib.decompress(zframe_data)
frame = pickle.loads(frame_data)
if self.face_cap == 1:
frame_face = face_capture_edit.face_capture_e(frame.copy(),detector, predictor)
cv2.imshow("Face_capture", frame_face)
if self.view_version == 0:  # original style
frame = frame
elif self.view_version == 1:  # cartoon style
frame = cartoon_edit.cartoon_e(frame)
elif self.view_version == 2:  # pencil-sketch style
frame = pencil_edit.rgb_to_sketch(frame)
cv2.namedWindow("Remote", 0)
cv2.resizeWindow("Remote", 640, 480)
cv2.imshow("Remote", frame)
if cv2.waitKey(1) & 0xff == ord('q'):
file_aip = open(self.break_audio_aip,'w')
file_audio = open(self.break_audio,'w')
break
| [
2,
3,
4,
5,
6
] |
1,583 | 9c251e0224979877b9ce244e4871fd4c403abb8e | <mask token>
class LRUCache:
def __init__(self, get_from_origin, max_size=1024):
if max_size == 0:
raise NotImplementedError()
if max_size < 0:
raise ValueError()
self.size = 0
self.max_size = max_size
self._get_from_origin = get_from_origin
self._cache = {}
self._most_recent = None
self._least_recent = None
<mask token>
<mask token>
def _hit(self, key):
self._bump_cached(key)
return self._cache[key].value
def _miss(self, key, *args, **kwargs):
value = self._get_from_origin(*args, **kwargs)
if not self._most_recent:
self._bump_init(key)
else:
self._bump_new(key)
self._set(key, value)
return value
<mask token>
def _bump_new(self, key):
self._bump(key)
if self.full:
old_last = self._least_recent
new_last = old_last.prv
new_last.nxt = None
self._least_recent = new_last
self._remove(old_last.key)
else:
self.size += 1
def _bump_cached(self, key):
self._bump(key)
self._remove_old_position(key)
<mask token>
<mask token>
<mask token>
<mask token>
def __repr__(self):
if not self._most_recent:
return '[ | ]'
current = self._most_recent
keys = [current.key]
while current.nxt:
current = current.nxt
keys.append(current.key)
return '[ ' + ' | '.join(keys) + ' ]'
<mask token>
class cache:
def __init__(self, max_size):
assert isinstance(max_size, int)
self.max_size = max_size
def __call__(self, func):
lru = LRUCache(func, max_size=self.max_size)
def cached_f(*args, **kwargs):
return lru.get(*args, **kwargs)
return cached_f
| <mask token>
class LRUCache:
def __init__(self, get_from_origin, max_size=1024):
if max_size == 0:
raise NotImplementedError()
if max_size < 0:
raise ValueError()
self.size = 0
self.max_size = max_size
self._get_from_origin = get_from_origin
self._cache = {}
self._most_recent = None
self._least_recent = None
<mask token>
<mask token>
def _hit(self, key):
self._bump_cached(key)
return self._cache[key].value
def _miss(self, key, *args, **kwargs):
value = self._get_from_origin(*args, **kwargs)
if not self._most_recent:
self._bump_init(key)
else:
self._bump_new(key)
self._set(key, value)
return value
def _bump_init(self, key):
self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)
self._least_recent = self._most_recent
self.size = 1
def _bump_new(self, key):
self._bump(key)
if self.full:
old_last = self._least_recent
new_last = old_last.prv
new_last.nxt = None
self._least_recent = new_last
self._remove(old_last.key)
else:
self.size += 1
def _bump_cached(self, key):
self._bump(key)
self._remove_old_position(key)
<mask token>
def _bump(self, key):
old_first = self._most_recent
new_first = DoubleLinked(nxt=old_first, prv=None, key=key)
old_first.prv = new_first
self._most_recent = new_first
def _set(self, key, value):
self._cache[key] = CacheEntry(value, self._most_recent)
def _remove(self, key):
del self._cache[key]
def __repr__(self):
if not self._most_recent:
return '[ | ]'
current = self._most_recent
keys = [current.key]
while current.nxt:
current = current.nxt
keys.append(current.key)
return '[ ' + ' | '.join(keys) + ' ]'
<mask token>
class cache:
def __init__(self, max_size):
assert isinstance(max_size, int)
self.max_size = max_size
def __call__(self, func):
lru = LRUCache(func, max_size=self.max_size)
def cached_f(*args, **kwargs):
return lru.get(*args, **kwargs)
return cached_f
| <mask token>
class LRUCache:
def __init__(self, get_from_origin, max_size=1024):
if max_size == 0:
raise NotImplementedError()
if max_size < 0:
raise ValueError()
self.size = 0
self.max_size = max_size
self._get_from_origin = get_from_origin
self._cache = {}
self._most_recent = None
self._least_recent = None
@property
def full(self):
return self.size == self.max_size
<mask token>
def _hit(self, key):
self._bump_cached(key)
return self._cache[key].value
def _miss(self, key, *args, **kwargs):
value = self._get_from_origin(*args, **kwargs)
if not self._most_recent:
self._bump_init(key)
else:
self._bump_new(key)
self._set(key, value)
return value
def _bump_init(self, key):
self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)
self._least_recent = self._most_recent
self.size = 1
def _bump_new(self, key):
self._bump(key)
if self.full:
old_last = self._least_recent
new_last = old_last.prv
new_last.nxt = None
self._least_recent = new_last
self._remove(old_last.key)
else:
self.size += 1
def _bump_cached(self, key):
self._bump(key)
self._remove_old_position(key)
<mask token>
def _bump(self, key):
old_first = self._most_recent
new_first = DoubleLinked(nxt=old_first, prv=None, key=key)
old_first.prv = new_first
self._most_recent = new_first
def _set(self, key, value):
self._cache[key] = CacheEntry(value, self._most_recent)
def _remove(self, key):
del self._cache[key]
def __repr__(self):
if not self._most_recent:
return '[ | ]'
current = self._most_recent
keys = [current.key]
while current.nxt:
current = current.nxt
keys.append(current.key)
return '[ ' + ' | '.join(keys) + ' ]'
def __len__(self):
return self.size
class cache:
def __init__(self, max_size):
assert isinstance(max_size, int)
self.max_size = max_size
def __call__(self, func):
lru = LRUCache(func, max_size=self.max_size)
def cached_f(*args, **kwargs):
return lru.get(*args, **kwargs)
return cached_f
| def _make_key(*args, **kwargs):
all_args = [str(arg) for arg in args]
all_args += [(str(arg) + '=' + str(value)) for arg, value in kwargs.items()
]
return '|'.join(all_args)
class DoubleLinked:
def __init__(self, prv, nxt, key):
self.prv = prv
self.nxt = nxt
self.key = key
class CacheEntry:
def __init__(self, value, position):
self.value = value
self.position = position
class LRUCache:
def __init__(self, get_from_origin, max_size=1024):
if max_size == 0:
raise NotImplementedError()
if max_size < 0:
raise ValueError()
self.size = 0
self.max_size = max_size
self._get_from_origin = get_from_origin
self._cache = {}
self._most_recent = None
self._least_recent = None
@property
def full(self):
return self.size == self.max_size
def get(self, *args, **kwargs):
if not args and not kwargs:
raise ValueError()
key = _make_key(*args, **kwargs)
if key in self._cache:
return self._hit(key)
return self._miss(key, *args, **kwargs)
def _hit(self, key):
self._bump_cached(key)
return self._cache[key].value
def _miss(self, key, *args, **kwargs):
value = self._get_from_origin(*args, **kwargs)
if not self._most_recent:
self._bump_init(key)
else:
self._bump_new(key)
self._set(key, value)
return value
def _bump_init(self, key):
self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)
self._least_recent = self._most_recent
self.size = 1
def _bump_new(self, key):
self._bump(key)
if self.full:
old_last = self._least_recent
new_last = old_last.prv
new_last.nxt = None
self._least_recent = new_last
self._remove(old_last.key)
else:
self.size += 1
def _bump_cached(self, key):
self._bump(key)
self._remove_old_position(key)
def _remove_old_position(self, key):
old_position = self._cache[key].position
if not old_position.prv:
return
old_position.prv.nxt = old_position.nxt
if old_position.nxt:
old_position.nxt.prv = old_position.prv
else:
self._least_recent = old_position.prv
self._cache[key].position = self._most_recent
def _bump(self, key):
old_first = self._most_recent
new_first = DoubleLinked(nxt=old_first, prv=None, key=key)
old_first.prv = new_first
self._most_recent = new_first
def _set(self, key, value):
self._cache[key] = CacheEntry(value, self._most_recent)
def _remove(self, key):
del self._cache[key]
def __repr__(self):
if not self._most_recent:
return '[ | ]'
current = self._most_recent
keys = [current.key]
while current.nxt:
current = current.nxt
keys.append(current.key)
return '[ ' + ' | '.join(keys) + ' ]'
def __len__(self):
return self.size
class cache:
def __init__(self, max_size):
assert isinstance(max_size, int)
self.max_size = max_size
def __call__(self, func):
lru = LRUCache(func, max_size=self.max_size)
def cached_f(*args, **kwargs):
return lru.get(*args, **kwargs)
return cached_f
| def _make_key(*args, **kwargs):
all_args = [str(arg) for arg in args]
all_args += [str(arg) + '=' + str(value) for arg, value in kwargs.items()]
return '|'.join(all_args)
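# note: keys are purely string-based, so f(1) and f('1') map to the same cache entry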
class DoubleLinked:
def __init__(self, prv, nxt, key):
self.prv = prv
self.nxt = nxt
self.key = key
class CacheEntry:
def __init__(self, value, position):
self.value = value
self.position = position
class LRUCache:
def __init__(self, get_from_origin, max_size=1024):
if max_size == 0:
raise NotImplementedError()
if max_size < 0:
raise ValueError()
# keep separate size counter, to save going over the list
self.size = 0
self.max_size = max_size
# the function to call
self._get_from_origin = get_from_origin
# the values to cache
self._cache = {}
self._most_recent = None
self._least_recent = None
@property
def full(self):
return self.size == self.max_size
def get(self, *args, **kwargs):
if not args and not kwargs:
raise ValueError()
key = _make_key(*args, **kwargs)
if key in self._cache:
return self._hit(key)
return self._miss(key, *args, **kwargs)
def _hit(self, key):
self._bump_cached(key)
return self._cache[key].value
def _miss(self, key, *args, **kwargs):
value = self._get_from_origin(*args, **kwargs)
if not self._most_recent:
self._bump_init(key)
else:
self._bump_new(key)
self._set(key, value)
return value
def _bump_init(self, key):
self._most_recent = DoubleLinked(nxt=None, prv=None, key=key)
self._least_recent = self._most_recent
self.size = 1
def _bump_new(self, key):
self._bump(key)
# remove oldest entry
# this is the entire reason for the linked list business
if self.full:
old_last = self._least_recent
new_last = old_last.prv
new_last.nxt = None
self._least_recent = new_last
self._remove(old_last.key)
else:
self.size += 1
def _bump_cached(self, key):
self._bump(key)
self._remove_old_position(key)
def _remove_old_position(self, key):
old_position = self._cache[key].position
if not old_position.prv:
return # we are already the most recent
old_position.prv.nxt = old_position.nxt
if old_position.nxt: # if we're not the last
old_position.nxt.prv = old_position.prv
else:
self._least_recent = old_position.prv
self._cache[key].position = self._most_recent
def _bump(self, key):
old_first = self._most_recent
new_first = DoubleLinked(nxt=old_first, prv=None, key=key)
old_first.prv = new_first
self._most_recent = new_first
def _set(self, key, value):
self._cache[key] = CacheEntry(value, self._most_recent)
def _remove(self, key):
del self._cache[key]
def __repr__(self):
if not self._most_recent:
return '[ | ]'
current = self._most_recent
keys = [current.key]
while current.nxt:
current = current.nxt
keys.append(current.key)
return '[ ' + (' | '.join(keys)) + ' ]'
def __len__(self):
return self.size
class cache: # pylint: disable=invalid-name
def __init__(self, max_size):
assert isinstance(max_size, int)
self.max_size = max_size
def __call__(self, func):
lru = LRUCache(func, max_size=self.max_size)
def cached_f(*args, **kwargs):
return lru.get(*args, **kwargs)
return cached_f
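# Illustrative usage (added sketch, not part of the original module; `slow_square`
# is a made-up function used only to show the decorator and the LRU eviction):
if __name__ == '__main__':
    @cache(max_size=2)
    def slow_square(n):
        print('computing', n)
        return n * n
    slow_square(2)  # miss: computed and cached
    slow_square(2)  # hit: served from the cache
    slow_square(3)  # miss: cache now holds keys '2' and '3'
    slow_square(4)  # miss: evicts '2', the least recently used key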
| [
10,
14,
16,
23,
24
] |
1,584 | 78efe97d838774cb831ef205186db29f392e1953 | <mask token>
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
<mask token>
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
<mask token>
| <mask token>
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created.')
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print('Socket bind comlete.')
return s
def setupConnection():
s.listen(1)
conn, address = s.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
return conn
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
<mask token>
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
<mask token>
| <mask token>
GPIO.setmode(GPIO.BCM)
GPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)
GPIO.setwarnings(False)
<mask token>
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created.')
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print('Socket bind comlete.')
return s
def setupConnection():
s.listen(1)
conn, address = s.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
return conn
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
def GREEN(t):
GPIO.outdefput(20, 1)
time.sleep(t)
GPIO.output(20, 0)
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
<mask token>
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
except:
break
except KeyboardInterrupt:
print('program terminated')
finally:
GPIO.cleanup()
conn.close()
if __name__ == '__main__':
main()
| import socket
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)
GPIO.setwarnings(False)
host = '192.168.87.191'
port = 5560
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created.')
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print('Socket bind complete.')
return s
def setupConnection():
s.listen(1)
conn, address = s.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
return conn
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
def GREEN(t):
GPIO.output(20, 1)
time.sleep(t)
GPIO.output(20, 0)
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
s = setupServer()
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
except:
break
except KeyboardInterrupt:
print('program terminated')
finally:
GPIO.cleanup()
conn.close()
if __name__ == '__main__':
main()
| import socket
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(20,GPIO.OUT,initial=GPIO.LOW) #green
GPIO.setup(21,GPIO.OUT,initial=GPIO.LOW) #red
GPIO.setwarnings(False)
host = '192.168.87.191'
port = 5560
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket created.")
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print("Socket bind complete.")
return s
def setupConnection():
s.listen(1) # Allows one connection at a time.
conn, address = s.accept()
print("Connected to: " + address[0] + ":" + str(address[1]))
return conn
def RED(t):
#Red LED
GPIO.output(21,1)
time.sleep(1)
GPIO.output(21,0)
def GREEN(t):
#GREEN LED
GPIO.output(20,1)
time.sleep(t)
GPIO.output(20,0)
def dataTransfer(conn):
# A big loop that receives data until told not to.
while True:
# Receive the data
data = conn.recv(1024) # receive the data
data = data.decode('utf-8')
# Split the data such that you separate the command
# from the rest of the data.
dataMessage = data.split(' ', 1)
# Command
command = dataMessage[0]
# parameter
para = dataMessage[1] if len(dataMessage) > 1 else '0'  # default when no parameter is sent
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print("Our server is shutting down.")
s.close()
break
else:
print('Unknown Command')
#conn.close()
s = setupServer()
#while True:
# try:
# conn = setupConnection()
# dataTransfer(conn)
# except:
# break
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
except:
break
except KeyboardInterrupt:
print("program terminated")
finally:
GPIO.cleanup()
conn.close()
#Runs Main Function
if __name__ == "__main__":
main()
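# Illustrative client-side sketch (commented out; not part of the original script,
# assumes the same host/port and the 'COMMAND <seconds>' protocol parsed above):
# import socket
# c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# c.connect(('192.168.87.191', 5560))
# c.send('RED 1'.encode('utf-8'))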
| [
2,
4,
7,
9,
10
] |
1,585 | fbce185671267bd70cf7b91696867b72dfcc8d5b | <mask token>
| conf = {'PROJECT': 'WCCIA', 'NAS_FOLDER':
'Q:\\GROUPS\\CORP_JGS_DSE\\ATI\\quotations', 'DB_SERVER': '10.0.36.129',
'DB_PORT': '34000/'}
| null | null | null | [
0,
1
] |
1,586 | ad88685e3f1cd5e0ddb42a5982a05ff8ee7b8111 | <mask token>
def pytest_addoption(parser):
print('Option ')
parser.addoption('--destination', action='store', help=
'subsystem address', dest='destination')
@pytest.fixture
def destination(request):
print(request.config.getoption('--html'))
return request.config.getoption('--destination')
<mask token>
| <mask token>
def pytest_collection_modifyitems(session, config, items):
print('sono qui', items)
def pytest_ignore_collect(path, config):
print(path)
print('mamma ', config.getoption('--destination'))
return False
def pytest_addoption(parser):
print('Option ')
parser.addoption('--destination', action='store', help=
'subsystem address', dest='destination')
@pytest.fixture
def destination(request):
print(request.config.getoption('--html'))
return request.config.getoption('--destination')
@pytest.fixture(scope='session')
def pq9_connection():
pq9client = PQ9Client.PQ9Client('localhost', '10000')
pq9client.connect()
yield pq9client
pq9client.close()
| <mask token>
def pytest_configure(config):
print('pytest_configure')
def pytest_collection_modifyitems(session, config, items):
print('sono qui', items)
def pytest_ignore_collect(path, config):
print(path)
print('mamma ', config.getoption('--destination'))
return False
def pytest_addoption(parser):
print('Option ')
parser.addoption('--destination', action='store', help=
'subsystem address', dest='destination')
@pytest.fixture
def destination(request):
print(request.config.getoption('--html'))
return request.config.getoption('--destination')
@pytest.fixture(scope='session')
def pq9_connection():
pq9client = PQ9Client.PQ9Client('localhost', '10000')
pq9client.connect()
yield pq9client
pq9client.close()
| <mask token>
sys.path.insert(1, '../Generic')
<mask token>
def pytest_configure(config):
print('pytest_configure')
def pytest_collection_modifyitems(session, config, items):
print('sono qui', items)
def pytest_ignore_collect(path, config):
print(path)
print('mamma ', config.getoption('--destination'))
return False
def pytest_addoption(parser):
print('Option ')
parser.addoption('--destination', action='store', help=
'subsystem address', dest='destination')
@pytest.fixture
def destination(request):
print(request.config.getoption('--html'))
return request.config.getoption('--destination')
@pytest.fixture(scope='session')
def pq9_connection():
pq9client = PQ9Client.PQ9Client('localhost', '10000')
pq9client.connect()
yield pq9client
pq9client.close()
| # content of conftest.py
import pytest
import sys
sys.path.insert(1, '../Generic')
import PQ9Client
def pytest_configure(config):
print("pytest_configure")
def pytest_collection_modifyitems(session, config, items):
print("sono qui", items)
def pytest_ignore_collect(path, config):
print(path)
print("mamma ", config.getoption("--destination"))
return False
def pytest_addoption(parser):
print("Option ")
parser.addoption(
"--destination", action="store", help="subsystem address", dest="destination",
)
@pytest.fixture
def destination(request):
print(request.config.getoption("--html"))
#print(request.config.getoption("kkk"))
return request.config.getoption("--destination")
@pytest.fixture(scope="session") #only 'make' this object once per session.
def pq9_connection():
pq9client = PQ9Client.PQ9Client("localhost","10000")
pq9client.connect()
yield pq9client
pq9client.close()
| [
2,
5,
6,
7,
9
] |
1,587 | 60354f25f55136d4e873d118cfe048cf08c06e39 | <mask token>
def game():
for i in range(1000):
request = input('Auto-Bot at your service. Please state your request. '
)
if request == 'google':
query = input('Search: ')
print(search(query, num_results=3))
elif request == 'stocks':
ticker = input('Ticker Symbol: ')
realticker = yf.Ticker(ticker)
print(realticker.history(period='1m'))
elif request == 'weather':
place = input('City: ')
weather = weather_forecast.forecast(place=place, time=
current_time, date=d1)
elif request == 'email':
to = input('Email address: ')
content = input('What do you want to say? ')
address = '[email protected]'
password = 'AutoBot1'
server = 'imap.gmail.com'
s = bot.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(address, password)
s.ehlo()
s.sendmail(address, to, content)
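# note: `content` is sent as a raw message body; real mail usually needs MIME headers (To/From/Subject)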
elif request == 'song':
song = input('Song name: ')
results = YoutubeSearch(song, max_results=1).to_dict()
dict = results[0].values()
newdict = list(dict)
url = newdict[7]
print(f'https://www.youtube.com{url}')
elif request == 'news':
news = input('Search news: ')
gn = GoogleNews()
top = gn.search(news)
newnews = gn.results()
dict = list(newnews[0].values())
dicttwo = list(newnews[1].values())
dictthree = list(newnews[2].values())
dictfour = list(newnews[3].values())
dictfive = list(newnews[4].values())
title1 = dict[0]
title2 = dicttwo[0]
title3 = dictthree[0]
title4 = dictfour[0]
title5 = dictfive[0]
src1 = dict[1]
src2 = dicttwo[1]
src3 = dictthree[1]
src4 = dictfour[1]
src5 = dictfive[1]
cap1 = dict[4]
cap2 = dicttwo[4]
cap3 = dictthree[4]
cap4 = dictfour[4]
cap5 = dictfive[4]
url1 = dict[5]
url2 = dicttwo[5]
url3 = dictthree[5]
url4 = dictfour[5]
url5 = dictfive[5]
print(f'Title: {title1}')
print(f'Source: {src1}')
print(f'Caption: {cap1}')
print(f'Url: {url1}')
print(f'Title: {title2}')
print(f'Source: {src2}')
print(f'Caption: {cap2}')
print(f'Url: {url2}')
print(f'Title: {title3}')
print(f'Source: {src3}')
print(f'Caption: {cap3}')
print(f'Url: {url3}')
print(f'Title: {title4}')
print(f'Source: {src4}')
print(f'Caption: {cap4}')
print(f'Url: {url4}')
print(f'Title: {title5}')
print(f'Source: {src5}')
print(f'Caption: {cap5}')
print(f'Url: {url5}')
elif request == 'math':
def add(x, y):
return x + y
def subtract(x, y):
return x - y
def multiply(x, y):
return x * y
def divide(x, y):
return x / y
while True:
choice = input('Enter choice( + / - / * / / ): ')
if choice in ('+', '-', '*', '/'):
num1 = float(input('Enter first number: '))
num2 = float(input('Enter second number: '))
if choice == '+':
print(num1, '+', num2, '=', add(num1, num2))
elif choice == '-':
print(num1, '-', num2, '=', subtract(num1, num2))
elif choice == '*':
print(num1, '*', num2, '=', multiply(num1, num2))
elif choice == '/':
print(num1, '/', num2, '=', divide(num1, num2))
break
else:
print('Invalid Input')
elif request == 'game':
type = input(
'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '
)
if type == '1':
unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':
' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}
board_keys = []
for key in theBoard:
board_keys.append(key)
""" We will have to print the updated board after every move in the game and
thus we will make a function in which we'll define the printBoard function
so that we can easily print the board every time by calling this function. """
def printBoard(board):
print(board['7'] + '|' + board['8'] + '|' + board['9'])
print('-+-+-')
print(board['4'] + '|' + board['5'] + '|' + board['6'])
print('-+-+-')
print(board['1'] + '|' + board['2'] + '|' + board['3'])
def tictactoe():
turn = 'X'
count = 0
for i in range(10):
printBoard(theBoard)
print("It's your turn, " + turn +
'. Move to which place?')
if turn == 'O':
choice = random.choice(unused_keys)  # pick a random free cell for the bot
if theBoard[f'{choice}'] == ' ':
theBoard[choice] = turn
unused_keys.remove(choice)
count += 1
elif turn == 'X':
move = input()
if theBoard[move] == ' ':
theBoard[move] = turn
unused_keys.remove(move)
count += 1
else:
print(
'That place is already filled.\nMove to which place?'
)
continue
if count >= 5:
if theBoard['7'] == theBoard['8'] == theBoard['9'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['4'] == theBoard['5'] == theBoard['6'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['1'] == theBoard['2'] == theBoard['3'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['1'] == theBoard['4'] == theBoard['7'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['2'] == theBoard['5'] == theBoard['8'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['3'] == theBoard['6'] == theBoard['9'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['7'] == theBoard['5'] == theBoard['3'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['1'] == theBoard['5'] == theBoard['9'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
if count == 9:
print('\nGame Over.\n')
print("It's a Tie!!")
if turn == 'X':
turn = 'O'
else:
turn = 'X'
tictactoe()
elif type == '2':
print(
'Winning Rules of the Rock paper scissor game as follows: \n'
+ """Rock vs paper->paper wins
""" +
'Rock vs scissor->Rock wins \n' +
'paper vs scissor->scissor wins \n')
print('Enter choice \n 1. Rock \n 2. paper \n 3. scissor \n')
choice = int(input('User turn: '))
while choice > 3 or choice < 1:
choice = int(input('enter valid input: '))
if choice == 1:
choice_name = 'Rock'
elif choice == 2:
choice_name = 'paper'
else:
choice_name = 'scissor'
print('user choice is: ' + choice_name)
print('\nNow its computer turn.......')
comp_choice = random.randint(1, 3)
while comp_choice == choice:
comp_choice = random.randint(1, 3)
if comp_choice == 1:
comp_choice_name = 'Rock'
elif comp_choice == 2:
comp_choice_name = 'paper'
else:
comp_choice_name = 'scissor'
print('Computer choice is: ' + comp_choice_name)
print(choice_name + ' V/s ' + comp_choice_name)
if (choice == 1 and comp_choice == 2 or choice == 2 and
comp_choice == 1):
print('paper wins => ', end='')
result = 'paper'
elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:
print('Rock wins =>', end='')
result = 'Rock'
else:
print('scissor wins =>', end='')
result = 'scissor'
if result == choice_name:
print('<== User wins ==>')
else:
print('<== Computer wins ==>')
<mask token>
| <mask token>
def game():
for i in range(1000):
request = input('Auto-Bot at your service. Please state your request. '
)
if request == 'google':
query = input('Search: ')
print(search(query, num_results=3))
elif request == 'stocks':
ticker = input('Ticker Symbol: ')
realticker = yf.Ticker(ticker)
print(realticker.history(period='1m'))
elif request == 'weather':
place = input('City: ')
weather = weather_forecast.forecast(place=place, time=
current_time, date=d1)
elif request == 'email':
to = input('Email address: ')
content = input('What do you want to say? ')
address = '[email protected]'
password = 'AutoBot1'
server = 'imap.gmail.com'
s = bot.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(address, password)
s.ehlo()
s.sendmail(address, to, content)
elif request == 'song':
song = input('Song name: ')
results = YoutubeSearch(song, max_results=1).to_dict()
dict = results[0].values()
newdict = list(dict)
url = newdict[7]
print(f'https://www.youtube.com{url}')
elif request == 'news':
news = input('Search news: ')
gn = GoogleNews()
top = gn.search(news)
newnews = gn.results()
dict = list(newnews[0].values())
dicttwo = list(newnews[1].values())
dictthree = list(newnews[2].values())
dictfour = list(newnews[3].values())
dictfive = list(newnews[4].values())
title1 = dict[0]
title2 = dicttwo[0]
title3 = dictthree[0]
title4 = dictfour[0]
title5 = dictfive[0]
src1 = dict[1]
src2 = dicttwo[1]
src3 = dictthree[1]
src4 = dictfour[1]
src5 = dictfive[1]
cap1 = dict[4]
cap2 = dicttwo[4]
cap3 = dictthree[4]
cap4 = dictfour[4]
cap5 = dictfive[4]
url1 = dict[5]
url2 = dicttwo[5]
url3 = dictthree[5]
url4 = dictfour[5]
url5 = dictfive[5]
print(f'Title: {title1}')
print(f'Source: {src1}')
print(f'Caption: {cap1}')
print(f'Url: {url1}')
print(f'Title: {title2}')
print(f'Source: {src2}')
print(f'Caption: {cap2}')
print(f'Url: {url2}')
print(f'Title: {title3}')
print(f'Source: {src3}')
print(f'Caption: {cap3}')
print(f'Url: {url3}')
print(f'Title: {title4}')
print(f'Source: {src4}')
print(f'Caption: {cap4}')
print(f'Url: {url4}')
print(f'Title: {title5}')
print(f'Source: {src5}')
print(f'Caption: {cap5}')
print(f'Url: {url5}')
elif request == 'math':
def add(x, y):
return x + y
def subtract(x, y):
return x - y
def multiply(x, y):
return x * y
def divide(x, y):
return x / y
while True:
choice = input('Enter choice( + / - / * / / ): ')
if choice in ('+', '-', '*', '/'):
num1 = float(input('Enter first number: '))
num2 = float(input('Enter second number: '))
if choice == '+':
print(num1, '+', num2, '=', add(num1, num2))
elif choice == '-':
print(num1, '-', num2, '=', subtract(num1, num2))
elif choice == '*':
print(num1, '*', num2, '=', multiply(num1, num2))
elif choice == '/':
print(num1, '/', num2, '=', divide(num1, num2))
break
else:
print('Invalid Input')
elif request == 'game':
type = input(
'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '
)
if type == '1':
unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':
' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}
board_keys = []
for key in theBoard:
board_keys.append(key)
""" We will have to print the updated board after every move in the game and
thus we will make a function in which we'll define the printBoard function
so that we can easily print the board every time by calling this function. """
def printBoard(board):
print(board['7'] + '|' + board['8'] + '|' + board['9'])
print('-+-+-')
print(board['4'] + '|' + board['5'] + '|' + board['6'])
print('-+-+-')
print(board['1'] + '|' + board['2'] + '|' + board['3'])
def tictactoe():
turn = 'X'
count = 0
for i in range(10):
printBoard(theBoard)
print("It's your turn, " + turn +
'. Move to which place?')
if turn == 'O':
choice = random.choice(unused_keys)  # pick a random free cell for the bot
if theBoard[f'{choice}'] == ' ':
theBoard[choice] = turn
unused_keys.remove(choice)
count += 1
elif turn == 'X':
move = input()
if theBoard[move] == ' ':
theBoard[move] = turn
unused_keys.remove(move)
count += 1
else:
print(
'That place is already filled.\nMove to which place?'
)
continue
if count >= 5:
if theBoard['7'] == theBoard['8'] == theBoard['9'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['4'] == theBoard['5'] == theBoard['6'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['1'] == theBoard['2'] == theBoard['3'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['1'] == theBoard['4'] == theBoard['7'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['2'] == theBoard['5'] == theBoard['8'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['3'] == theBoard['6'] == theBoard['9'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['7'] == theBoard['5'] == theBoard['3'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
elif theBoard['1'] == theBoard['5'] == theBoard['9'
] != ' ':
printBoard(theBoard)
print('\nGame Over.\n')
print(' **** ' + turn + ' won. ****')
break
if count == 9:
print('\nGame Over.\n')
print("It's a Tie!!")
if turn == 'X':
turn = 'O'
else:
turn = 'X'
tictactoe()
elif type == '2':
print(
'Winning Rules of the Rock paper scissor game as follows: \n'
+ """Rock vs paper->paper wins
""" +
'Rock vs scissor->Rock wins \n' +
'paper vs scissor->scissor wins \n')
print('Enter choice \n 1. Rock \n 2. paper \n 3. scissor \n')
choice = int(input('User turn: '))
while choice > 3 or choice < 1:
choice = int(input('enter valid input: '))
if choice == 1:
choice_name = 'Rock'
elif choice == 2:
choice_name = 'paper'
else:
choice_name = 'scissor'
print('user choice is: ' + choice_name)
print('\nNow its computer turn.......')
comp_choice = random.randint(1, 3)
while comp_choice == choice:
comp_choice = random.randint(1, 3)
if comp_choice == 1:
comp_choice_name = 'Rock'
elif comp_choice == 2:
comp_choice_name = 'paper'
else:
comp_choice_name = 'scissor'
print('Computer choice is: ' + comp_choice_name)
print(choice_name + ' V/s ' + comp_choice_name)
if (choice == 1 and comp_choice == 2 or choice == 2 and
comp_choice == 1):
print('paper wins => ', end='')
result = 'paper'
elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:
print('Rock wins =>', end='')
result = 'Rock'
else:
print('scissor wins =>', end='')
result = 'scissor'
if result == choice_name:
print('<== User wins ==>')
else:
print('<== Computer wins ==>')
<mask token>
game()
| <mask token>
t = time.localtime()
current_time = time.strftime('%H:%M:%S', t)
<mask token>
today = date.today()
d1 = today.strftime('%Y-%m-%d')
def game():
for i in range(1000):
request = input('Auto-Bot at your service. Please state your request. '
)
if request == 'google':
query = input('Search: ')
print(search(query, num_results=3))
elif request == 'stocks':
ticker = input('Ticker Symbol: ')
realticker = yf.Ticker(ticker)
print(realticker.history(period='1m'))
elif request == 'weather':
place = input('City: ')
weather = weather_forecast.forecast(place=place, time=
current_time, date=d1)
elif request == 'email':
to = input('Email address: ')
content = input('What do you want to say? ')
address = '[email protected]'
password = 'AutoBot1'
server = 'imap.gmail.com'
s = bot.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(address, password)
s.ehlo()
s.sendmail(address, to, content)
elif request == 'song':
song = input('Song name: ')
results = YoutubeSearch(song, max_results=1).to_dict()
dict = results[0].values()
newdict = list(dict)
url = newdict[7]
print(f'https://www.youtube.com{url}')
elif request == 'news':
news = input('Search news: ')
gn = GoogleNews()
top = gn.search(news)
newnews = gn.results()
dict = list(newnews[0].values())
dicttwo = list(newnews[1].values())
dictthree = list(newnews[2].values())
dictfour = list(newnews[3].values())
dictfive = list(newnews[4].values())
title1 = dict[0]
title2 = dicttwo[0]
title3 = dictthree[0]
title4 = dictfour[0]
title5 = dictfive[0]
src1 = dict[1]
src2 = dicttwo[1]
src3 = dictthree[1]
src4 = dictfour[1]
src5 = dictfive[1]
cap1 = dict[4]
cap2 = dicttwo[4]
cap3 = dictthree[4]
cap4 = dictfour[4]
cap5 = dictfive[4]
url1 = dict[5]
url2 = dicttwo[5]
url3 = dictthree[5]
url4 = dictfour[5]
url5 = dictfive[5]
print(f'Title: {title1}')
print(f'Source: {src1}')
print(f'Caption: {cap1}')
print(f'Url: {url1}')
print(f'Title: {title2}')
print(f'Source: {src2}')
print(f'Caption: {cap2}')
print(f'Url: {url2}')
print(f'Title: {title3}')
print(f'Source: {src3}')
print(f'Caption: {cap3}')
print(f'Url: {url3}')
print(f'Title: {title4}')
print(f'Source: {src4}')
print(f'Caption: {cap4}')
print(f'Url: {url4}')
print(f'Title: {title5}')
print(f'Source: {src5}')
print(f'Caption: {cap5}')
print(f'Url: {url5}')
elif request == 'math':
def add(x, y):
return x + y
def subtract(x, y):
return x - y
def multiply(x, y):
return x * y
def divide(x, y):
return x / y
while True:
choice = input('Enter choice( + / - / * / / ): ')
if choice in ('+', '-', '*', '/'):
num1 = float(input('Enter first number: '))
num2 = float(input('Enter second number: '))
if choice == '+':
print(num1, '+', num2, '=', add(num1, num2))
elif choice == '-':
print(num1, '-', num2, '=', subtract(num1, num2))
elif choice == '*':
print(num1, '*', num2, '=', multiply(num1, num2))
elif choice == '/':
print(num1, '/', num2, '=', divide(num1, num2))
break
else:
print('Invalid Input')
elif request == 'game':
type = input(
'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '
)
if type == '1':
unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':
' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}
board_keys = []
for key in theBoard:
board_keys.append(key)
""" We will have to print the updated board after every move in the game and
thus we will make a function in which we'll define the printBoard function
so that we can easily print the board everytime by calling this function. """
def printBoard(board):
print(board['7'] + '|' + board['8'] + '|' + board['9'])
print('-+-+-')
print(board['4'] + '|' + board['5'] + '|' + board['6'])
print('-+-+-')
print(board['1'] + '|' + board['2'] + '|' + board['3'])
def tictactoe():
turn = 'X'
count = 0
for i in range(10):
printBoard(theBoard)
print("It's your turn," + turn +
'.Move to which place?')
if turn == 'O':
                        choice = random.choice(unused_keys)  # pick an open square; indexing with randint(1, 9) can run past the shrinking list
if theBoard[f'{choice}'] == ' ':
theBoard[choice] = turn
unused_keys.remove(choice)
count += 1
elif turn == 'X':
move = input()
if theBoard[move] == ' ':
theBoard[move] = turn
unused_keys.remove(move)
count += 1
else:
print(
'That place is already filled.\nMove to which place?'
)
continue
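                    # check whether X or O has won; a win is only possible after five moves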
if count >= 5:
                        if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
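                    # if neither player has won and the board is full, declare a tie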
if count == 9:
print('\nGame Over.\n')
print("It's a Tie!!")
if turn == 'X':
turn = 'O'
else:
turn = 'X'
tictactoe()
elif type == '2':
            print(
                'Winning Rules of the Rock paper scissor game as follows: \n' +
                'Rock vs paper->paper wins \n' +
                'Rock vs scissor->Rock wins \n' +
                'paper vs scissor->scissor wins \n')
print('Enter choice \n 1. Rock \n 2. paper \n 3. scissor \n')
choice = int(input('User turn: '))
while choice > 3 or choice < 1:
choice = int(input('enter valid input: '))
if choice == 1:
choice_name = 'Rock'
elif choice == 2:
choice_name = 'paper'
else:
choice_name = 'scissor'
print('user choice is: ' + choice_name)
print('\nNow its computer turn.......')
comp_choice = random.randint(1, 3)
while comp_choice == choice:
comp_choice = random.randint(1, 3)
if comp_choice == 1:
comp_choice_name = 'Rock'
elif comp_choice == 2:
comp_choice_name = 'paper'
else:
comp_choice_name = 'scissor'
print('Computer choice is: ' + comp_choice_name)
print(choice_name + ' V/s ' + comp_choice_name)
if (choice == 1 and comp_choice == 2 or choice == 2 and
comp_choice == 1):
print('paper wins => ', end='')
result = 'paper'
elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:
print('Rock wins =>', end='')
result = 'Rock'
else:
print('scissor wins =>', end='')
result = 'scissor'
if result == choice_name:
print('<== User wins ==>')
else:
print('<== Computer wins ==>')
<mask token>
game()
| import weather_forecast
from weather_forecast import forecast
from googlesearch import search
from youtube_search import YoutubeSearch
import yfinance as yf
import smtplib as bot
import imaplib as imap
import email
import time
from GoogleNews import GoogleNews
import json
t = time.localtime()
current_time = time.strftime('%H:%M:%S', t)
from datetime import date
import random
today = date.today()
d1 = today.strftime('%Y-%m-%d')
def game():
for i in range(1000):
request = input('Auto-Bot at your service. Please state your request. '
)
if request == 'google':
query = input('Search: ')
            print(list(search(query, num_results=3)))  # search() may return a generator; materialize it before printing
elif request == 'stocks':
ticker = input('Ticker Symbol: ')
realticker = yf.Ticker(ticker)
            print(realticker.history(period='1mo'))  # '1m' is an interval, not a valid period; '1mo' gives one month of history
elif request == 'weather':
place = input('City: ')
weather = weather_forecast.forecast(place=place, time=
current_time, date=d1)
elif request == 'email':
to = input('Email address: ')
content = input('What do you want to say? ')
address = '[email protected]'
password = 'AutoBot1'
server = 'imap.gmail.com'
s = bot.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(address, password)
s.ehlo()
s.sendmail(address, to, content)
{}
elif request == 'song':
song = input('Song name: ')
results = YoutubeSearch(song, max_results=1).to_dict()
dict = results[0].values()
newdict = list(dict)
url = newdict[7]
print(f'https://www.youtube.com{url}')
elif request == 'news':
news = input('Search news: ')
gn = GoogleNews()
top = gn.search(news)
newnews = gn.results()
dict = list(newnews[0].values())
dicttwo = list(newnews[1].values())
dictthree = list(newnews[2].values())
dictfour = list(newnews[3].values())
dictfive = list(newnews[4].values())
title1 = dict[0]
title2 = dicttwo[0]
title3 = dictthree[0]
title4 = dictfour[0]
title5 = dictfive[0]
src1 = dict[1]
src2 = dicttwo[1]
src3 = dictthree[1]
src4 = dictfour[1]
src5 = dictfive[1]
cap1 = dict[4]
cap2 = dicttwo[4]
cap3 = dictthree[4]
cap4 = dictfour[4]
cap5 = dictfive[4]
url1 = dict[5]
url2 = dicttwo[5]
url3 = dictthree[5]
url4 = dictfour[5]
url5 = dictfive[5]
print(f'Title: {title1}')
print(f'Source: {src1}')
print(f'Caption: {cap1}')
print(f'Url: {url1}')
print(f'Title: {title2}')
print(f'Source: {src2}')
print(f'Caption: {cap2}')
print(f'Url: {url2}')
print(f'Title: {title3}')
print(f'Source: {src3}')
print(f'Caption: {cap3}')
print(f'Url: {url3}')
print(f'Title: {title4}')
print(f'Source: {src4}')
print(f'Caption: {cap4}')
print(f'Url: {url4}')
print(f'Title: {title5}')
print(f'Source: {src5}')
print(f'Caption: {cap5}')
print(f'Url: {url5}')
elif request == 'math':
def add(x, y):
return x + y
def subtract(x, y):
return x - y
def multiply(x, y):
return x * y
def divide(x, y):
return x / y
while True:
choice = input('Enter choice( + / - / * / / ): ')
if choice in ('+', '-', '*', '/'):
num1 = float(input('Enter first number: '))
num2 = float(input('Enter second number: '))
if choice == '+':
print(num1, '+', num2, '=', add(num1, num2))
elif choice == '-':
print(num1, '-', num2, '=', subtract(num1, num2))
elif choice == '*':
print(num1, '*', num2, '=', multiply(num1, num2))
elif choice == '/':
print(num1, '/', num2, '=', divide(num1, num2))
break
else:
print('Invalid Input')
elif request == 'game':
type = input(
'Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors '
)
if type == '1':
unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
theBoard = {'7': ' ', '8': ' ', '9': ' ', '4': ' ', '5':
' ', '6': ' ', '1': ' ', '2': ' ', '3': ' '}
board_keys = []
for key in theBoard:
board_keys.append(key)
""" We will have to print the updated board after every move in the game and
thus we will make a function in which we'll define the printBoard function
so that we can easily print the board everytime by calling this function. """
def printBoard(board):
print(board['7'] + '|' + board['8'] + '|' + board['9'])
print('-+-+-')
print(board['4'] + '|' + board['5'] + '|' + board['6'])
print('-+-+-')
print(board['1'] + '|' + board['2'] + '|' + board['3'])
def tictactoe():
turn = 'X'
count = 0
for i in range(10):
printBoard(theBoard)
print("It's your turn," + turn +
'.Move to which place?')
if turn == 'O':
                        choice = random.choice(unused_keys)  # pick an open square; indexing with randint(1, 9) can run past the shrinking list
if theBoard[f'{choice}'] == ' ':
theBoard[choice] = turn
unused_keys.remove(choice)
count += 1
elif turn == 'X':
move = input()
if theBoard[move] == ' ':
theBoard[move] = turn
unused_keys.remove(move)
count += 1
else:
print(
'That place is already filled.\nMove to which place?'
)
continue
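                    # check whether X or O has won; a win is only possible after five moves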
if count >= 5:
                        if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
                        elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ':
                            printBoard(theBoard)
                            print('\nGame Over.\n')
                            print(' **** ' + turn + ' won. ****')
                            break
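                    # if neither player has won and the board is full, declare a tie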
if count == 9:
print('\nGame Over.\n')
print("It's a Tie!!")
if turn == 'X':
turn = 'O'
else:
turn = 'X'
tictactoe()
elif type == '2':
            print(
                'Winning Rules of the Rock paper scissor game as follows: \n' +
                'Rock vs paper->paper wins \n' +
                'Rock vs scissor->Rock wins \n' +
                'paper vs scissor->scissor wins \n')
print('Enter choice \n 1. Rock \n 2. paper \n 3. scissor \n')
choice = int(input('User turn: '))
while choice > 3 or choice < 1:
choice = int(input('enter valid input: '))
if choice == 1:
choice_name = 'Rock'
elif choice == 2:
choice_name = 'paper'
else:
choice_name = 'scissor'
print('user choice is: ' + choice_name)
print('\nNow its computer turn.......')
comp_choice = random.randint(1, 3)
while comp_choice == choice:
comp_choice = random.randint(1, 3)
if comp_choice == 1:
comp_choice_name = 'Rock'
elif comp_choice == 2:
comp_choice_name = 'paper'
else:
comp_choice_name = 'scissor'
print('Computer choice is: ' + comp_choice_name)
print(choice_name + ' V/s ' + comp_choice_name)
if (choice == 1 and comp_choice == 2 or choice == 2 and
comp_choice == 1):
print('paper wins => ', end='')
result = 'paper'
elif choice == 1 and comp_choice == 3 or choice == 3 and comp_choice == 1:
print('Rock wins =>', end='')
result = 'Rock'
else:
print('scissor wins =>', end='')
result = 'scissor'
if result == choice_name:
print('<== User wins ==>')
else:
print('<== Computer wins ==>')
<mask token>
game()
| # This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
import weather_forecast
from weather_forecast import forecast
from googlesearch import search
from youtube_search import YoutubeSearch
import yfinance as yf
import smtplib as bot
import imaplib as imap
import email
import time
from GoogleNews import GoogleNews
import json
t = time.localtime()
current_time = time.strftime("%H:%M:%S", t)
from datetime import date
import random
today = date.today()
d1 = today.strftime("%Y-%m-%d")
def game():
for i in range(1000):
request = input('Auto-Bot at your service. Please state your request. ')
if request == 'google':
query = input('Search: ')
            print(list(search(query, num_results=3)))  # search() may return a generator; materialize it before printing
elif request == 'stocks':
ticker = input('Ticker Symbol: ')
realticker = yf.Ticker(ticker)
            print(realticker.history(period='1mo'))  # '1m' is an interval, not a valid period; '1mo' gives one month of history
elif request == 'weather':
place = input('City: ')
weather = weather_forecast.forecast(place=place, time=current_time, date=d1)
elif request == 'email':
to = input('Email address: ')
content = input('What do you want to say? ')
address = '[email protected]'
password = 'AutoBot1'
server = 'imap.gmail.com'
s = bot.SMTP(host= 'smtp.gmail.com', port= 587)
s.starttls()
s.login(address, password)
s.ehlo()
s.sendmail(address, to ,content)
{}
elif request == 'song':
song = input('Song name: ')
results = YoutubeSearch(song, max_results=1).to_dict()
dict = results[0].values()
newdict = list(dict)
url = newdict[7]
print(f'https://www.youtube.com{url}')
elif request == 'news':
news = input('Search news: ')
gn = GoogleNews()
top = gn.search(news)
newnews = gn.results()
dict = list(newnews[0].values())
dicttwo = list(newnews[1].values())
dictthree = list(newnews[2].values())
dictfour = list(newnews[3].values())
dictfive = list(newnews[4].values())
title1 = dict[0]
title2 = dicttwo[0]
title3 = dictthree[0]
title4 = dictfour[0]
title5 = dictfive[0]
src1 = dict[1]
src2 = dicttwo[1]
src3 = dictthree[1]
src4 = dictfour[1]
src5 = dictfive[1]
cap1 = dict[4]
cap2 = dicttwo[4]
cap3 = dictthree[4]
cap4 = dictfour[4]
cap5 = dictfive[4]
url1 = dict[5]
url2 = dicttwo[5]
url3 = dictthree[5]
url4 = dictfour[5]
url5 = dictfive[5]
print(f'Title: {title1}')
print(f'Source: {src1}')
print(f'Caption: {cap1}')
print(f'Url: {url1}')
print(f'Title: {title2}')
print(f'Source: {src2}')
print(f'Caption: {cap2}')
print(f'Url: {url2}')
print(f'Title: {title3}')
print(f'Source: {src3}')
print(f'Caption: {cap3}')
print(f'Url: {url3}')
print(f'Title: {title4}')
print(f'Source: {src4}')
print(f'Caption: {cap4}')
print(f'Url: {url4}')
print(f'Title: {title5}')
print(f'Source: {src5}')
print(f'Caption: {cap5}')
print(f'Url: {url5}')
elif request == 'math':
def add(x, y):
return x + y
# This function subtracts two numbers
def subtract(x, y):
return x - y
# This function multiplies two numbers
def multiply(x, y):
return x * y
# This function divides two numbers
def divide(x, y):
return x / y
while True:
# Take input from the user
choice = input("Enter choice( + / - / * / / ): ")
# Check if choice is one of the four options
if choice in ('+', '-', '*', '/'):
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
if choice == '+':
print(num1, "+", num2, "=", add(num1, num2))
elif choice == '-':
print(num1, "-", num2, "=", subtract(num1, num2))
elif choice == '*':
print(num1, "*", num2, "=", multiply(num1, num2))
elif choice == '/':
print(num1, "/", num2, "=", divide(num1, num2))
break
else:
print("Invalid Input")
elif request == 'game':
type = input('Which game? Press 1 for tic-tac-toe, press 2 for rock-paper-scissors ')
if type == '1':
unused_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
theBoard = {'7': ' ', '8': ' ', '9': ' ',
'4': ' ', '5': ' ', '6': ' ',
'1': ' ', '2': ' ', '3': ' '}
board_keys = []
for key in theBoard:
board_keys.append(key)
''' We will have to print the updated board after every move in the game and
thus we will make a function in which we'll define the printBoard function
so that we can easily print the board everytime by calling this function. '''
def printBoard(board):
print(board['7'] + '|' + board['8'] + '|' + board['9'])
print('-+-+-')
print(board['4'] + '|' + board['5'] + '|' + board['6'])
print('-+-+-')
print(board['1'] + '|' + board['2'] + '|' + board['3'])
# Now we'll write the main function which has all the gameplay functionality.
def tictactoe():
turn = 'X'
count = 0
for i in range(10):
printBoard(theBoard)
print("It's your turn," + turn + ".Move to which place?")
if turn == 'O':
                        choice = random.choice(unused_keys)  # pick an open square; indexing with randint(1, 9) can run past the shrinking list
if theBoard[f'{choice}'] == ' ':
theBoard[choice] = turn
unused_keys.remove(choice)
count += 1
elif turn == 'X':
move = input()
if theBoard[move] == ' ':
theBoard[move] = turn
unused_keys.remove(move)
count += 1
else:
print("That place is already filled.\nMove to which place?")
continue
# Now we will check if player X or O has won,for every move after 5 moves.
if count >= 5:
if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ': # across the top
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ': # across the middle
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ': # across the bottom
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ': # down the left side
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ': # down the middle
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ': # down the right side
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ': # diagonal
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ': # diagonal
printBoard(theBoard)
print("\nGame Over.\n")
print(" **** " + turn + " won. ****")
break
# If neither X nor O wins and the board is full, we'll declare the result as 'tie'.
if count == 9:
print("\nGame Over.\n")
print("It's a Tie!!")
# Now we have to change the player after every move.
if turn == 'X':
turn = 'O'
else:
turn = 'X'
tictactoe()
elif type == '2':
print("Winning Rules of the Rock paper scissor game as follows: \n"
+ "Rock vs paper->paper wins \n"
+ "Rock vs scissor->Rock wins \n"
+ "paper vs scissor->scissor wins \n")
print("Enter choice \n 1. Rock \n 2. paper \n 3. scissor \n")
choice = int(input("User turn: "))
# OR is the short-circuit operator
# if any one of the condition is true
# then it return True value
# looping until user enter invalid input
while choice > 3 or choice < 1:
choice = int(input("enter valid input: "))
# initialize value of choice_name variable
# corresponding to the choice value
if choice == 1:
choice_name = 'Rock'
elif choice == 2:
choice_name = 'paper'
else:
choice_name = 'scissor'
# print user choice
print("user choice is: " + choice_name)
print("\nNow its computer turn.......")
# Computer chooses randomly any number
# among 1 , 2 and 3. Using randint method
# of random module
comp_choice = random.randint(1, 3)
# looping until comp_choice value
# is equal to the choice value
while comp_choice == choice:
comp_choice = random.randint(1, 3)
# initialize value of comp_choice_name
# variable corresponding to the choice value
if comp_choice == 1:
comp_choice_name = 'Rock'
elif comp_choice == 2:
comp_choice_name = 'paper'
else:
comp_choice_name = 'scissor'
print("Computer choice is: " + comp_choice_name)
print(choice_name + " V/s " + comp_choice_name)
# condition for winning
if ((choice == 1 and comp_choice == 2) or
(choice == 2 and comp_choice == 1)):
print("paper wins => ", end="")
result = "paper"
elif ((choice == 1 and comp_choice == 3) or
(choice == 3 and comp_choice == 1)):
print("Rock wins =>", end="")
result = "Rock"
else:
print("scissor wins =>", end="")
result = "scissor"
# Printing either user or computer wins
if result == choice_name:
print("<== User wins ==>")
else:
print("<== Computer wins ==>")
'''
mail = imap.IMAP4_SSL(server)
mail.login(address, password)
mail.select('inbox')
status, data = mail.search(None, 'ALL')
ids = []
for block in data:
ids += block.split()
for i in ids:
status, data = mail.fetch(i, '(RFC822)')
for response_part in data:
if isinstance(response_part, tuple):
message = email.message_from_bytes(response_part[1])
mail_from = message['from']
mail_subject = message['subject']
if message.is_multipart():
mail_content = ''
for part in message.get_payload():
if part.get_content_type() == 'text/plain':
mail_content += part.get_payload()
else:
mail_content = message.get_payload()
print(mail_from)
s.quit()
'''
game() | [
1,
2,
3,
4,
5
] |
1,588 | 749e6a1f807843c9e2591f51561174cc51668b11 | <mask token>
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
    while x < ux + wx:
        y = uy - wy  # re-initialize y for every x; otherwise the inner loop runs only once
        while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
<mask token>
| <mask token>
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
    while x < ux + wx:
        y = uy - wy  # re-initialize y for every x; otherwise the inner loop runs only once
        while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + 'dd.jpg', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
| <mask token>
img1 = cv2.imread('img0008.jpg')
img2 = cv2.imread('img0009.jpg')
img3 = np.zeros(img1.shape)
iter = 51
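# sum of squared differences between the window centered at (ux, uy) in img_i
# and the same-size window shifted by (dx, dy) in img_j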
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
    while x < ux + wx:
        y = uy - wy  # re-initialize y for every x; otherwise the inner loop runs only once
        while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + 'dd.jpg', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
| import cv2
import numpy as np
img1 = cv2.imread('img0008.jpg')
img2 = cv2.imread('img0009.jpg')
img3 = np.zeros(img1.shape)
iter = 51
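# sum of squared differences between the window centered at (ux, uy) in img_i
# and the same-size window shifted by (dx, dy) in img_j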
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
    while x < ux + wx:
        y = uy - wy  # re-initialize y for every x; otherwise the inner loop runs only once
        while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + 'dd.jpg', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
| import cv2
import numpy as np
img1 = cv2.imread('img0008.jpg')
img2 = cv2.imread('img0009.jpg')
#img3 = cv2.imread('img0009.jpg')
img3 = np.zeros(img1.shape)
iter = 51
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
    while x < ux + wx:
        y = uy - wy  # re-initialize y for every x; otherwise the inner loop runs only once
        while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0]-1):
for y in range(img1.shape[1]-1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
#cv2.imwrite("s"+str(x)+"xy.jpg", img3)
cv2.namedWindow(str(x) + "dd.jpg", cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + "dd.jpg", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
2,
3,
4,
5,
6
] |
1,589 | 166329c967e83806e3482179a56ac7e5541d5010 | <mask token>
def check_passport(text):
arr = text.split()
dct = {}
for elem in arr:
key = elem.split(':')[0]
val = elem.split(':')[1]
dct[key] = val
try:
        if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']) > 2002:
            print('byr invalid')
            return False
        if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']) > 2030:
            print('iyr invalid')
            return False
        if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']) > 2030:
print('eyr invalid')
return False
if dct['hgt'][-2:] == 'in':
if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:
print('hgt invalid')
return False
elif dct['hgt'][-2:] == 'cm':
if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:
print('hgt invalid')
return False
else:
print('hgt invalid')
return False
        if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct['hcl'][1:]):
print('hcl invalid')
return False
ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if not any(dct['ecl'] == option for option in ecl_options):
print('ecl invalid')
return False
if not re.compile('[0-9]{9}').fullmatch(dct['pid']):
print('pid invalid')
return False
return True
except KeyError as e:
print('Key error: ' + str(e))
return False
<mask token>
| <mask token>
with open('input.txt') as f:
input_file = f.readlines()
<mask token>
def check_passport(text):
arr = text.split()
dct = {}
for elem in arr:
key = elem.split(':')[0]
val = elem.split(':')[1]
dct[key] = val
try:
        if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']) > 2002:
            print('byr invalid')
            return False
        if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']) > 2030:
            print('iyr invalid')
            return False
        if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']) > 2030:
print('eyr invalid')
return False
if dct['hgt'][-2:] == 'in':
if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:
print('hgt invalid')
return False
elif dct['hgt'][-2:] == 'cm':
if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:
print('hgt invalid')
return False
else:
print('hgt invalid')
return False
        if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct['hcl'][1:]):
print('hcl invalid')
return False
ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if not any(dct['ecl'] == option for option in ecl_options):
print('ecl invalid')
return False
if not re.compile('[0-9]{9}').fullmatch(dct['pid']):
print('pid invalid')
return False
return True
except KeyError as e:
print('Key error: ' + str(e))
return False
<mask token>
for i in input_file:
    if i != '':
        curr += ' ' + i
    else:
        grouped_input.append(curr[1:])
        curr = ''
if curr:
    grouped_input.append(curr[1:])  # flush the last group when the file has no trailing blank line
<mask token>
for i in range(0, len(grouped_input)):
    result = check_passport(grouped_input[i])  # call once so the validation messages print once
    print(str(result) + ' ' + grouped_input[i])
    if result:
        count += 1
print(count)
| <mask token>
with open('input.txt') as f:
input_file = f.readlines()
input_file = [x.strip() for x in input_file]
def check_passport(text):
arr = text.split()
dct = {}
for elem in arr:
key = elem.split(':')[0]
val = elem.split(':')[1]
dct[key] = val
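    # validate each required field; a missing key is caught by the KeyError handler below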
try:
        if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']) > 2002:
            print('byr invalid')
            return False
        if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']) > 2030:
            print('iyr invalid')
            return False
        if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']) > 2030:
print('eyr invalid')
return False
if dct['hgt'][-2:] == 'in':
if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:
print('hgt invalid')
return False
elif dct['hgt'][-2:] == 'cm':
if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:
print('hgt invalid')
return False
else:
print('hgt invalid')
return False
        if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct['hcl'][1:]):
print('hcl invalid')
return False
ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if not any(dct['ecl'] == option for option in ecl_options):
print('ecl invalid')
return False
if not re.compile('[0-9]{9}').fullmatch(dct['pid']):
print('pid invalid')
return False
return True
except KeyError as e:
print('Key error: ' + str(e))
return False
grouped_input = []
curr = ''
for i in input_file:
    if i != '':
        curr += ' ' + i
    else:
        grouped_input.append(curr[1:])
        curr = ''
if curr:
    grouped_input.append(curr[1:])  # flush the last group when the file has no trailing blank line
count = 0
for i in range(0, len(grouped_input)):
    result = check_passport(grouped_input[i])  # call once so the validation messages print once
    print(str(result) + ' ' + grouped_input[i])
    if result:
        count += 1
print(count)
| import re
with open('input.txt') as f:
input_file = f.readlines()
input_file = [x.strip() for x in input_file]
def check_passport(text):
arr = text.split()
dct = {}
for elem in arr:
key = elem.split(':')[0]
val = elem.split(':')[1]
dct[key] = val
try:
        if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']) > 2002:
            print('byr invalid')
            return False
        if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']) > 2030:
            print('iyr invalid')
            return False
        if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']) > 2030:
print('eyr invalid')
return False
if dct['hgt'][-2:] == 'in':
if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:
print('hgt invalid')
return False
elif dct['hgt'][-2:] == 'cm':
if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:
print('hgt invalid')
return False
else:
print('hgt invalid')
return False
        if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct['hcl'][1:]):
print('hcl invalid')
return False
ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if not any(dct['ecl'] == option for option in ecl_options):
print('ecl invalid')
return False
if not re.compile('[0-9]{9}').fullmatch(dct['pid']):
print('pid invalid')
return False
return True
except KeyError as e:
print('Key error: ' + str(e))
return False
grouped_input = []
curr = ''
for i in input_file:
    if i != '':
        curr += ' ' + i
    else:
        grouped_input.append(curr[1:])
        curr = ''
if curr:
    grouped_input.append(curr[1:])  # flush the last group when the file has no trailing blank line
count = 0
for i in range(0, len(grouped_input)):
    result = check_passport(grouped_input[i])  # call once so the validation messages print once
    print(str(result) + ' ' + grouped_input[i])
    if result:
        count += 1
print(count)
| import re
with open('input.txt') as f:
input_file = f.readlines()
input_file = [x.strip() for x in input_file]
def check_passport(text):
arr = text.split()
dct = {}
for elem in arr:
key = elem.split(":")[0]
val = elem.split(":")[1]
dct[key] = val
try:
if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']) > 2002:
print("byr invalid")
return False
if len(dct['iyr']) !=4 or int(dct['iyr']) < 2010 or int(dct['iyr']) > 2030:
print("iyr invalid")
return False
if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']) > 2030:
print("eyr invalid")
return False
if dct['hgt'][-2:] == 'in':
if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:
print("hgt invalid")
return False
elif dct['hgt'][-2:] == 'cm':
if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:
print("hgt invalid")
return False
else:
print("hgt invalid")
return False
if dct['hcl'][0] != "#" or not re.compile("[0-9a-f]{6}").fullmatch(dct['hcl'][1:]):
print("hcl invalid")
return False
ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if not any(dct['ecl'] == option for option in ecl_options):
print("ecl invalid")
return False
if not re.compile("[0-9]{9}").fullmatch(dct['pid']):
print("pid invalid")
return False
return True
except KeyError as e:
print("Key error: " + str(e))
return False
# if ("byr:" in text and "iyr:" in text and "eyr:" in text and "hgt:" in text and "hcl:" in text and "ecl:" in text and "pid:" in text):
# return True
# else:
# return False
grouped_input = []
curr = ""
for i in input_file:
    if i != "":
        curr += " " + i
    else:
        grouped_input.append(curr[1:])
        curr = ""
if curr:
    grouped_input.append(curr[1:])  # flush the last group when the file has no trailing blank line
count = 0
for i in range(0, len(grouped_input)):
    result = check_passport(grouped_input[i])  # call once so the validation messages print once
    print(str(result) + " " + grouped_input[i])
    if result:
        count += 1
print(count) | [
1,
2,
3,
4,
5
] |
1,590 | 5f24c5a21dc151e9efbbfaff0fe1e71e65d1eb67 | class Box:
def __init__(self, id, capacity):
self.id = id
self.dogs = []
self.capacity = capacity
<mask token>
<mask token>
<mask token>
| class Box:
def __init__(self, id, capacity):
self.id = id
self.dogs = []
self.capacity = capacity
<mask token>
<mask token>
def remove_dog(self, dog):
if self.status > 0:
self.dogs.remove(dog)
return True
else:
return False
| class Box:
def __init__(self, id, capacity):
self.id = id
self.dogs = []
self.capacity = capacity
@property
def status(self):
return len(self.dogs)
<mask token>
def remove_dog(self, dog):
if self.status > 0:
self.dogs.remove(dog)
return True
else:
return False
| class Box:
def __init__(self, id, capacity):
self.id = id
self.dogs = []
self.capacity = capacity
@property
def status(self):
return len(self.dogs)
def add_dog(self, dog):
if self.capacity > self.status:
self.dogs.append(dog)
return True
else:
return False
def remove_dog(self, dog):
if self.status > 0:
self.dogs.remove(dog)
return True
else:
return False
| null | [
2,
3,
4,
5
] |
1,591 | 7fd89272d3d3584f35fd8f552cb7b14e57b7ed1b | <mask token>
| <mask token>
while True:
ret, frame = cap.read()
frame = np.float32(frame) / 255
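    # running average of all frames so far: new_avg = (old_avg * n + frame) / (n + 1)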
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack / frames
cv2.imshow('frame', np.uint8(average_stack * 255))
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| <mask token>
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
average_stack = np.float32(np.copy(frame)) / 255
frames = 1.0
while True:
ret, frame = cap.read()
frame = np.float32(frame) / 255
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack / frames
cv2.imshow('frame', np.uint8(average_stack * 255))
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
average_stack = np.float32(np.copy(frame)) / 255
frames = 1.0
while True:
ret, frame = cap.read()
frame = np.float32(frame) / 255
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack / frames
cv2.imshow('frame', np.uint8(average_stack * 255))
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
average_stack = np.float32(np.copy(frame))/255
frames = 1.0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = np.float32(frame)/255
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack/frames
# Display the resulting frame
cv2.imshow('frame',np.uint8(average_stack*255))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | [
0,
1,
2,
3,
4
] |
1,592 | 42ebd42801b7d1563c9f204f296afba5fa3c6d3c | <mask token>
class FreeReplier(RegularReplier):
<mask token>
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
<mask token>
<mask token>
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
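        # heartbeats are answered inline; every other request is queued for the worker thread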
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
| <mask token>
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods['jset.query'] = self.jset.handle
self.methods['jsd.query'] = self.jsd.handle
self.methods['jsi.query'] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
<mask token>
<mask token>
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
| <mask token>
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods['jset.query'] = self.jset.handle
self.methods['jsd.query'] = self.jsd.handle
self.methods['jsi.query'] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
<mask token>
def stop(self):
self._running = False
self.thread.join()
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
| <mask token>
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods['jset.query'] = self.jset.handle
self.methods['jsd.query'] = self.jsd.handle
self.methods['jsi.query'] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
def start(self):
self._running = True
self.thread.start()
def stop(self):
self._running = False
self.thread.join()
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
| from jaqsmds.server.repliers.basic import RegularReplier
from jaqsmds.server.repliers.handlers import JsetHandler, JsdHandler, JsiHandler
from queue import Queue, Empty
from threading import Thread
import logging
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods["jset.query"] = self.jset.handle
self.methods["jsd.query"] = self.jsd.handle
self.methods["jsi.query"] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
def start(self):
self._running = True
self.thread.start()
def stop(self):
self._running = False
self.thread.join()
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get("method", None) == ".sys.heartbeat":
return self.methods[".sys.heartbeat"](message)
else:
self.input.put([client, message])
logging.debug("queue size | %s", self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout) | [
5,
6,
7,
8,
10
] |
1,593 | a74a880039bad030d665e001da74075bd61fcc23 | <mask token>
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
<mask token>
<mask token>
| <mask token>
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when('Sort by price low to high')
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
<mask token>
| <mask token>
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when('Sort by price low to high')
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
@then('Validate price order')
def validate_price_order(context):
context.products.validate_price_order()
| from behave import given, when, then
from pages.LoginPage import LoginPage
from pages.ProductsPage import ProductsPage
class ProductsListSteps:
@given('Prepare classes products list')
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when('Sort by price low to high')
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
@then('Validate price order')
def validate_price_order(context):
context.products.validate_price_order()
| from behave import given, when, then
from pages.LoginPage import LoginPage
from pages.ProductsPage import ProductsPage
class ProductsListSteps:
@given("Prepare classes products list")
def prepare_class(context):
context.login = LoginPage(context.driver)
context.products = ProductsPage(context.driver)
@when("Sort by price low to high")
def sort_low_to_high(context):
context.products.sort_price_low_to_high()
@then("Validate price order")
def validate_price_order(context):
context.products.validate_price_order()
| [
2,
3,
4,
5,
6
] |
1,594 | 5b9f1b3ca4b50a4e9e8bd6715e73c62b4f778929 | <mask token>
class Car:
<mask token>
def activate(self):
self.deactivate()
self.pi.write(self.STBY, 1)
<mask token>
def setDrive(self, direction, dutycycle=100):
dc = int(255.0 / 100.0 * dutycycle)
if direction == 1:
self.pi.write(self.driveIN1, 1)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
elif direction == -1:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 1)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
else:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, 0)
<mask token>
def updateDistances(self):
if self.sensorTrig > 0:
for sensor in range(len(self.sensors)):
while self.pi.read(self.sensors[sensor]):
continue
self.pi.write(self.sensorTrig, True)
time.sleep(1e-06)
self.pi.write(self.sensorTrig, False)
startT = time.time()
                while not self.pi.read(self.sensors[sensor]) and time.time() - startT < 0.001:
continue
startT = time.time()
while self.pi.read(self.sensors[sensor]):
continue
endT = time.time()
self.distances[sensor] = round((endT - startT) * 17150, 2)
"""
# trial to read multiple sensors at once but was having issues
# definitely can be optimized better and needs code hang detection
startT = {}
endT = {}
self.pi.write(self.sensorTrig, True)
time.sleep(0.0000001)
self.pi.write(self.sensorTrig, False)
sensorCount = len(self.sensors)
while len(endT) < sensorCount:
for sensor in range(sensorCount):
if sensor not in startT.keys():
if self.pi.read(self.sensors[sensor]):
startT[sensor] = time.time()
elif not sensor in endT.keys():
if not self.pi.read(self.sensors[sensor]):
endT[sensor] = time.time()
for sensor in range(len(self.sensors)):
self.distances[sensor] = round((endT[sensor] - startT[sensor]) * 17150, 2)
"""
<mask token>
| <mask token>
class Car:
    def __init__(self, STBY, PWMA, AIN2, AIN1, BIN1, BIN2, PWMB, sensorTrig=0, sensors=[]):
self.pi = pigpio.pi()
if not self.pi.connected:
print('Pi not connected to pigpio.')
return
self.STBY = STBY
self.drivePWM = PWMA
self.driveIN1 = AIN1
self.driveIN2 = AIN2
self.steerPWM = PWMB
self.steerIN1 = BIN1
self.steerIN2 = BIN2
self.pi.set_mode(STBY, pigpio.OUTPUT)
self.pi.set_mode(PWMA, pigpio.OUTPUT)
self.pi.set_mode(AIN1, pigpio.OUTPUT)
self.pi.set_mode(AIN2, pigpio.OUTPUT)
self.pi.set_mode(PWMB, pigpio.OUTPUT)
self.pi.set_mode(BIN1, pigpio.OUTPUT)
self.pi.set_mode(BIN2, pigpio.OUTPUT)
self.pi.set_PWM_frequency(PWMA, 50)
self.pi.set_PWM_frequency(PWMB, 50)
self.sensorTrig = sensorTrig
self.sensors = sensors
self.distances = []
for i in range(len(sensors)):
self.distances.append(0)
if sensorTrig > 0:
self.pi.set_mode(sensorTrig, pigpio.OUTPUT)
for sensor in range(len(sensors)):
if sensors[sensor] > 0:
self.pi.set_mode(sensors[sensor], pigpio.INPUT)
self.activate()
def activate(self):
self.deactivate()
self.pi.write(self.STBY, 1)
<mask token>
def setDrive(self, direction, dutycycle=100):
dc = int(255.0 / 100.0 * dutycycle)
if direction == 1:
self.pi.write(self.driveIN1, 1)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
elif direction == -1:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 1)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
else:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, 0)
<mask token>
def updateDistances(self):
if self.sensorTrig > 0:
for sensor in range(len(self.sensors)):
while self.pi.read(self.sensors[sensor]):
continue
self.pi.write(self.sensorTrig, True)
time.sleep(1e-06)
self.pi.write(self.sensorTrig, False)
startT = time.time()
                while not self.pi.read(self.sensors[sensor]) and time.time() - startT < 0.001:
continue
startT = time.time()
while self.pi.read(self.sensors[sensor]):
continue
endT = time.time()
self.distances[sensor] = round((endT - startT) * 17150, 2)
"""
# trial to read multiple sensors at once but was having issues
# definitely can be optimized better and needs code hang detection
startT = {}
endT = {}
self.pi.write(self.sensorTrig, True)
time.sleep(0.0000001)
self.pi.write(self.sensorTrig, False)
sensorCount = len(self.sensors)
while len(endT) < sensorCount:
for sensor in range(sensorCount):
if sensor not in startT.keys():
if self.pi.read(self.sensors[sensor]):
startT[sensor] = time.time()
elif not sensor in endT.keys():
if not self.pi.read(self.sensors[sensor]):
endT[sensor] = time.time()
for sensor in range(len(self.sensors)):
self.distances[sensor] = round((endT[sensor] - startT[sensor]) * 17150, 2)
"""
def stop(self):
self.deactivate()
self.pi.stop()
| <mask token>
class Car:
    def __init__(self, STBY, PWMA, AIN2, AIN1, BIN1, BIN2, PWMB, sensorTrig=0, sensors=[]):
self.pi = pigpio.pi()
if not self.pi.connected:
print('Pi not connected to pigpio.')
return
self.STBY = STBY
self.drivePWM = PWMA
self.driveIN1 = AIN1
self.driveIN2 = AIN2
self.steerPWM = PWMB
self.steerIN1 = BIN1
self.steerIN2 = BIN2
self.pi.set_mode(STBY, pigpio.OUTPUT)
self.pi.set_mode(PWMA, pigpio.OUTPUT)
self.pi.set_mode(AIN1, pigpio.OUTPUT)
self.pi.set_mode(AIN2, pigpio.OUTPUT)
self.pi.set_mode(PWMB, pigpio.OUTPUT)
self.pi.set_mode(BIN1, pigpio.OUTPUT)
self.pi.set_mode(BIN2, pigpio.OUTPUT)
self.pi.set_PWM_frequency(PWMA, 50)
self.pi.set_PWM_frequency(PWMB, 50)
self.sensorTrig = sensorTrig
self.sensors = sensors
self.distances = []
for i in range(len(sensors)):
self.distances.append(0)
if sensorTrig > 0:
self.pi.set_mode(sensorTrig, pigpio.OUTPUT)
for sensor in range(len(sensors)):
if sensors[sensor] > 0:
self.pi.set_mode(sensors[sensor], pigpio.INPUT)
self.activate()
def activate(self):
self.deactivate()
self.pi.write(self.STBY, 1)
<mask token>
def setDrive(self, direction, dutycycle=100):
dc = int(255.0 / 100.0 * dutycycle)
if direction == 1:
self.pi.write(self.driveIN1, 1)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
elif direction == -1:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 1)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
else:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, 0)
def setSteering(self, direction, dutycycle=100):
dc = int(255.0 / 100.0 * dutycycle)
if direction == 1:
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 1)
self.pi.set_PWM_dutycycle(self.steerPWM, dc)
elif direction == -1:
self.pi.write(self.steerIN1, 1)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, dc)
else:
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, 0)
def updateDistances(self):
if self.sensorTrig > 0:
for sensor in range(len(self.sensors)):
while self.pi.read(self.sensors[sensor]):
continue
self.pi.write(self.sensorTrig, True)
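                # note: HC-SR04-style sensors typically expect a ~10 us trigger pulse; 1 us may be marginal on some modules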
time.sleep(1e-06)
self.pi.write(self.sensorTrig, False)
startT = time.time()
                while not self.pi.read(self.sensors[sensor]) and time.time() - startT < 0.001:
continue
startT = time.time()
while self.pi.read(self.sensors[sensor]):
continue
endT = time.time()
self.distances[sensor] = round((endT - startT) * 17150, 2)
"""
# trial to read multiple sensors at once but was having issues
# definitely can be optimized better and needs code hang detection
startT = {}
endT = {}
self.pi.write(self.sensorTrig, True)
time.sleep(0.0000001)
self.pi.write(self.sensorTrig, False)
sensorCount = len(self.sensors)
while len(endT) < sensorCount:
for sensor in range(sensorCount):
if sensor not in startT.keys():
if self.pi.read(self.sensors[sensor]):
startT[sensor] = time.time()
elif not sensor in endT.keys():
if not self.pi.read(self.sensors[sensor]):
endT[sensor] = time.time()
for sensor in range(len(self.sensors)):
self.distances[sensor] = round((endT[sensor] - startT[sensor]) * 17150, 2)
"""
def stop(self):
self.deactivate()
self.pi.stop()
| <mask token>
class Car:
    def __init__(self, STBY, PWMA, AIN2, AIN1, BIN1, BIN2, PWMB, sensorTrig=0, sensors=[]):
self.pi = pigpio.pi()
if not self.pi.connected:
print('Pi not connected to pigpio.')
return
self.STBY = STBY
self.drivePWM = PWMA
self.driveIN1 = AIN1
self.driveIN2 = AIN2
self.steerPWM = PWMB
self.steerIN1 = BIN1
self.steerIN2 = BIN2
self.pi.set_mode(STBY, pigpio.OUTPUT)
self.pi.set_mode(PWMA, pigpio.OUTPUT)
self.pi.set_mode(AIN1, pigpio.OUTPUT)
self.pi.set_mode(AIN2, pigpio.OUTPUT)
self.pi.set_mode(PWMB, pigpio.OUTPUT)
self.pi.set_mode(BIN1, pigpio.OUTPUT)
self.pi.set_mode(BIN2, pigpio.OUTPUT)
self.pi.set_PWM_frequency(PWMA, 50)
self.pi.set_PWM_frequency(PWMB, 50)
self.sensorTrig = sensorTrig
self.sensors = sensors
self.distances = []
for i in range(len(sensors)):
self.distances.append(0)
if sensorTrig > 0:
self.pi.set_mode(sensorTrig, pigpio.OUTPUT)
for sensor in range(len(sensors)):
if sensors[sensor] > 0:
self.pi.set_mode(sensors[sensor], pigpio.INPUT)
self.activate()
def activate(self):
self.deactivate()
self.pi.write(self.STBY, 1)
def deactivate(self):
self.pi.write(self.STBY, 0)
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, 0)
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, 0)
if self.sensorTrig > 0:
self.pi.write(self.sensorTrig, False)
def setDrive(self, direction, dutycycle=100):
dc = int(255.0 / 100.0 * dutycycle)
if direction == 1:
self.pi.write(self.driveIN1, 1)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
elif direction == -1:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 1)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
else:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, 0)
def setSteering(self, direction, dutycycle=100):
dc = int(255.0 / 100.0 * dutycycle)
if direction == 1:
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 1)
self.pi.set_PWM_dutycycle(self.steerPWM, dc)
elif direction == -1:
self.pi.write(self.steerIN1, 1)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, dc)
else:
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, 0)
def updateDistances(self):
if self.sensorTrig > 0:
for sensor in range(len(self.sensors)):
while self.pi.read(self.sensors[sensor]):
continue
self.pi.write(self.sensorTrig, True)
time.sleep(1e-06)
self.pi.write(self.sensorTrig, False)
startT = time.time()
                while not self.pi.read(self.sensors[sensor]) and time.time() - startT < 0.001:
continue
startT = time.time()
while self.pi.read(self.sensors[sensor]):
continue
endT = time.time()
self.distances[sensor] = round((endT - startT) * 17150, 2)
"""
# trial to read multiple sensors at once but was having issues
# definitely can be optimized better and needs code hang detection
startT = {}
endT = {}
self.pi.write(self.sensorTrig, True)
time.sleep(0.0000001)
self.pi.write(self.sensorTrig, False)
sensorCount = len(self.sensors)
while len(endT) < sensorCount:
for sensor in range(sensorCount):
if sensor not in startT.keys():
if self.pi.read(self.sensors[sensor]):
startT[sensor] = time.time()
elif not sensor in endT.keys():
if not self.pi.read(self.sensors[sensor]):
endT[sensor] = time.time()
for sensor in range(len(self.sensors)):
self.distances[sensor] = round((endT[sensor] - startT[sensor]) * 17150, 2)
"""
def stop(self):
self.deactivate()
self.pi.stop()
| import time
import pigpio
class Car:
def __init__(self, STBY, PWMA, AIN2, AIN1, BIN1, BIN2, PWMB, sensorTrig=0, sensors=[]):
self.pi = pigpio.pi()
if not self.pi.connected:
print("Pi not connected to pigpio.")
return
# GPIO Drive Pin locations
self.STBY = STBY
# drive motor
self.drivePWM = PWMA
self.driveIN1 = AIN1
self.driveIN2 = AIN2
# steering motor
self.steerPWM = PWMB
self.steerIN1 = BIN1
self.steerIN2 = BIN2
# initialize GPIO
self.pi.set_mode(STBY, pigpio.OUTPUT)
self.pi.set_mode(PWMA, pigpio.OUTPUT)
self.pi.set_mode(AIN1, pigpio.OUTPUT)
self.pi.set_mode(AIN2, pigpio.OUTPUT)
self.pi.set_mode(PWMB, pigpio.OUTPUT)
self.pi.set_mode(BIN1, pigpio.OUTPUT)
self.pi.set_mode(BIN2, pigpio.OUTPUT)
self.pi.set_PWM_frequency(PWMA, 50)
self.pi.set_PWM_frequency(PWMB, 50)
# Sensor GPIO Pin locations
self.sensorTrig = sensorTrig
self.sensors = sensors
self.distances = []
for i in range(len(sensors)):
self.distances.append(0)
# initialize sensor GPIO
if sensorTrig > 0:
self.pi.set_mode(sensorTrig, pigpio.OUTPUT)
for sensor in range(len(sensors)):
if sensors[sensor] > 0:
self.pi.set_mode(sensors[sensor], pigpio.INPUT)
# activate car
self.activate()
# activate motors
def activate(self):
self.deactivate()
self.pi.write(self.STBY, 1)
# shut off motors
def deactivate(self):
self.pi.write(self.STBY, 0)
# shut off drive motor
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, 0)
# shut off steering motor
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, 0)
if self.sensorTrig > 0:
# make sure sensors aren't triggered
self.pi.write(self.sensorTrig, False)
# set drive motor
def setDrive(self, direction, dutycycle=100):
dc = int((255.0 / 100.0) * dutycycle)
if direction == 1:
self.pi.write(self.driveIN1, 1)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
elif direction == -1:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 1)
self.pi.set_PWM_dutycycle(self.drivePWM, dc)
else:
self.pi.write(self.driveIN1, 0)
self.pi.write(self.driveIN2, 0)
self.pi.set_PWM_dutycycle(self.drivePWM, 0)
# set steering motor
def setSteering(self, direction, dutycycle=100):
dc = int((255.0 / 100.0) * dutycycle)
if direction == 1:
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 1)
self.pi.set_PWM_dutycycle(self.steerPWM, dc)
elif direction == -1:
self.pi.write(self.steerIN1, 1)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, dc)
else:
self.pi.write(self.steerIN1, 0)
self.pi.write(self.steerIN2, 0)
self.pi.set_PWM_dutycycle(self.steerPWM, 0)
# update sensors distance
def updateDistances(self):
if self.sensorTrig > 0:
for sensor in range(len(self.sensors)):
while self.pi.read(self.sensors[sensor]):
continue
# trigger the sensors so they start reading
self.pi.write(self.sensorTrig, True)
time.sleep(0.000001)
self.pi.write(self.sensorTrig, False)
                # wait for the echo line to go high; if it takes longer than 0.001 seconds then something went wrong
startT = time.time()
while not self.pi.read(self.sensors[sensor]) and time.time() - startT < .001:
continue
startT = time.time()
# wait for the sensor to become inactive which gives us the ending time
while self.pi.read(self.sensors[sensor]):
continue
endT = time.time()
# convert the sensor readings to distance in centimeters
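                # 17150 = speed of sound (~34300 cm/s) halved to account for the echo's round trip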
self.distances[sensor] = round((endT - startT) * 17150, 2)
'''
# trial to read multiple sensors at once but was having issues
# definitely can be optimized better and needs code hang detection
startT = {}
endT = {}
self.pi.write(self.sensorTrig, True)
time.sleep(0.0000001)
self.pi.write(self.sensorTrig, False)
sensorCount = len(self.sensors)
while len(endT) < sensorCount:
for sensor in range(sensorCount):
if sensor not in startT.keys():
if self.pi.read(self.sensors[sensor]):
startT[sensor] = time.time()
elif not sensor in endT.keys():
if not self.pi.read(self.sensors[sensor]):
endT[sensor] = time.time()
for sensor in range(len(self.sensors)):
self.distances[sensor] = round((endT[sensor] - startT[sensor]) * 17150, 2)
'''
# shut everything off and disconnect from pi
def stop(self):
self.deactivate()
self.pi.stop()
| [
4,
6,
7,
8,
10
] |
1,595 | 3ab26612111e3df59f41f5b5e0bf23398e015a8a | <mask token>
| <mask token>
class GameStats:
<mask token>
| <mask token>
class GameStats:
def __init__(self, setting):
self.setting = setting
self.ships_left = self.setting.ship_limit
self.game_active = True
| """
Track statistics for the ship.
"""
class GameStats:
def __init__(self, setting):
self.setting = setting
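        # number of ships the player has left before the game ends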
self.ships_left = self.setting.ship_limit
self.game_active = True
| null | [
0,
1,
2,
3
] |
1,596 | e57680c9bd09866e68ade0cfea7ce83cd6d50f58 | <mask token>
def solve_problem(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
orbit_counts = {'COM': 0}
for object in tuple(parents.keys()):
stack = [object]
while stack[-1] not in orbit_counts:
stack.append(parents[stack[-1]])
known = orbit_counts[stack.pop()]
stack.reverse()
for thing in stack:
orbit_counts[thing] = orbit_counts[parents[thing]] + 1
return sum(orbit_counts.values())
<mask token>
def get_parents(key, parents):
"""Get parents for a particular key through parents dict"""
r = [key]
while True:
this_one = r[-1]
if this_one == 'COM':
return r
r.append(parents[this_one])
def part2(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
santa = get_parents('SAN', parents)
me = get_parents('YOU', parents)
for i, planet in enumerate(me):
if planet in santa:
print(f'met at {planet}')
print('')
print(santa[:santa.index(planet) + 1])
print(len(santa[:santa.index(planet) + 1]))
print(santa.index(planet))
print('')
print(me[:i + 1])
print(len(me[:i + 1]))
print(i)
            return i + santa.index(planet) - 2
<mask token>
| <mask token>
with open(__file__.replace('.py', '.txt')) as f:
problem = f.read()
<mask token>
def solve_problem(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
orbit_counts = {'COM': 0}
for object in tuple(parents.keys()):
stack = [object]
while stack[-1] not in orbit_counts:
stack.append(parents[stack[-1]])
known = orbit_counts[stack.pop()]
stack.reverse()
for thing in stack:
orbit_counts[thing] = orbit_counts[parents[thing]] + 1
return sum(orbit_counts.values())
if sys.argv[-1] in data.keys():
scenarios = sys.argv[-1],
else:
scenarios = tuple(data.keys())
for scenario in scenarios:
input = data[scenario]
r = solve_problem(input)
print(f'FINAL ANSWER: {r}')
print('')
print('**** PART 2 ******')
def get_parents(key, parents):
"""Get parents for a particular key through parents dict"""
r = [key]
while True:
this_one = r[-1]
if this_one == 'COM':
return r
r.append(parents[this_one])
def part2(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
santa = get_parents('SAN', parents)
me = get_parents('YOU', parents)
for i, planet in enumerate(me):
if planet in santa:
print(f'met at {planet}')
print('')
print(santa[:santa.index(planet) + 1])
print(len(santa[:santa.index(planet) + 1]))
print(santa.index(planet))
print('')
print(me[:i + 1])
print(len(me[:i + 1]))
print(i)
            return i + santa.index(planet) - 2
<mask token>
for scenario in scenarios:
input = data[scenario]
r = part2(input)
print(f'Part 2 answer {r}')
| <mask token>
with open(__file__.replace('.py', '.txt')) as f:
problem = f.read()
data = {'problem': problem, 'example':
"""COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L"""}
def solve_problem(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
orbit_counts = {'COM': 0}
for object in tuple(parents.keys()):
stack = [object]
while stack[-1] not in orbit_counts:
stack.append(parents[stack[-1]])
known = orbit_counts[stack.pop()]
stack.reverse()
for thing in stack:
orbit_counts[thing] = orbit_counts[parents[thing]] + 1
return sum(orbit_counts.values())
if sys.argv[-1] in data.keys():
scenarios = sys.argv[-1],
else:
scenarios = tuple(data.keys())
for scenario in scenarios:
input = data[scenario]
r = solve_problem(input)
print(f'FINAL ANSWER: {r}')
print('')
print('**** PART 2 ******')
def get_parents(key, parents):
"""Get parents for a particular key through parents dict"""
r = [key]
while True:
this_one = r[-1]
if this_one == 'COM':
return r
r.append(parents[this_one])
def part2(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
santa = get_parents('SAN', parents)
me = get_parents('YOU', parents)
for i, planet in enumerate(me):
if planet in santa:
print(f'met at {planet}')
print('')
print(santa[:santa.index(planet) + 1])
print(len(santa[:santa.index(planet) + 1]))
print(santa.index(planet))
print('')
print(me[:i + 1])
print(len(me[:i + 1]))
print(i)
            return i + santa.index(planet) - 2
data['example'] = """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
for scenario in scenarios:
input = data[scenario]
r = part2(input)
print(f'Part 2 answer {r}')
| import sys
import json
with open(__file__.replace('.py', '.txt')) as f:
problem = f.read()
data = {'problem': problem, 'example':
"""COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L"""}
def solve_problem(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
orbit_counts = {'COM': 0}
for object in tuple(parents.keys()):
stack = [object]
while stack[-1] not in orbit_counts:
stack.append(parents[stack[-1]])
known = orbit_counts[stack.pop()]
stack.reverse()
for thing in stack:
orbit_counts[thing] = orbit_counts[parents[thing]] + 1
return sum(orbit_counts.values())
if sys.argv[-1] in data.keys():
scenarios = sys.argv[-1],
else:
scenarios = tuple(data.keys())
for scenario in scenarios:
input = data[scenario]
r = solve_problem(input)
print(f'FINAL ANSWER: {r}')
print('')
print('**** PART 2 ******')
def get_parents(key, parents):
"""Get parents for a particular key through parents dict"""
r = [key]
while True:
this_one = r[-1]
if this_one == 'COM':
return r
r.append(parents[this_one])
def part2(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
santa = get_parents('SAN', parents)
me = get_parents('YOU', parents)
for i, planet in enumerate(me):
if planet in santa:
print(f'met at {planet}')
print('')
print(santa[:santa.index(planet) + 1])
print(len(santa[:santa.index(planet) + 1]))
print(santa.index(planet))
print('')
print(me[:i + 1])
print(len(me[:i + 1]))
print(i)
            return i + santa.index(planet) - 2
data['example'] = """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
for scenario in scenarios:
input = data[scenario]
r = part2(input)
print(f'Part 2 answer {r}')
| import sys
import json
with open(__file__.replace('.py', '.txt')) as f:
problem = f.read()
data = {
'problem': problem,
'example': """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L""" # should give 42
}
def solve_problem(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
orbit_counts = {'COM': 0}
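    # walk each object up toward COM, caching counts so shared chains are not recomputed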
for object in tuple(parents.keys()):
stack = [object]
while stack[-1] not in orbit_counts:
stack.append(parents[stack[-1]])
known = orbit_counts[stack.pop()]
stack.reverse()
for thing in stack:
orbit_counts[thing] = orbit_counts[parents[thing]] + 1
return sum(orbit_counts.values())
# part 1
if sys.argv[-1] in data.keys():
scenarios = (sys.argv[-1],)
else:
scenarios = tuple(data.keys())
for scenario in scenarios:
input = data[scenario]
r = solve_problem(input)
print(f'FINAL ANSWER: {r}')
# 932, too low
print('')
print('**** PART 2 ******')
def get_parents(key, parents):
"""Get parents for a particular key through parents dict"""
r = [key]
while True:
this_one = r[-1]
if this_one == 'COM':
return r
r.append(parents[this_one])
def part2(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
santa = get_parents('SAN', parents)
me = get_parents('YOU', parents)
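    # walk outward from YOU; the first planet that also appears on SAN's chain is the meeting point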
for i, planet in enumerate(me):
if planet in santa:
print(f'met at {planet}')
print('')
print(santa[:santa.index(planet) + 1])
print(len(santa[:santa.index(planet) + 1]))
            # minus one because we want traversals between elements in the list
print(santa.index(planet))
print('')
print(me[:i + 1])
print(len(me[:i + 1]))
            # minus one because we want traversals between elements in the list
            print(i)
            # i counts hops from YOU to the meeting planet and santa.index(planet)
            # counts hops from SAN; subtract one on each side because transfers
            # start from the objects YOU and SAN already orbit (the example above
            # then yields 4, the puzzle's expected answer)
            return i + santa.index(planet) - 2
data['example'] = """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
for scenario in scenarios:
input = data[scenario]
r = part2(input)
print(f'Part 2 answer {r}')
# 432, too high
# 433, too high
# 431, too high
# 430, correct
| [
3,
4,
5,
6,
7
] |
1,597 | 74c875d00c665aabbcad4e23e6059c3445d5e7bd | <mask token>
def load_dataframe(dataset):
return pd.read_csv(dataset)
<mask token>
| <mask token>
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xs.append(x1)
for i in range(t // 2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
<mask token>
| <mask token>
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xs.append(x1)
for i in range(t // 2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
if __name__ == '__main__':
gc.enable()
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
warnings.simplefilter('ignore', UserWarning)
top_folder = './output'
today = datetime.today()
now = today.strftime('%m%d-%H%M')
log_name = now + '.txt'
sys.stdout = Logger(path.join(top_folder, log_name))
seed_np = 1011
np.random.seed(seed_np)
print('numpy seed: {}'.format(seed_np))
start = time.time()
with multiprocessing.Pool() as pool:
train, test = pool.map(load_dataframe, ['./input/train.csv',
'./input/test.csv'])
df_test = test.drop(columns=['ID_code']).values
unique_samples = []
unique_count = np.zeros_like(df_test)
for feature in tqdm(range(df_test.shape[1])):
_, index_, count_ = np.unique(df_test[:, feature], return_counts=
True, return_index=True)
unique_count[index_[count_ == 1], feature] += 1
idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
synthetic = test.loc[idx_synthetic]
test = test.loc[idx_score]
raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
len_train = len(train)
col_var = list(raw.columns[2:])
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[cnt == 1].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_2') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[np.isin(cnt, [1, 2])].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_3') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)
print('data: {}'.format(raw.shape))
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
feats = [col for col in raw.columns.values if col not in ['ID_code',
'target']]
train = raw[:len_train]
test = raw[len_train:].copy()
x_train = train[feats]
y_train = train['target']
x_test = test[feats]
print('trn_x: {}'.format(x_train.shape))
print('x_test: {}'.format(x_test.shape))
param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',
'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,
'learning_rate': 0.01, 'num_leaves': 8, 'max_depth': -1,
'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction':
0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}
print('model params:\n{}'.format(pd.Series(list(param.values()), index=
list(param.keys()))))
seed_fold = 26
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)
print('StratifiedKFold seed: {}'.format(seed_fold))
round_max = 30000
round_early_stopping = 3000
print('num_round: {}'.format(round_max))
print('early_stopping_round: {}'.format(round_early_stopping))
oof = np.zeros(len(x_train))
predictions = np.zeros(len(x_test))
start = time.time()
for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,
y_train.values)):
print('fold n°{}'.format(fold_))
trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
N = 5
for i in range(N):
X_t, y_t = augment(trn_x.values, trn_y.values)
X_t = pd.DataFrame(X_t, columns=feats)
trn_data = lgb.Dataset(X_t, label=y_t)
val_data = lgb.Dataset(val_x, label=val_y)
evals_result = {}
clf = lgb.train(param, trn_data, round_max, valid_sets=[
trn_data, val_data], early_stopping_rounds=
round_early_stopping, verbose_eval=1000, evals_result=
evals_result)
oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration
) / N
predictions += clf.predict(x_test, num_iteration=clf.best_iteration
) / folds.n_splits / N
fold_score = roc_auc_score(val_y, oof[val_idx])
print('fold {} auc score: {:.5f}'.format(fold_, fold_score))
cv_score = roc_auc_score(y_train, oof)
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
print('auc score: {:.5f}'.format(cv_score))
sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(
cv_score, 5)))
makedirs(sub_folder, exist_ok=True)
test['target'] = predictions
test[['ID_code', 'target']].to_csv(path.join(sub_folder,
'submission.csv'), index=False)
raw['oof'] = np.concatenate([oof, predictions], axis=0)
raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=
False)
| import gc
import sys
import time
import warnings
import multiprocessing
import numpy as np
import pandas as pd
import lightgbm as lgb
from os import path, makedirs
from tqdm import tqdm
from utils import Logger
from datetime import datetime
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xs.append(x1)
for i in range(t // 2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
if __name__ == '__main__':
gc.enable()
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
warnings.simplefilter('ignore', UserWarning)
top_folder = './output'
today = datetime.today()
now = today.strftime('%m%d-%H%M')
log_name = now + '.txt'
sys.stdout = Logger(path.join(top_folder, log_name))
seed_np = 1011
np.random.seed(seed_np)
print('numpy seed: {}'.format(seed_np))
start = time.time()
with multiprocessing.Pool() as pool:
train, test = pool.map(load_dataframe, ['./input/train.csv',
'./input/test.csv'])
df_test = test.drop(columns=['ID_code']).values
unique_samples = []
unique_count = np.zeros_like(df_test)
for feature in tqdm(range(df_test.shape[1])):
_, index_, count_ = np.unique(df_test[:, feature], return_counts=
True, return_index=True)
unique_count[index_[count_ == 1], feature] += 1
idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
synthetic = test.loc[idx_synthetic]
test = test.loc[idx_score]
raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
len_train = len(train)
col_var = list(raw.columns[2:])
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[cnt == 1].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_2') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[np.isin(cnt, [1, 2])].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_3') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)
print('data: {}'.format(raw.shape))
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
feats = [col for col in raw.columns.values if col not in ['ID_code',
'target']]
train = raw[:len_train]
test = raw[len_train:].copy()
x_train = train[feats]
y_train = train['target']
x_test = test[feats]
print('trn_x: {}'.format(x_train.shape))
print('x_test: {}'.format(x_test.shape))
param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',
'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,
'learning_rate': 0.01, 'num_leaves': 8, 'max_depth': -1,
'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction':
0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}
print('model params:\n{}'.format(pd.Series(list(param.values()), index=
list(param.keys()))))
seed_fold = 26
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)
print('StratifiedKFold seed: {}'.format(seed_fold))
round_max = 30000
round_early_stopping = 3000
print('num_round: {}'.format(round_max))
print('early_stopping_round: {}'.format(round_early_stopping))
oof = np.zeros(len(x_train))
predictions = np.zeros(len(x_test))
start = time.time()
for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,
y_train.values)):
print('fold n°{}'.format(fold_))
trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
N = 5
for i in range(N):
X_t, y_t = augment(trn_x.values, trn_y.values)
X_t = pd.DataFrame(X_t, columns=feats)
trn_data = lgb.Dataset(X_t, label=y_t)
val_data = lgb.Dataset(val_x, label=val_y)
evals_result = {}
clf = lgb.train(param, trn_data, round_max, valid_sets=[
trn_data, val_data], early_stopping_rounds=
round_early_stopping, verbose_eval=1000, evals_result=
evals_result)
oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration
) / N
predictions += clf.predict(x_test, num_iteration=clf.best_iteration
) / folds.n_splits / N
fold_score = roc_auc_score(val_y, oof[val_idx])
print('fold {} auc score: {:.5f}'.format(fold_, fold_score))
cv_score = roc_auc_score(y_train, oof)
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
print('auc score: {:.5f}'.format(cv_score))
sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(
cv_score, 5)))
makedirs(sub_folder, exist_ok=True)
test['target'] = predictions
test[['ID_code', 'target']].to_csv(path.join(sub_folder,
'submission.csv'), index=False)
raw['oof'] = np.concatenate([oof, predictions], axis=0)
raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=
False)
| import gc
import sys
import time
import warnings
import multiprocessing
import numpy as np
import pandas as pd
import lightgbm as lgb
from os import path, makedirs
from tqdm import tqdm
from utils import Logger
from datetime import datetime
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
# ======================================================================= Method
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
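    # oversample: build t shuffled copies of the positive class (and t//2 of the
    # negative) by permuting each (var, repeat_2, repeat_3) column triplet across
    # rows within the class, which preserves every per-feature distribution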
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c+200, c+400]]
np.random.shuffle(val)
x1[:, [c, c+200, c+400]] = val
xs.append(x1)
for i in range(t//2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c+200, c+400]]
np.random.shuffle(val)
x1[:, [c, c+200, c+400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
# ======================================================================= Main
if __name__ == '__main__':
gc.enable()
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
warnings.simplefilter('ignore', UserWarning)
# =================================================================== Params
top_folder = './output'
today = datetime.today()
now = today.strftime('%m%d-%H%M')
log_name = now + '.txt'
sys.stdout = Logger(path.join(top_folder, log_name))
seed_np = 1011
np.random.seed(seed_np)
print('numpy seed: {}'.format(seed_np))
# =================================================================== Load Data
start = time.time()
with multiprocessing.Pool() as pool:
train, test = pool.map(load_dataframe, ['./input/train.csv', './input/test.csv'])
# === fake sample
df_test = test.drop(columns=['ID_code']).values
unique_samples = []
unique_count = np.zeros_like(df_test)
for feature in tqdm(range(df_test.shape[1])):
_, index_, count_ = np.unique(df_test[:, feature], return_counts=True, return_index=True)
unique_count[index_[count_ == 1], feature] += 1
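    # test rows with at least one value unique to them are real; rows with none are synthetic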
idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
synthetic = test.loc[idx_synthetic]
test = test.loc[idx_score]
raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
# ============================== Extra Feature
len_train = len(train)
col_var = list(raw.columns[2:])
    # === replace values that occur only once with NA
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[cnt == 1].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [col + '_repeat_2' for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
    # === replace values that occur once or twice with NA
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[np.isin(cnt, [1, 2])].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [col + '_repeat_3' for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)
# === logging
print('data: {}'.format(raw.shape))
print('elapsed time: {:.1f} min'.format((time.time() - start)/60))
# =================================================================== PreProcess
feats = [col for col in raw.columns.values if col not in ['ID_code', 'target']]
# =================================================================== Model
train = raw[:len_train]
test = raw[len_train:].copy()
x_train = train[feats]
y_train = train['target']
x_test = test[feats]
print('trn_x: {}'.format(x_train.shape))
print('x_test: {}'.format(x_test.shape))
param = {
'objective': 'binary',
'boosting': 'gbdt',
'metric': 'auc',
'verbosity': -1,
'n_jobs': 11,
'random_state': 1993,
'learning_rate': 0.01,
'num_leaves': 8,
'max_depth': -1,
'feature_fraction': 0.05,
'bagging_freq': 5,
'bagging_fraction': 0.4,
'min_data_in_leaf': 80,
'min_sum_hessian_in_leaf': 10.0,
}
print('model params:\n{}'.format(pd.Series(list(param.values()), index=list(param.keys()))))
seed_fold = 26
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)
print('StratifiedKFold seed: {}'.format(seed_fold))
round_max = 30000
round_early_stopping = 3000
print('num_round: {}'.format(round_max))
print('early_stopping_round: {}'.format(round_early_stopping))
# === training
oof = np.zeros(len(x_train))
predictions = np.zeros(len(x_test))
start = time.time()
for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values, y_train.values)):
print("fold n°{}".format(fold_))
trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
N = 5
for i in range(N):
X_t, y_t = augment(trn_x.values, trn_y.values)
X_t = pd.DataFrame(X_t, columns=feats)
trn_data = lgb.Dataset(X_t, label=y_t)
val_data = lgb.Dataset(val_x, label=val_y)
evals_result = {}
clf = lgb.train(param,
trn_data,
round_max,
valid_sets=[trn_data, val_data],
early_stopping_rounds=round_early_stopping,
verbose_eval=1000,
evals_result=evals_result)
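            # average validation and test predictions over the N augmentation rounds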
oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration) / N
predictions += clf.predict(x_test, num_iteration=clf.best_iteration) / folds.n_splits / N
fold_score = roc_auc_score(val_y, oof[val_idx])
print('fold {} auc score: {:.5f}'.format(fold_, fold_score))
cv_score = roc_auc_score(y_train, oof)
print('elapsed time: {:.1f} min'.format((time.time() - start)/60))
print('auc score: {:.5f}'.format(cv_score))
# =================================================================== Saving File
sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(cv_score, 5)))
makedirs(sub_folder, exist_ok=True)
test['target'] = predictions
test[['ID_code', 'target']].to_csv(path.join(sub_folder, 'submission.csv'), index=False)
raw['oof'] = np.concatenate([oof, predictions], axis=0)
raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=False)
| [
1,
2,
3,
4,
5
] |
1,598 | 94b1e0280eff165f63e117969d5e1bf9d1e35193 | """Identifying pronoun-antecedent agreement"""
from question import Question,Packet
qdict={
"correct pronoun-antecedent agreement":[
"<u>He</u> came home to <u>his</u> own car.",
"<u>He</u> found <u>his</u> sneakers in the garage.",
"<u>Harry</u> gave <u>himself</u> a baseball for Christmas.",
"<u>Jill</u> found <u>her</u> missing sock on top of the dresser.",
"<u>The man named Voldemort</u> gave the girl named Hermione <u>his</u> own surprising gift for Christmas.",
"<u>The boy</u> gave the girl <u>his</u> tiny little pot for Christmas.",
"<u>They</u> found <u>themselves</u> in the midst of a great struggle with Greyback.",
"<u>The man named Voldemort</u> discovered that he held the secret to <u>his</u> success in his own hands.",
"<u>The man named Voldemort</u> hated <u>himself</u> after Harry defeated him.",
"The man named Voldemort found his wand to be too weak for Dumbledore.",
"The man named Voldemort found his wand in need of serious repair.",
"We found ourselves in the midst of a huge explosion.",
"I found myself in a real fit of pain.",
"Somebody has left their bag on the floor.",
"A can of lima beans sits on its shelf.",
"Josh and Jill made their presentation Monday.",
"Josh and Fiona made their presentation yesterday.",
"On Tuesday, Gandalf and Bilbo made their speech.",
"The jury read its verdict.",
"The crowd found its home inside the tree.",
"The flock went its own way for the summer.",
"Jury members gave their individual opinions.",
"The flocks gave their quaks in agreement with the jury.",
"The school had its roof repaired over the summer.",
"The swarm of bees had its nest inside Greyback's werewolf home.",
"The herd of cattle gathered into its cramp little barn for the night.",
"The two boys who owned that <u>home</u> found fortune inside one of <u>its</u> rooms.",
"The children, who were sometimes happy, had their own rooms.",
"They were so bored with the lecture, <u>they</u> found themselves drooling on <u>their</u> own homework.",
],
"incorrect pronoun-antecedent agreement":[
"The boy gave the girl its tiny little pot for Christmas.",
"He found yourself sneakers in the garage.",
"They found them sneakers to be in the locker.",
"He gave themselves a baseball outside the locker.",
"They gave himself something fun to do during the lecture.",
"The man named Voldemort gave the girl named Hermione their own surprising gift for Christmas.",
"The man named Voldemort discovered that he held the secret to her success in her own hands.",
"The man named Voldemort hated myself after Harry defeated them.",
"The man named Voldemort found herself to be too weak for Dumbledore.",
"The man named Voldemort found yourself in need of serious repair.",
"President Lincoln delivered her Gettysburg Address in 1863.",
"A can of pinto beans sits on it's shelf.",
"Josh and Jill made his presentation Monday.",
"Josh and Jane made her presentation yesterday.",
"On Tuesday, Tom and Mr Riddle made his speech.",
"The jury read their verdict.",
"The crowd found their home inside the tree.",
"The flock went their own way for the summer.",
"Jury members gave his individual opinions.",
"The flocks gave its quaks in agreement with the jury.",
"The school had their roof repaired over the summer.",
"The swarm of bees had their nest inside Greyback's werewolf home.",
"The herd of cattle gathered into their cramp barn for the night.",
"The two <u>boys</u> who owned that home found fortune inside one of <u>his</u> own rooms.",
"The two <u>boys</u> who owned that home found fortune inside one of <u>her</u> own rooms.",
"The <u>children</u>, who were sometimes happy, had <u>its</u> own rooms.",
"They were so bored with the lecture, <u>they</u> found themselves drooling on <u>his</u> own homework.",
"He was so tired <u>he</u> fell asleep on <u>their</u> own chair.",
"<u>She</u> was so tired <u>he</u> fell asleep on his own chair.",
"He was so tired <u>he</u> fell asleep on <u>her</u> own chair.",
"They found himself strong in the face of Greyback.",
]
}
def make_packet(number=1):
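    # one Question per category in qdict, expanded into a packet of `number` items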
return Packet([Question(x,qdict=qdict,qsection="Antecedent Agreement") for x in qdict.keys()]).make_packet(number)
if __name__=="__main__":
print "testing..."
assert [Question(x,qdict=qdict,qsection="Antecedent Agreement") for x in qdict.keys()][0].get_Question()
print make_packet(10) | null | null | null | null | [
0
] |
1,599 | 9c35e64fd773c79dc20e6b388478e892bda85788 | <mask token>
| <mask token>
print(libras)
| quilogramas = float(input('Insira o peso em Kg:'))
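# identifiers are Portuguese: quilogramas = kilograms, libras = pounds; the
# prompt reads "Enter the weight in kg:"; 1 lb is about 0.4536 kg, so dividing
# kilograms by 0.45 gives an approximate weight in pounds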
libras = quilogramas / 0.45
print(libras)
| quilogramas = float ( input ( "Insira o peso em Kg:" ))
libras = quilogramas / 0.45
print ( libras ) | null | [
0,
1,
2,
3
] |