import os
import csv
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." )
import aikif.dataTools.cls_datatable as mod_datatable
import config as mod_cfg
data_folder = mod_cfg.core_folder + os.sep + 'aikif' + os.sep + 'data' + os.sep + 'ref'
column_map_file = data_folder + os.sep + 'rules_column_maps.csv'
map_file = data_folder + os.sep + 'mapping_rules.csv'
sample_datafile = data_folder + os.sep + 'sample-filelist-for-AIKIF.csv'
class Mapper(object):
"""
Main class to map input information to aikif data structures
based on a mapping table
"""
def __init__(self, map_file=None):
"""
setup that reads the table
"""
self.map_type = 'file'
self.map_file = map_file
self.maps = [] # list of MapRule objects
self.load_rules()
def __str__(self):
res = ' -- List of Mapping Business Rules -- \n'
for m in self.maps:
res += str(m)
return res
def get_maps_stats(self):
"""
calculates basic stats on the MapRule elements of the maps
to give a quick overview.
"""
tpes = {}
for m in self.maps:
if m.tpe in tpes:
tpes[m.tpe] += 1
else:
tpes[m.tpe] = 1
return tpes
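# Usage sketch for Mapper (assumes a rules file exists where each
# non-comment line is 'type, key, value'):
#     m = Mapper(map_file)
#     print(m)                   # lists the loaded MapRule objects
#     print(m.get_maps_stats())  # e.g. {'file': 6, 'text': 4}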
def load_rules(self):
"""
load the rules from file
"""
self.maps = []
with open(self.map_file, 'r') as f:
for line in f:
if line.strip() != '' and line.strip()[0:1] != '#':
rule = MapRule(line)
#print('rule = ', rule)
self.maps.append(rule)
def save_rules(self, op_file):
"""
save the rules to file after web updates or program changes
"""
with open(op_file, 'w') as f:
for m in self.maps:
f.write(m.format_for_file_output())
def process_raw_file(self, raw_file_name, field_names):
"""
takes the filename to be read and uses the maps setup
on class instantiation to process the file.
This is a top level function and uses self.maps which
should be the column descriptions (in order).
"""
dist_vals = []
group_dat = []
events = []
num_lines = 0 # guard against an empty file so the return below cannot raise NameError
with open(raw_file_name) as csvfile:
reader = csv.DictReader(csvfile, fieldnames = field_names)
for num_lines, row in enumerate(reader):
#print('row = =',row)
for col_num, fld in enumerate(field_names):
try:
#print('self.maps[', col_num, '] = ', self.maps[col_num])
if self.maps[col_num].val == 'group_distinct':
group_dat.append(str(row[fld]))
elif self.maps[col_num].val == 'event_date':
events.append(str(row[fld]))
except Exception as ex:
print('parsing error - should not really be splitting on a comma here', str(ex))
dist_vals = sorted(list(set(group_dat)))
return num_lines, dist_vals, group_dat, sorted(list(set(events)))
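# Usage sketch (hypothetical CSV and column names; self.maps must describe
# the columns in order, with vals such as 'group_distinct' or 'event_date'):
#     lines, dist, groups, events = m.process_raw_file(
#         'filelist.csv', ['fullname', 'size', 'date'])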
def aggregate_data(self):
pass
def identify_data(self, tpe, raw_data):
"""
function to decide how to process
the raw data (which can be any format).
Note - not 100% sure how this will be implemented
should we pass the filename (currently takes a line)
"""
num_applicable_rules = 0
formatted_data = self.format_raw_data(tpe, raw_data)
for m in self.maps:
if m.tpe == tpe:
num_applicable_rules += 1
self.process_rule(m, formatted_data, tpe)
return num_applicable_rules
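# Usage sketch - identify_data returns how many rules matched the type:
#     num_rules = m.identify_data('text', 'The cat sat on the mat')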
def process_rule(self, m, dct, tpe):
"""
uses the MapRule 'm' to run through the 'dict'
and extract data based on the rule
"""
print('TODO - ' + tpe + ' + applying rule ' + str(m).replace('\n', '') )
#print(dct)
def format_raw_data(self, tpe, raw_data):
"""
uses type to format the raw information to a dictionary
usable by the mapper
"""
if tpe == 'text':
formatted_raw_data = self.parse_text_to_dict(raw_data)
elif tpe == 'file':
formatted_raw_data = self.parse_file_to_dict(raw_data)
else:
formatted_raw_data = {'ERROR':'unknown data type', 'data':[raw_data]}
return formatted_raw_data
def parse_text_to_dict(self, txt):
"""
takes a string and parses via NLP, ready for mapping
"""
op = {}
print('TODO - import NLP, split into verbs / nouns')
op['nouns'] = txt
op['verbs'] = txt
return op
def parse_file_to_dict(self, fname):
"""
process the file according to the mapping rules.
The cols list must match the columns in the filename
"""
print('TODO - parse_file_to_dict ' + fname)
for m in self.maps:
if m.tpe == 'file':
if m.key[0:3] == 'col':
print('reading column..')
def generate_map_from_dataset(self, l_dataset):
"""
creates a map file (in the standard CSV format) based on
columns of a dataset.
1. read column names, lookup names in list
2. read column content, get highest match of distinct values
from ontology lists (eg, Years, countries, cities, ages)
"""
l_map = []
headers = l_dataset.get_header()
print(headers)
for col_num, col in enumerate(headers):
if col != '':
l_map.append('column:name:' + str(col_num) + '=' + l_dataset.force_to_string(col))
for col_num, col in enumerate(headers):
if col != '':
vals = l_dataset.get_distinct_values_from_cols([col])
l_map.append('column:count:distinct:' + col + '=' + str(len(vals[0])) )
for col_num, col in enumerate(headers):
if col != '':
col_vals = l_dataset.count_unique_values(col_num, col, 10)
for val_num, v in enumerate(col_vals):
l_map.append('column:topvalues:' + col + ':' + str(val_num) + '=' + v )
#l_map.append('column:values:top5:' + str(row_num) + '=' + col_vals)
return l_map
def create_map_from_file(self, data_filename):
"""
reads the data_filename into a matrix and calls
generate_map_from_dataset to produce a .rule file based on the data.
For all datafiles mapped, there exists a .rule file to define it
"""
op_filename = data_filename + '.rule'
dataset = mod_datatable.DataTable(data_filename, ',')
dataset.load_to_array()
l_map = self.generate_map_from_dataset(dataset)
with open(op_filename, 'w') as f:
f.write('# rules file autogenerated by mapper.py v0.1\n')
f.write('filename:source=' + data_filename + '\n')
f.write('filename:rule=' + op_filename + '\n\n')
for row in l_map:
#print('ROW = ' , row)
if type(row) is str:
f.write(row + '\n')
else:
for v in row:
f.write(v)
#def List2String(l):
# res = ""
# for v in l:
# res = res + v
# return res
class MapRule(object):
"""
manages the parsing of rules in the mapping table.
A rule can be a classification such as
1. File types: rule is file, [xtn], [doc_type]
eg
file .php program
file .docx document
file .htm document
file .html document
file .xls data_file
file .xlsx data_file
or it can be a text relationship
text object all
text event all
text action all
text relationship all
Column rules are currently handled separately (see MapColumns / MapColumn below)
"""
def __init__(self, raw_line):
"""
takes a raw row in the map file and extracts info
"""
cols = raw_line.split(',')
self.tpe = cols[0].strip()
self.key = cols[1].strip()
self.val = cols[2].strip()
def __str__(self):
"""
display a map rule to string
"""
return self.tpe.ljust(15) + self.key.ljust(15) + self.val.ljust(15) + '\n'
def format_for_file_output(self):
return self.tpe + ',' + self.key + ',' + self.val + '\n'
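# Example of a single rule (format: 'type, key, value'):
#     r = MapRule('file, .py, program')
#     # r.tpe == 'file', r.key == '.py', r.val == 'program'
#     # r.format_for_file_output() == 'file,.py,program\n'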
class MapColumns(object):
"""
directly maps columns in tables to aikif structures
"""
def __init__(self, col_file):
"""
takes a raw row in the map file and extracts info
"""
self.col_file = col_file
self.load_rules()
def __str__(self):
res = ' -- List of Column Mappings -- \n'
print('self.col_file = ' + self.col_file)
for m in self.col_maps:
res += str(m)
#print(m)
return res
def load_rules(self):
"""
load the rules from file
"""
self.col_maps = []
#print("reading mapping table")
with open(self.col_file, 'r') as f:
for line in f:
rule = MapColumn(line)
#rule = line
self.col_maps.append(rule)
class MapColumn(object):
"""
Class to manage the content of a single column map rule.
It is designed to be re-usable for all rows in a map file,
so instantiate it once, then call the create_from_csv_line
to load a rule, and then use it (parse, or process).
Properties of the class are:
table
column
data_type
aikif_map
aikif_map_name
extract
format
where
index
table,column,data_type,aikif_map,aikif_map_name,extract,format,where,index
emails_sent.csv,subject,str,fact,email subject,,,,full
"""
def __init__(self, csv_line):
self.csv_line = csv_line
self.cols = []
self.table = ''
self.column = ''
self.data_type = ''
self.aikif_map = ''
self.aikif_map_name = ''
self.extract = ''
self.format = ''
self.where = ''
self.index = ''
self._parse_csv_col_rules()
def __str__(self):
res = ' Map Column\n'
res += 'table : ' + self.table + '\n'
res += 'column : ' + self.column + '\n'
res += 'data_type : ' + self.data_type + '\n'
res += 'aikif_map : ' + self.aikif_map + '\n'
res += 'aikif_map_name : ' + self.aikif_map_name + '\n'
res += 'extract : ' + self.extract + '\n'
res += 'format : ' + self.format + '\n'
res += 'where : ' + self.where + '\n'
res += 'index : ' + self.index + '\n'
return res
def extract_col(self, num):
txt = ''
try:
txt = self.cols[num].strip(' ').strip('\n')
return txt
except Exception as ex:
#print('cant put text into col ' , num, ' txt = ', txt, ' ', str(ex))
# TODO - only log issues AFTER sorting out mapping validation on load
return ''
def _parse_csv_col_rules(self):
"""
splits the CSV line of the current format and puts into
local class variables - mainly for testing, though this is
not the best method long term. (TODO - fix this)
"""
self.cols = self.csv_line.split(',')
self.table = self.extract_col(0)
self.column = self.extract_col(1)
self.data_type = self.extract_col(2)
self.aikif_map = self.extract_col(3)
self.aikif_map_name = self.extract_col(4)
self.extract = self.extract_col(5)
self.format = self.extract_col(6)
self.where = self.extract_col(7)
self.index = self.extract_col(8)

# --- end of file: aikif/mapper.py ---
class Projects(object):
"""
handles the ontologies for all your projects to
avoid replication of data entry
"""
def __init__(self):
self.ontology = []
self.project_list = []
def __str__(self):
res = '-- List of All Projects --\n'
for p in self.project_list:
res += p.nme.ljust(22) + p.desc + '\n'
return res
def __iter__(self):
for p in self.project_list:
yield p
def add_project(self, proj):
self.project_list.append(proj)
def add_ontology(self, name):
"""
handles the data sources used in projects, mainly as an
abstract to call the data sources in /lib and /dataTools
"""
self.ontology.append(name)
def get_by_name(self, name):
""" returns an object Project which matches name """
for p in self.project_list:
if p.nme == name:
return p
return None
def run(self):
"""
executes all tasks for each of the projects
which (depending on projects schedule) will
allow each project to run the required tasks.
"""
for p in self.project_list:
p.execute_tasks()
class Project(object):
"""
handles the projects in AIKIF, meaning logs the names
and parameters used for various algorithms.
"""
def __init__(self, name, tpe='', fldr='' , desc=''):
self.nme = name
self.goals = []
self.data_sources = []
self.datatables = []
self.ontology = []
self.links = []
self.tasks = []
self.fldr = fldr
self.tpe = tpe
self.desc = desc
self.details = [] # as much info as is needed for project
def __str__(self):
res = ' /---- Project ------------------------------- \n'
res += '| name = ' + self.nme + '\n'
res += '| desc = ' + self.desc + '\n'
res += '| fldr = ' + self.fldr + '\n'
res += '\---------------------------------------------\n'
if len(self.details) > 0:
res += ':Details:\n'
for d in self.details:
res += d[0] + '\t ' + d[1] + '\n'
if len(self.data_sources) > 0:
res += ':Data Sources:\n'
for d in self.data_sources:
res += d[0] + '\t ' + d[1] + '\n'
# [task_id, name, due_date, priority]
if len(self.tasks) > 0:
res += ':Tasks:\n'
for t in self.tasks:
res += t.name + '\n'
return res
def add_task(self, task):
"""
adds a task for the project
"""
self.tasks.append(task)
def add_source(self, name, location, schedule='Daily', op=''):
"""
handles the data sources used in projects, mainly as an
abstract to call the data sources in /lib and /dataTools
"""
if op == '':
op = name + '.log'
self.data_sources.append([name, location, schedule, op])
def add_detail(self, tpe, detail):
"""
handles the data sources used in projects, mainly as an
abstract to call the data sources in /lib and /dataTools
"""
self.details.append([tpe, detail])
def log_table(self, datatable):
"""
records a list of datatables used in the project
"""
self.datatables.append(datatable)
def record(self, tbl, tpe, col_data):
"""
takes a DataTable as param and adds a record
TODO - log details here
"""
#print(tpe)
tbl.add(col_data)
def execute_tasks(self):
"""
run execute on all tasks IFF prior task is successful
"""
for t in self.tasks:
print('RUNNING ' + str(t.task_id) + ' = ' + t.name)
t.execute()
if t.success != '__IGNORE__RESULT__':
print(t)
print('TASK RESULT :', t.result, ' but success = ' , t.success )
if t.result != t.success:
#raise Exception('Project execution failed at task ' + str(t.task_id) + ' = ' + t.name)
print('ABORTING TASK EXECUTION SEQUENCE' + str(t.task_id) + ' = ' + t.name)
break
def build_report(self, op_file, tpe='md'):
"""
create a report showing all project details
"""
if tpe == 'md':
res = self.get_report_md()
elif tpe == 'rst':
res = self.get_report_rst()
elif tpe == 'html':
res = self.get_report_html()
else:
res = 'Unknown report type passed to project.build_report'
with open(op_file, 'w') as f:
f.write(res)
def get_report_rst(self):
"""
formats the project into a report in RST format
"""
res = ''
res += '-----------------------------------\n'
res += self.nme + '\n'
res += '-----------------------------------\n\n'
res += self.desc + '\n'
res += self.fldr + '\n\n'
res += '.. contents:: \n\n\n'
res += 'Overview\n' + '===========================================\n\n'
res += 'This document contains details on the project ' + self.nme + '\n\n'
for d in self.details:
res += ' - ' + d[0] + ' = ' + d[1] + '\n\n'
res += '\nTABLES\n' + '===========================================\n\n'
for t in self.datatables:
res += t.name + '\n'
res += '-------------------------\n\n'
res += t.format_rst() + '\n\n'
return res
def get_report_md(self):
"""
formats the project into a report in MD format - WARNING - tables missing BR
"""
res = '#' + self.nme + '\n'
res += self.desc + '\n'
res += self.fldr + '\n'
res += '\n\n##TABLES\n' + '\n'
for t in self.datatables:
res += '###' + t.name + '\n'
res += '-------------------------\n'
res += str(t) + '\n\n'
return res
def get_report_html(self):
"""
formats the project into a report in HTML format
"""
res = '<h2>Project:' + self.nme + '</h2>'
res += '<p>' + self.desc + '</p>'
res += '<p>' + self.fldr + '</p>'
res += '<BR><h3>TABLES</h3>'
for t in self.datatables:
res += '<b>' + t.name + '<b><BR>'
res += '<p>' + str(t) + '</p>'
return res
class Task(object):
"""
handles a single task for a project
"""
def __init__(self, task_id, name, func, due_date=None, priority=None):
if type(task_id) is not int:
raise ValueError('Error - task_id must be an int')
self.task_id = task_id
self.name = name
self.func = func
self.due_date = due_date
self.priority = priority
self.result = 'NOT RUN'
self.success = True
self.params = []
def __str__(self):
res = '\nTask #' + str(self.task_id) + ' : ' + self.name + '\n'
if self.due_date:
res += 'Due Date : ' + self.due_date + '\n'
if self.priority:
res += 'Priority : ' + self.priority + '\n'
res += 'Function : ' + str(self.func.__name__) + '\n'
for i in self.params:
res += ' Param : ' + i[0] + ' = ' + self._force_str(i[1]) + '\n'
return res
def add_param(self, param_key, param_val):
"""
adds parameters as key value pairs
"""
self.params.append([param_key, param_val])
if param_key == '__success_test':
self.success = param_val # TODO - replace this flag-via-param approach with an explicit attribute
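# Example (my_load_func is a hypothetical callable) - mark a task so that a
# result of 'DONE' counts as success, or tell the runner to ignore the result:
#     t = Task(1, 'load data', my_load_func)
#     t.add_param('fname', 'raw.csv')
#     t.add_param('__success_test', 'DONE') # or '__IGNORE__RESULT__'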
def execute(self):
"""
executes all automatic tasks in order of task id
"""
func_params = []
exec_str = self.func.__name__ + '('
for p in self.params:
if p[0][0:2] != '__': # ignore custom param names
exec_str += p[0] + '="' + self._force_str(p[1]) + '", '
func_params.append(p[1])
exec_str = exec_str[:-2]
exec_str += ') # task' + str(self.task_id) + ': ' + self.name
self.result = self.func(*func_params)
print(exec_str + ' loaded ', self.result)
def _force_str(self, obj):
return str(obj) # lets see how this works
# if type(obj) is str:
# return obj
# elif type(obj) is int:
# return str(obj)
# elif type(obj) is list:
# txt = '['
# for l in obj:
# txt += self._force_str(l) + ','
# return txt + ']'
# elif type(obj) is dict:
# txt = '{'
# for k,v in obj.items():
# txt += k + '=' + self._force_str(v) + ','
# return txt + '}'
# else:
# return 'todo - convert type'

# --- end of file: aikif/project.py ---
import os
import time
import getpass
import socket
import random
from decorators import debug
from decorators import show_timing
class Log(object):
"""
Main logging class for AIKIF should record appropriate
actions and summarise to useful information.
STATUS: currently logs to 4 log files and does simple
aggregation, but still in alpha
TODO:
- should use python logger
- work out how to pass rules for each logfile to
identify useful information for that program
"""
def __init__(self, fldr):
"""
pass the folder on command line, use os.path.join
to null effect if trailing os.sep if passed
"""
self.log_folder = os.path.abspath(os.path.join(fldr))
self.logFileProcess = os.path.join(self.log_folder,'process.log')
self.logFileSource = os.path.join(self.log_folder,'source.log')
self.logFileCommand = os.path.join(self.log_folder,'command.log')
self.logFileResult = os.path.join(self.log_folder,'result.log')
ensure_dir(self.logFileCommand) # need to pass file not the folder for this to work
self.session_id = self.get_session_id()
self.watch_points = [] # watch points list of dicts to watch for which represent key results
def __str__(self):
return self.log_folder
def add_watch_point(self, string, rating, importance=5):
"""
For a log session you can add as many watch points
which are used in the aggregation and extraction of
key things that happen.
Each watch point has a rating (up to you and can range
from success to total failure and an importance for
finer control of display
"""
d = {}
d['string'] = string
d['rating'] = rating
d['importance'] = importance
self.watch_points.append(d)
def get_folder_process(self):
return self.logFileProcess
def get_session_id(self):
"""
get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARNING - this can give duplicate sessions when 2 apps hit it
at the same time.
"""
max_session = '0'
try:
with open(self.log_folder + os.sep + '_sessions.txt', 'r') as f:
for txt in f:
if txt.strip('\n') != '':
max_session = txt
except Exception:
max_session = '1'
this_session = str(int(max_session) + random.randint(9,100)).zfill(9) # not a great way to ensure uniqueness - TODO FIX
with open(self.log_folder + os.sep + '_sessions.txt', 'a') as f2:
f2.write(this_session + '\n')
return this_session
@show_timing
def estimate_complexity(self, x,y,z,n):
"""
calculates a rough guess of runtime based on product of parameters
"""
num_calculations = x * y * z * n
run_time = num_calculations / 100000 # a 2014 PC does about 100k calcs in a second (guess based on prior logs)
return self.show_time_as_short_string(run_time)
def show_time_as_short_string(self, seconds):
"""
converts seconds to a string in terms of
seconds -> years to show complexity of algorithm
"""
if seconds < 60:
return str(seconds) + ' seconds'
elif seconds < 3600:
return str(round(seconds/60, 1)) + ' minutes'
elif seconds < 3600*24:
return str(round(seconds/3600, 1)) + ' hours'
elif seconds < 3600*24*365:
return str(round(seconds/(3600*24), 1)) + ' days'
else:
print('WARNING - this will take ' + str(seconds/(3600*24*365)) + ' YEARS to run' )
return str(round(seconds/(3600*24*365), 1)) + ' years'
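# Example (rough guess only): 10 * 400 * 3 * 100 = 1,200,000 calculations,
# so estimate_complexity(10, 400, 3, 100) returns '12.0 seconds'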
def _log(self, fname, txt, prg=''):
"""
logs an entry to fname along with standard date and user details
"""
if os.sep not in fname:
fname = self.log_folder + os.sep + fname
delim = ','
q = '"'
dte = TodayAsString()
usr = GetUserName()
hst = GetHostName()
i = self.session_id
if prg == '':
prg = 'cls_log.log'
logEntry = q + dte + q + delim + q + i + q + delim + q + usr + q + delim + q + hst + q + delim + q + prg + q + delim + q + txt + q + delim + '\n'
with open(fname, "a", encoding='utf-8', errors='replace') as myfile:
myfile.write(logEntry)
#@debug
def record_source(self, src, prg=''):
"""
function to collect raw data from the web and hard drive
Examples - new source file for ontologies, email contacts list, folder for xmas photos
"""
self._log(self.logFileSource , force_to_string(src), prg)
def record_process(self, process, prg=''):
"""
log a process or program - log a physical program (.py, .bat, .exe)
"""
self._log(self.logFileProcess, force_to_string(process), prg)
def record_command(self, cmd, prg=''):
"""
record the command passed - this is usually the name of the program
being run or task being run
"""
self._log(self.logFileCommand , force_to_string(cmd), prg)
def record_result(self, res, prg=''):
"""
record the output of the command. Records the result, can have
multiple results, so will need to work out a consistent way to aggregate this
"""
self._log(self.logFileResult , force_to_string(res), prg)
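# A minimal logging sketch (assumes the folder is writable; the file and
# program names here are hypothetical):
#     lg = Log('/tmp/aikif_logs')
#     lg.record_source('contacts.csv', 'import_contacts.py')
#     lg.record_command('import', 'import_contacts.py')
#     lg.record_result('loaded 42 rows', 'import_contacts.py')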
class LogSummary(object):
"""
Aggregating Logs. The goal of this class is to allow for
multiple usable aggregates to be automatically obtained
from the standard AIKIF log files.
"""
def __init__(self, log_object, fldr):
self.process_file = log_object.logFileProcess
self.command_file = log_object.logFileCommand
self.result_file = log_object.logFileResult
self.source_file = log_object.logFileSource
self.log_folder = fldr
self.log_sum = fldr + os.sep + 'log_sum.csv'
def __str__(self):
txt = ''
try:
with open(self.log_sum, "r") as f:
txt = f.read()
except Exception:
txt = "Summary File doesn't exist : " + self.log_sum
return txt
def filter_by_program(self, prg, opFile):
"""
parse the log files and extract entries from all
logfiles to one file per program (program is the
2nd to last entry each logfile)
"""
log_1 = open(self.process_file, 'r')
log_2 = open(self.command_file, 'r')
log_3 = open(self.result_file, 'r')
log_4 = open(self.source_file, 'r')
with open(opFile, 'a') as f:
for line in log_1:
if prg in line:
f.write('PROCESS, ' + line)
for line in log_2:
if prg in line:
f.write('COMMAND, ' + line)
for line in log_3:
if prg in line:
f.write('RESULT, ' + line)
for line in log_4:
if prg in line:
f.write('SOURCE, ' + line)
log_1.close()
log_2.close()
log_3.close()
log_4.close()
def extract_logs(self, fname, prg):
"""
read a logfile and return entries for a program
"""
op = []
with open(fname, 'r') as f:
for line in f:
if prg in line:
op.append(line)
return op
def summarise_events(self):
"""
takes the logfiles and produces an event summary matrix
date command result process source
20140421 9 40 178 9
20140423 0 0 6 0
20140424 19 1 47 19
20140425 24 0 117 24
20140426 16 0 83 16
20140427 1 0 6 1
20140429 0 0 0 4
"""
all_dates = []
d_command = self._count_by_date(self.command_file, all_dates)
d_result = self._count_by_date(self.result_file, all_dates)
d_process = self._count_by_date(self.process_file, all_dates)
d_source = self._count_by_date(self.source_file, all_dates)
with open(self.log_sum, "w") as sum_file:
sum_file.write('date,command,result,process,source\n')
for dte in sorted(set(all_dates)):
sum_file.write(dte + ',')
if dte in d_command:
sum_file.write(str(d_command[dte]) + ',')
else:
sum_file.write('0,')
if dte in d_result:
sum_file.write(str(d_result[dte]) + ',')
else:
sum_file.write('0,')
if dte in d_process:
sum_file.write(str(d_process[dte]) + ',')
else:
sum_file.write('0,')
if dte in d_source:
sum_file.write(str(d_source[dte]) + '\n')
else:
sum_file.write('0\n')
def _count_by_date(self, fname, all_dates):
"""
reads a logfile and returns a dictionary by date
showing the count of log entries
"""
if not os.path.isfile(fname):
return {}
d_log_sum = {}
with open(fname, "r") as raw_log:
for line in raw_log:
cols = line.split(',')
dte = cols[0].strip('"')[0:10].replace('-', '')
all_dates.append(dte)
if dte in d_log_sum:
d_log_sum[dte] += 1
else:
d_log_sum[dte] = 1
return d_log_sum
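# Usage sketch for LogSummary (aggregates the four Log files by date):
#     summary = LogSummary(lg, '/tmp/aikif_logs')
#     summary.summarise_events() # writes log_sum.csv in the format shown above
#     print(summary)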
#@debug
def ensure_dir(f):
""" creates the directory that file f lives in, if it does not already exist """
d = os.path.dirname(f)
if d != '' and not os.path.exists(d):
os.makedirs(d)
def List2String(l):
res = "["
for v in l:
res = res + str(v) + ','
return res + ']'
def Dict2String(d):
res = "{"
for k, v in d.items():
res += k + ':' + str(v) + ','
return res + '}'
def TodayAsString():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def force_to_string(unknown):
"""
converts and unknown type to string for display purposes.
"""
result = ''
if type(unknown) is str:
result = unknown
if type(unknown) is int:
result = str(unknown)
if type(unknown) is float:
result = str(unknown)
if type(unknown) is dict:
result = Dict2String(unknown)
if type(unknown) is list:
result = List2String(unknown)
return result
def GetUserName():
"""
return username of person logged onto host PC
"""
return getpass.getuser()
def GetHostName():
"""
returns computer name, based on socket class
"""
return socket.gethostname()

# --- end of file: aikif/cls_log.py ---
import os
import config as mod_cfg
import yaml
root_folder = mod_cfg.fldrs['root_path']
dataPath = root_folder + os.sep + "data"
dataFiles = []
"""
Load the ontology lookup files externally
See issue #22 for details on moving to local ontology
https://github.com/acutesoftware/AIKIF/issues/22
"""
subject_file = root_folder + os.sep + "data" + os.sep + "ref" + os.sep + "ONTOLOGY_SUBJECT_AREA.txt"
file_type_file = root_folder + os.sep + "data" + os.sep + "ref" + os.sep + "ONTOLOGY_FILE_TYPE.txt"
dataFileTypes = []
dataSubjectAreas = []
def load_data_subject_areas(subject_file):
"""
reads the subject file to a list, to confirm config is setup
"""
lst = []
if os.path.exists(subject_file):
with open(subject_file, 'r') as f:
for line in f:
lst.append(line.strip())
else:
print('MISSING DATA FILE (subject_file) ' , subject_file)
print('update your config.py or config.txt')
return lst
def load_data_file_types(file_type_file):
lst = []
if os.path.exists(file_type_file):
with open(file_type_file, 'r') as f:
for line in f:
lst.append(line.strip())
else:
print('MISSING DATA FILE (file_type_file) ' , file_type_file)
print('update your config.py or config.txt')
return lst
class FileMap(object):
"""
Provides mapping to file names
"""
def __init__(self, lst_type, lst_subj):
self.root_folder = root_folder
self.dataPath = root_folder + os.sep + "data"
self.dataFiles = []
if lst_type == []:
self.lst_type = load_data_file_types(file_type_file)
else:
self.lst_type = lst_type
if lst_subj == []:
self.lst_subj = load_data_subject_areas(subject_file)
else:
self.lst_subj = lst_subj
def get_root_folder(self):
"""
returns root path for data folder - used by other modules in aikif
"""
return self.root_folder
def get_datapath(self):
"""
returns root path for data folder - used by other modules in aikif
"""
return self.dataPath
def find_ontology(self, txt):
"""
top level function used for new data processing which attempts
to find a level in a hierarchy and return the key and filename
usage: res = self.find_ontology('file') # returns ['SYSTEM-PC-FILE']
"""
totFound = 0
searchString = txt.upper()
match = []
if searchString != '':
for i in self.lst_subj:
if searchString in i:
totFound = totFound + 1
match.append(i)
if len(match) == 0:
match.append('_TOP')
return match
def find_type(self, txt):
"""
top level function used to simply return the
ONE ACTUAL string used for data types
"""
searchString = txt.upper()
match = 'Unknown'
for i in self.lst_type:
if searchString in i:
match = i
return match
def get_full_filename(self, dataType, subjectArea):
"""
returns the file based on dataType and subjectArea
"""
return dataPath + os.sep + 'core' + os.sep + dataType + '_' + subjectArea + '.CSV'
def get_filename(self, dataType, subjectArea):
"""
get relative filename of core file
"""
return dataType + '_' + subjectArea + '.CSV'
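# Usage sketch (assumes the ontology lookup files exist under data/ref):
#     fm = FileMap([], [])
#     fm.find_ontology('file') # e.g. ['SYSTEM-PC-FILE']
#     fm.get_filename('OBJECT', 'SYSTEM-PC-FILE') # 'OBJECT_SYSTEM-PC-FILE.CSV'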
def add_file_to_mapping_list(self, fname, lst):
"""
adds the name of the file to the list
"""
lst.append(fname)
def check_ontology(fname):
"""
reads the ontology yaml file and does basic verification
"""
with open(fname, 'r') as stream:
y = yaml.safe_load(stream)
import pprint
pprint.pprint(y)

# --- end of file: aikif/cls_file_mapping.py ---
import os
import cls_log as mod_log
import config as mod_cfg
import cls_file_mapping as mod_filemap
import aikif.lib.cls_filelist as mod_fl
import aikif.lib.cls_file as mod_file
root_folder = os.path.abspath(mod_cfg.fldrs['root_path'] + os.sep )
class Programs(object):
"""
Class to manage a list of programs for AIKIF
"""
def __init__(self, name, fldr):
self.name = name
self.fldr = fldr
self.lstPrograms = []
self.log_folder = mod_cfg.fldrs['log_folder']
self.lg = mod_log.Log(self.log_folder)
self.lg.record_command('program', 'generating program list in - ' + self.log_folder)
self.list_all_python_programs()
def __str__(self):
"""
return a summary of programs
"""
res = 'List of Programs in ' + self.fldr + '\n'
for p in self.lstPrograms:
res += p[0] + '\n'
return res
def list_all_python_programs(self):
"""
collects a filelist of all .py programs
"""
self.tot_lines = 0
self.tot_bytes = 0
self.tot_files = 0
self.tot_loc = 0
self.lstPrograms = []
fl = mod_fl.FileList([self.fldr], ['*.py'], ["__pycache__", "/venv/", "/venv2/", ".git"])
for fip in fl.get_list():
if '__init__.py' not in fip:
self.add(fip, 'TODO - add comment')
f = mod_file.TextFile(fip)
self.tot_lines += f.count_lines_in_file()
self.tot_loc += f.count_lines_of_code()
self.tot_bytes += f.size
self.tot_files += 1
print('All Python Program Statistics')
print('Files = ', self.tot_files, ' Bytes = ', self.tot_bytes, ' Lines = ', self.tot_lines, ' Lines of Code = ', self.tot_loc)
def add(self, nme, desc):
"""
Adds a program to the list, with default desc
"""
self.lstPrograms.append([nme,desc])
#self.lg.record_process('program - generating program list in - ' + self.log_folder)
def comment(self, nme, desc):
"""
Adds a comment to the existing program in the list,
logs the reference and TODO - adds core link to processes
"""
if nme != '':
program_exists = False
for i in self.lstPrograms:
print(i)
if nme in i[0]:
i[1] = desc
program_exists = True
if program_exists is False: # file is not in the list
self.lstPrograms.append([nme, desc + " - <I>FILE DOESN'T EXIST</I>"])
self.lg.record_process('adding description to - ' + nme)
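# Usage sketch (aikif_folder is a hypothetical path; .py files under it
# are scanned on instantiation):
#     prg = Programs('AIKIF programs', aikif_folder)
#     prg.comment('programs.py', 'maintains the program list')
#     prg.save('program_list.csv')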
def save(self, fname=''):
"""
Save the list of items to AIKIF core and optionally to local file fname
"""
if fname != '':
with open(fname, 'w') as f:
for i in self.lstPrograms:
f.write(self.get_file_info_line(i, ','))
# save to standard AIKIF structure
filemap = mod_filemap.FileMap([], [])
#location_fileList = filemap.get_full_filename(filemap.find_type('LOCATION'), filemap.find_ontology('FILE-PROGRAM')[0])
object_fileList = filemap.get_full_filename(filemap.find_type('OBJECT'), filemap.find_ontology('FILE-PROGRAM')[0])
print('object_fileList = ' + object_fileList + '\n')
if os.path.exists(object_fileList):
os.remove(object_fileList)
self.lstPrograms.sort()
try:
with open(object_fileList, 'a') as f:
f.write('\n'.join([i[0] for i in self.lstPrograms]))
except Exception as ex:
print('ERROR = cant write to object_filelist ' , object_fileList, str(ex))
def get_file_info_line(self, fname, delim):
"""
gathers info on a python program in list and formats as string
"""
txt = ''
f = mod_file.File(fname[0])
txt += '"' + f.fullname + '"' + delim
txt += '"' + f.name + '"' + delim
txt += '"' + f.path + '"' + delim
txt += '"' + f.GetDateAsString(f.date_modified)[2:10] + '"' + delim
txt += '"' + str(f.size) + '"' + delim
return txt + '\n'
def get_file_info_web(self, fname, delim='<BR>\n'):
"""
gathers info on a python program in list and formats as string
"""
txt = ''
f = mod_file.File(fname[0])
txt += '<sup>' + f.name + '</sup>' + delim
txt += '<sup>' + fname[1] + '</sup>' + delim
txt += '<sub><sup><span white-space:nowrap;>' + f.GetDateAsString(f.date_modified)[2:10] + '</span></sup></sub>' + delim
txt += '<sup><sup>' + str(f.size) + '</sup></sup>' + delim
return txt + '\n'
def collect_program_info(self, fname):
"""
gets details on the program, size, date, list of functions
and produces a Markdown file for documentation
"""
md = '#AIKIF Technical details\n'
md += 'Autogenerated list of programs with comments and progress\n'
md += '\nFilename | Comment | Date | Size\n'
md += '--- | --- | --- | ---\n'
for i in self.lstPrograms:
md += self.get_file_info_line(i, ' | ')
# save the details an Markdown file
with open(fname, 'w') as f:
f.write(md)

# --- end of file: aikif/programs.py ---
import os
import sys
import codecs
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root_folder)
import cls_log as mod_log
import config as mod_cfg
import lib.cls_filelist as mod_fl
silent = 'N'
if len(sys.argv) == 2:
if sys.argv[1] == 'Q':
silent = 'Y'
ndxPath = mod_cfg.fldrs['public_data_path'] + os.sep + 'index'
ndxFile = ndxPath + os.sep + 'ndxFull.txt'
opIndex = ndxPath + os.sep + 'ndxWordsToFiles.txt'
ignore_files = ["__pycache__", ".git"]
def index():
"""
main function - outputs in following format BEFORE consolidation (which is TODO)
# filename, word, linenumbers
# refAction.csv, ActionTypeName, 1
# refAction.csv, PhysicalType, 1
# goals.csv, Cleanliness, 11
"""
lg = mod_log.Log(mod_cfg.fldrs['localPath'])
lg.record_command('Starting indexing', 'index.py') # sys.modules[self.__module__].__file__)
if silent == 'N':
print('------------------')
print('Rebuilding Indexes')
print('------------------')
with open(ndxFile, "w") as ndx:
ndx.write('filename, word, linenumbers\n')
files_to_index = mod_fl.FileList([mod_cfg.fldrs['public_data_path'] + os.sep + 'core'], ['*.csv'], ignore_files, "files_to_index_filelist.csv")
if silent == 'N':
print(format_op_hdr())
for f in files_to_index.get_list():
buildIndex(f, ndxFile, append='Y', silent=silent)
# now build the one big index file
consolidate(ndxFile, opIndex )
lg.record_command('Finished indexing', 'index.py') #, fle.GetModuleName())
if silent == 'N':
print('Done')
def buildIndex(ipFile, ndxFile, append='Y', silent='N', useShortFileName='Y'):
"""
this creates an index of a text file specifically for use in AIKIF
indexes the ontology descriptions first, followed by values, and lastly
runs a final pass to get all delimited word parts.
"""
if append == 'N':
try:
os.remove(ndxFile)
except Exception as ex:
print('file already deleted - ignore' + str(ex))
delims = [',', '$', '&', '"', '%', '/', '\\', '.', ';', ':', '!', '?', '-', '_', ' ', '\n', '*', '\'', '(', ')', '[', ']', '{', '}']
# 1st pass - index the ontologies, including 2 depths up (later - TODO)
#buildIndex(ipFile, ndxFile, ' ', 1, 'Y')
# 2nd pass - use ALL delims to catch each word as part of hyphenated - eg AI Build py
totWords, totLines, uniqueWords = getWordList(ipFile, delims)
AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName)
if silent == 'N':
print(format_op_row(ipFile, totLines, totWords, uniqueWords))
show('uniqueWords', uniqueWords, 5)
DisplayIndexAsDictionary(uniqueWords)
def format_op_row(ipFile, totLines, totWords, uniqueWords):
txt = os.path.basename(ipFile).ljust(36) + ' '
txt += str(totLines).rjust(7) + ' '
txt += str(totWords).rjust(7) + ' '
txt += str(len(uniqueWords)).rjust(7) + ' '
return txt
def format_op_hdr():
txt = 'Base Filename'.ljust(36) + ' '
txt += 'Lines'.rjust(7) + ' '
txt += 'Words'.rjust(7) + ' '
txt += 'Unique'.ljust(8) + ''
return txt
def AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName='Y'):
""" Save the list of unique words to the master list """
if useShortFileName == 'Y':
f = os.path.basename(ipFile)
else:
f = ipFile
with open(ndxFile, "a", encoding='utf-8', errors='replace') as ndx:
word_keys = uniqueWords.keys()
#uniqueWords.sort()
for word in sorted(word_keys):
if word != '':
line_nums = uniqueWords[word]
ndx.write(f + ', ' + word + ', ' + line_nums + '\n')
def DisplayIndexAsDictionary(word_occurrences):
""" print the index as a dict """
word_keys = word_occurrences.keys()
for num, word in enumerate(word_keys):
print(word + ' ' + word_occurrences[word])
if num > 3:
break
def show(title, lst, full=-1):
"""
for testing, simply shows a list details
"""
txt = title + ' (' + str(len(lst)) + ') items :\n '
num = 0
for i in lst:
if full == -1 or num < full:
if type(i) is str:
txt = txt + i + ',\n '
else:
txt = txt + '['
for j in i:
txt = txt + str(j) + ', '
txt = txt + ']\n'
num = num + 1
try:
print(txt)
except Exception as ex:
print('index.show() - cant print line, error ' + str(ex))
def getWordList(ipFile, delim):
"""
extract a unique list of words and have line numbers that word appears
"""
indexedWords = {}
totWords = 0
totLines = 0
with codecs.open(ipFile, "r",encoding='utf-8', errors='replace') as f:
for line in f:
totLines = totLines + 1
words = multi_split(line, delim)
totWords = totWords + len(words)
for word in words:
cleanedWord = word.lower().strip()
if cleanedWord not in indexedWords:
indexedWords[cleanedWord] = str(totLines)
else:
indexedWords[cleanedWord] = indexedWords[cleanedWord] + ' ' + str(totLines)
return totWords, totLines, indexedWords
def multi_split(txt, delims):
"""
split by multiple delimiters
"""
res = [txt]
for delimChar in delims:
txt, res = res, []
for word in txt:
if len(word) > 1:
res += word.split(delimChar)
return res
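# Example (note that fragments of length <= 1 are dropped at each pass):
#     multi_split('one,two;three', [',', ';']) -> ['one', 'two', 'three']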
def consolidate(ipFile, opFile):
"""
make a single index file with 1 record per word which shows the word, file and linenums
# storms, knowledge.csv - 3
# string, rawData.csv - 1
# structure, EVENT_SYSTEM-PC-FILE.CSV - 18, OBJECT_SYSTEM-PC-FILE.CSV - 4, sample-filelist-for-AIKIF.csv - 18 18
# subgoal, goals.csv - 3 4 12 13 14, goal_types.csv - 8
# subgoals, goal_types.csv - 9
# subgoals;, goals.csv - 2
"""
curFile = ''
curWord = ''
curLineNums = ''
indexedWords = {}
with open(ipFile, "r", encoding='utf-8', errors='replace') as ip:
for line in ip:
cols = line.split(',')
curFile = cols[0]
curWord = cols[1]
curLineNums = cols[2].strip()
#DebugIndexing(curFile, curWord, curLineNums, line)
if curWord in indexedWords:
indexedWords[curWord] = indexedWords[curWord] + ', ' + curFile + ' - ' + curLineNums
else:
indexedWords[curWord] = curFile + ' - ' + curLineNums
with open(opFile, "w",encoding='utf-8', errors='replace') as op:
op.write('word, filenames\n') # this shows which words appear in which files
word_keys = indexedWords.keys()
for word in sorted(word_keys):
if word != '':
op.write(word + ', ' + indexedWords[word] + '\n')
if __name__ == '__main__':
index()

# --- end of file: aikif/index.py ---
import web_utils as web
import aikif.project as project
import aikif.dataTools.cls_datatable as cls_datatable
import aikif.config as mod_cfg
root_folder = mod_cfg.fldrs['log_folder']
def get_page():
txt = ''
projects_list = create_sample_projects()
#txt += str(projects_list).replace('\n', '<BR>') + '<BR><BR>'
txt += '<table border=1><tr><td>Name</td><td>Description</td><td>Folder</td></tr>'
for p in projects_list:
txt += '<tr>\n'
txt += '<td>' + p.nme + '</td>\n'
txt += '<td>' + p.desc + '</td>\n'
txt += '<td>' + p.fldr + '</td>\n'
txt += '</tr>\n'
txt += '</table>\n'
txt += web.build_edit_form('Add Project', '002', ['Project Name', 'Folder Location', 'Details'], '/projects')
return txt
def create_sample_projects():
proj1 = project.Project(name='Acute Software', desc='Custom Software development', fldr='')
proj1.add_detail('website', 'http://www.acutesoftware.com.au')
proj1.add_detail('email', '[email protected]')
proj2 = project.Project(name='Sales Log', desc='Record list of sales', fldr='')
proj2.add_detail('Note', 'List of sales taken from manual entries in test program')
tbl_exp = cls_datatable.DataTable('expenses.csv', ',', col_names=['date', 'amount', 'details'])
proj2.record(tbl_exp, 'Expense', ['2015-02-13', 49.94, 'restaurant'])
proj2.record(tbl_exp, 'Expense', ['2015-02-15', 29.00, 'petrol'])
proj2.record(tbl_exp, 'Expense', ['2015-02-17', 89.95, 'fringe tickets'])
proj_diary = project.Project(name='Diary', fldr=root_folder, desc='Diary database for PIM application')
proj_diary.add_source('Calendar', root_folder)
proj_diary.add_source('Bookmarks', root_folder)
proj_diary.add_source('File Usage', root_folder)
proj_diary.add_source('PC Usage', root_folder)
proj_diary.add_source('TODO List', root_folder)
all_projects = project.Projects()
all_projects.add_project(proj_diary)
all_projects.add_project(proj1)
all_projects.add_project(proj2)
return all_projects

# --- end of file: aikif/web_app/page_projects.py ---
import os
cur_folder = os.path.dirname(os.path.abspath(__file__))
aikif_folder = os.path.abspath(cur_folder + os.sep + ".." )
data_folder = os.path.abspath(aikif_folder + os.sep + '..' + os.sep + 'data' + os.sep + 'core')
def get_page(search_text):
"""
formats the entire search result in a table output
"""
lst = search_aikif(search_text)
txt = '<table class="as-table as-table-zebra as-table-horizontal">'
for result in lst:
txt += '<TR><TD>' + result + '</TD></TR>'
txt += '</TABLE>\n\n'
return txt
def search_aikif(txt, formatHTML=True):
"""
search for text - currently this looks in all folders in the
root of AIKIF but that also contains binaries so will need to
use the agent_filelist.py to specify the list of folders.
NOTE - this needs to use indexes rather than full search each time
"""
results = []
num_found = 0
import aikif.lib.cls_filelist as mod_fl
my_files = mod_fl.FileList([aikif_folder ], ['*.*'], ['*.pyc'])
files = my_files.get_list()
for f in files:
try:
num_found = 0
with open(f, 'r') as cur:
line_num = 0
for line in cur:
line_num += 1
if txt in line:
num_found += 1
if formatHTML is True:
results.append(format_result(line, line_num, txt))
else:
results.append([f, line, line_num, txt])
if num_found > 0:
if formatHTML is True:
results.append('<h3>' + f + ' = ' + str(num_found) + ' results</h3>')
else:
print(f + ' = ' + str(num_found) + '')
except Exception:
results.append('problem with file ' + f)
if len(results) == 0:
results.append("No results")
return results
def format_result(line, line_num, txt):
""" highlight the search result """
return ' ' + str(line_num) + ': ' + line.replace(txt, '<span style="background-color: #FFFF00">' + txt + '</span>')

# --- end of file: aikif/web_app/page_search.py ---
import os
import web_utils as web
cur_folder = os.path.dirname(os.path.abspath(__file__))
aikif_folder = os.path.abspath(cur_folder + os.sep + ".." )
root_folder = os.path.abspath(aikif_folder + os.sep + '..')
prog_file = 'program_list.html'
def get_page():
txt = '<a href="/programs/rebuild">Rebuild Program List</a><BR>'
txt += show_program_list()
return txt
def show_program_list():
txt = ''
try:
with open(prog_file, 'r') as f:
txt = f.read()
except Exception:
pass
return txt
def rebuild():
""" rebuilds the list of programs to file """
with open(prog_file, 'w') as f:
f.write(get_program_list())
def get_program_list():
"""
get a HTML formatted view of all Python programs
in all subfolders of AIKIF, including imports and
lists of functions and classes
"""
colList = ['FileName','FileSize','Functions', 'Imports']
txt = '<TABLE width=90% border=0>'
txt += format_file_table_header(colList)
fl = web.GetFileList(aikif_folder, ['*.py'], 'N')
for f in fl:
if '__init__.py' in f:
txt += '<TR><TD colspan=4><HR><H3>' + get_subfolder(f) + '</h3></td></tr>\n'
else:
txt += format_file_to_html_row(f, colList)
txt += '</TABLE>\n\n'
return txt
def get_subfolder(txt):
"""
extracts a displayable subfolder name from full filename
"""
marker = os.sep + 'aikif' + os.sep
ndx = txt.find(marker, 1)
return txt[ndx:].replace('__init__.py', '')
def format_file_table_header(lstCols):
txt = '<TR>'
if 'FullName' in lstCols:
txt += '<TD>Full Path and Filename</TD>'
if 'FileName' in lstCols:
txt += '<TD>File Name</TD>'
if 'Path' in lstCols:
txt += '<TD>Path</TD>'
if 'FileSize' in lstCols:
txt += '<TD>Size</TD>'
if 'Imports' in lstCols:
txt += '<TD>Imports</TD>'
if 'Functions' in lstCols:
txt += '<TD>List of Functions</TD>'
txt += '</TR>\n'
return txt
def format_file_to_html_row(fname, lstCols):
txt = '<TR>'
if 'FullName' in lstCols:
txt += '<TD>' + fname + '</TD>'
if 'FileName' in lstCols:
txt += '<TD>' + os.path.basename(fname) + '</TD>'
if 'Path' in lstCols:
txt += '<TD>' + os.path.abspath(fname) + '</TD>'
if 'FileSize' in lstCols:
txt += '<TD>' + format(os.path.getsize(fname), ",d") + '</TD>'
if 'Imports' in lstCols:
txt += '<TD>' + get_imports(fname) + '</TD>'
if 'Functions' in lstCols:
txt += '<TD>' + get_functions(fname) + '</TD>'
txt += '</TR>\n'
return txt
def get_functions(fname):
""" get a list of functions from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line.strip()[0:4] == 'def ':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#')[4:], ':') + '</PRE>\n'
if line[0:5] == 'class':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#'), ':') + '</PRE>\n'
return txt + '<BR>'
def strip_text_after_string(txt, junk):
""" used to strip any poorly documented comments at the end of function defs """
if junk in txt:
return txt[:txt.find(junk)]
else:
return txt
def get_imports(fname):
""" get a list of imports from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line[0:6] == 'import':
txt += '<PRE>' + strip_text_after_string(line[7:], ' as ') + '</PRE>\n'
return txt + '<BR>'

# --- end of file: aikif/web_app/page_programs.py ---
import csv
import os
import fnmatch
from flask import request
def list2html(lst):
"""
convert a list to html using table formatting
"""
txt = '<TABLE width=100% border=0>'
for l in lst:
txt += '<TR>\n'
if type(l) is str:
txt+= '<TD>' + l + '</TD>\n'
elif type(l) is list:
txt+= '<TD>'
for i in l:
txt += i + ', '
txt+= '</TD>'
else:
txt+= '<TD>' + str(l) + '</TD>\n'
txt += '</TR>\n'
txt += '</TABLE><BR>\n'
return txt
def GetFileList(rootPaths, lstXtn, shortNameOnly='Y'):
"""
builds a list of files and returns as a list
"""
numFiles = 0
opFileList = []
if type(rootPaths) == str:
rootPaths = [rootPaths]
for rootPath in rootPaths:
for root, dirs, files in os.walk(rootPath):
#print('found ' + str(len(dirs)) + ' directories')
for basename in files:
for xtn in lstXtn:
if fnmatch.fnmatch(basename, xtn):
filename = os.path.join(root, basename)
numFiles = numFiles + 1
if shortNameOnly == 'Y':
opFileList.append( os.path.basename(filename))
else:
opFileList.append(filename)
return sorted(opFileList)
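# Example (hypothetical folder) - collect all csv/txt base names recursively:
#     GetFileList(['/srv/data'], ['*.csv', '*.txt']) # sorted list of names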
def build_search_form():
"""
returns the html for a simple search form
"""
txt = '<form action="." method="POST">\n'
txt += ' <input type="text" name="search_text">\n'
txt += ' <input type="submit" name="my-form" value="Search">\n'
txt += '</form>\n'
return txt
def build_edit_form(title, id, cols, return_page):
"""
returns the html for a simple edit form
"""
txt = '<H3>' + title + '</H3>'
txt += '<form action="' + return_page + '" method="POST">\n' # return_page = /agents
txt += ' updating id:' + str(id) + '\n<BR>'
txt += ' <input type="hidden" name="rec_id" readonly value="' + str(id) + '"> '
txt += ' <TABLE width=80% valign=top border=1>'
for col_num, col in enumerate(cols):
txt += ' <TR>\n'
txt += ' <TD><div id="form_label">' + col + '</div></TD>\n'
txt += ' <TD><div id="form_input"><input type="text" name="col_' + str(col_num) + '"></div></TD>\n'
txt += ' </TR>\n'
txt += ' <TR><TD></TD>\n'
txt += ' <TD>\n'
txt += ' <input type="submit" name="update-form" value="Save Changes">\n'
txt += ' <input type="submit" name="delete-form" value="Delete">\n'
txt += ' <input type="submit" name="add-form" value="Add">\n'
txt += ' </TD></TR></TABLE>'
txt += '</form>\n'
return txt
def build_html_listbox(lst, nme):
"""
returns the html to display a listbox
"""
res = '<select name="' + nme + '" multiple="multiple">\n'
for l in lst:
res += ' <option>' + str(l) + '</option>\n'
res += '</select>\n'
return res
def build_data_list(lst):
"""
returns the html to display the supplied list as an HTML unordered list
"""
txt = '<H3>List</H3><UL>'
for i in lst:
txt += '<LI>' + str(i) + '</LI>'
txt += '</UL>'
return txt
def filelist2html(lst, fldr, hasHeader='N'):
"""
formats a standard filelist to htmk using table formats
"""
txt = '<TABLE width=100% border=0>'
numRows = 1
if lst:
for l in lst:
if hasHeader == 'Y':
if numRows == 1:
td_begin = '<TH>'
td_end = '</TH>'
else:
td_begin = '<TD>'
td_end = '</TD>'
else:
td_begin = '<TD>'
td_end = '</TD>'
numRows += 1
txt += '<TR>'
if type(l) is str:
txt += td_begin + link_file(l, fldr) + td_end
elif type(l) is list:
txt += td_begin
for i in l:
txt+= link_file(i, fldr) + '; '
txt += td_end
else:
txt += td_begin + str(l) + td_end
txt += '</TR>\n'
txt += '</TABLE><BR>\n'
return txt
def link_file(f, fldr):
"""
creates a html link for a file using folder fldr
"""
fname = os.path.join(fldr,f)
if os.path.isfile(fname):
return '<a href="/aikif/data/core/' + f + '">' + f + '</a>'
else:
return f
def dict_to_htmlrow(d):
"""
converts a dictionary to a HTML table row
"""
res = "<TR>\n"
for k, v in d.items():
if type(v) == str:
res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + v + '</p></TD>'
else:
res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + str(v) + '</p></TD>'
res += '</TR>\n'
return res
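# Example:
#     dict_to_htmlrow({'name': 'AIKIF'})
#     # -> '<TR>\n<TD><p>name:</p></TD><TD><p>AIKIF</p></TD></TR>\n'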
def read_csv_to_html_table(csvFile, hasHeader='N'):
"""
reads a CSV file and converts it to HTML
"""
txt = '<table class="as-table as-table-zebra as-table-horizontal">'
with open(csvFile, "r") as f: #
numRows = 1
for row in f:
if hasHeader == 'Y':
if numRows == 1:
td_begin = '<TH>'
td_end = '</TH>'
else:
td_begin = '<TD>'
td_end = '</TD>'
else:
td_begin = '<TD>'
td_end = '</TD>'
cols = row.split(',')
numRows += 1
txt += "<TR>"
for col in cols:
txt += td_begin
try:
colString = col
except Exception:
colString = '<font color=red>Error decoding column data</font>'
txt += colString.strip('"')
txt += td_end
txt += "</TR>\n"
txt += "</TABLE>\n\n"
return txt
def read_csv_to_html_list(csvFile):
"""
reads a CSV file and converts it to a HTML List
"""
txt = ''
with open(csvFile) as csv_file:
for row in csv.reader(csv_file, delimiter=','):
txt += '<div id="table_row">'
for col in row:
txt += " "
try:
txt += col
except Exception:
txt += 'Error'
txt += " "
txt += "</div>\n"
return txt

# --- end of file: aikif/web_app/web_utils.py ---
import sys
import os
print ("sys.version = ", sys.version)
print ("os.getcwd() = ", os.getcwd())
#AIKIF_WEB_VERSION = "PROD"
AIKIF_WEB_VERSION = "DEV"
AIKIF_VERSION_NUM = "Version 0.2.1 (alpha) - updated 15-Jan-2017"
import web_utils as web
from flask import Flask
from flask import request
app = Flask(__name__)
menu = [
['/', 'Home', 'This is the admin web interface for AIKIF (Artificial Intelligence Knowledge Information Framework)'],
['/todo', 'Todo', 'Project overview showing current list of tasks being worked on'],
['/data', 'Data', 'Shows the available data sets for AIKIF'],
['/projects', 'Projects', 'Manage projects'],
['/agents', 'Agents', 'Describes the agents capabilities, and last run status'],
['/programs','Programs', 'Details of the modules in AIKIF'],
['/about', 'About', 'About AIKIF and author contact']
]
###################### HELPER FUNCTIONS#################
def start_server():
if AIKIF_WEB_VERSION == "DEV":
print("WARNING - DEBUG MODE ACTIVE")
app.debug = True # TURN THIS OFF IN PRODUCTION
app.run()
###################### ROUTING #########################
@app.route("/")
def page_home():
txt = aikif_web_menu()
txt += web.build_search_form()
txt += "<H3>Pages on this site</h3><TABLE width=80% border=0 align=centre>\n"
for m in menu:
txt += '<TR><TD><a href="' + m[0] + '">' + m[1] + '</a></td><td>' + m[2] + '</td></tr>\n'
txt += '</table><BR>\n'
txt += '<H3>Status</h3>\n'
txt += AIKIF_VERSION_NUM + ' (Mode=' + AIKIF_WEB_VERSION + ')\n'
txt += "<BR><BR>\n"
txt += get_footer()
return txt
@app.route('/', methods=['POST'])
def search_post():
return(_search(request.form['search_text']))
def _search(search_text):
txt = aikif_web_menu()
txt += web.build_search_form()
import page_search
txt += page_search.get_page(search_text)
return txt
@app.route("/todo")
def page_todo():
txt = aikif_web_menu('Todo')
txt += web.build_search_form()
txt += "<H3>Dev Tasks</h3>\n"
txt += "<LI>implement mapping functionality of business rules</LI>\n"
txt += "<LI>web interface to control agents, including feedback status</LI>\n"
txt += "<LI></LI>\n"
txt += "<H3>Data Tasks</h3>\n"
txt += "<LI>define structures for core tables: events, people, facts, locations</LI>\n"
txt += "<LI>define flexible structure for raw data to knowledge to learning</LI>\n"
txt += "<LI>collect data output from existing proc_*.py needs to be properly integrated</LI>\n"
txt += "<LI>finish function to completely import random spreadsheet</LI>\n"
txt += "<H3>Config Tasks</h3>\n"
txt += "<LI>setup for users to auto build database</LI>\n"
txt += "<LI>get webserver running, deploy to restricted site</LI>\n"
txt += "<BR><BR>\n"
txt += get_footer()
return txt
@app.route("/projects")
def page_projects():
txt = aikif_web_menu('Projects')
import page_projects
txt += page_projects.get_page()
txt += get_footer()
return txt
@app.route("/data")
def page_data():
txt = aikif_web_menu('Data')
import page_data
txt += page_data.get_page()
txt += get_footer()
return txt
@app.route("/data/<dataFile>")
def page_data_show(dataFile):
txt = aikif_web_menu('Data')
import page_data
txt += page_data.get_page(dataFile)
txt += get_footer()
return txt
@app.route("/agents")
def page_agents():
txt = aikif_web_menu('Agents')
import page_agents as agt
txt += agt.get_page()
txt += get_footer()
return txt
@app.route("/agents", methods=['POST'])
def edit_agents():
res = ''
editedinfo = []
print('hi - about to get form values', request.form)
#editedinfo.append(request.form['Agent Name']) # request.form['search_text']
#editedinfo.append(request.form['Program Location'])
#editedinfo.append(request.form['params'])
for i in range(0,3):
editedinfo.append(request.form['col_' + str(i)])
#print('update-form ', request.form['update-form'] )
#print('add-form ', request.form['add-form'] )
#print('delete-form ', request.form['delete-form'] )
# only one of the three submit buttons is present in a given POST
try:
res = request.form['update-form']
except KeyError:
pass
try:
res = request.form['add-form']
except KeyError:
pass
try:
res = request.form['delete-form']
except KeyError:
pass
return res + str(editedinfo)
@app.route("/programs")
def page_programs():
txt = aikif_web_menu('Programs')
import page_programs as prg
txt += prg.get_page()
return txt
@app.route("/programs/rebuild")
def page_programs_rebuild():
txt = aikif_web_menu('Programs')
import page_programs as prg
prg.rebuild()
txt += prg.get_page()
return txt
@app.route("/about")
def page_about():
txt = aikif_web_menu('About')
import page_about as abt
txt += abt.get_page()
txt += get_footer()
return txt
def page_error(calling_page):
txt = '<BR><BR>'
txt += '<H2>Error - problem calling ' + calling_page + '</H2>'
txt += get_footer()
return txt
def aikif_web_menu(cur=''):
""" returns the web page header containing standard AIKIF top level web menu """
pgeHdg = ''
pgeBlurb = ''
if cur == '':
cur = 'Home'
txt = get_header(cur) #"<div id=top_menu>"
txt += '<div id = "container">\n'
txt += ' <div id = "header">\n'
txt += ' <!-- Banner -->\n'
txt += ' <img src = "' + os.path.join('/static','aikif_banner.jpg') + '" alt="AIKIF Banner"/>\n'
txt += ' <ul id = "menu_list">\n'
for m in menu:
if m[1] == cur:
txt += ' <LI id="top_menu_selected"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
pgeHdg = m[1]
try:
pgeBlurb = m[2]
except Exception:
pass
else:
txt += ' <LI id="top_menu"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
txt += " </ul>\n </div>\n\n"
txt += '<H1>AIKIF ' + pgeHdg + '</H1>\n'
txt += '<H4>' + pgeBlurb + '</H4>\n'
return txt
###################### TEMPLATES #########################
def get_header(pge=''):
txt = '<HTML><HEAD>\n'
txt += '<title>AIKIF:' + pge + '</title>\n'
txt += '<!-- Stylesheets for responsive design -->\n'
txt += '<meta name="viewport" content="width=device-width, initial-scale=1.0" />\n'
txt += '<link rel="stylesheet" type="text/css" href="' + os.path.join('/static','aikif.css') + '" media="screen" />\n'
txt += '<link rel="stylesheet" href="' + os.path.join('/static','aikif_mob.css')
txt += '" media="only screen and (min-device-width : 320px) and (max-device-width : 480px)">\n'
txt += '</HEAD>\n'
txt += '<body>\n'
return txt
def get_footer(pge=''):
txt = '\n\n<BR><BR><BR>\n<div id="footer">\n'
txt += pge
txt += '<HR><a href="http://www.acutesoftware.com.au/aikif/index.html">AIKIF web interface</a> - '
txt += 'written by Duncan Murray : [email protected]<BR>\n'
txt += AIKIF_WEB_VERSION + ':' + AIKIF_VERSION_NUM + '\n'
txt += 'Python version:' + sys.version + '\n'
txt += '</div></BODY></HTML>\n'
return txt
def escape_html(s):
    """ escape HTML special characters ('&' must be replaced first) """
    res = s.replace('&', '&amp;')
    res = res.replace('>', '&gt;')
    res = res.replace('<', '&lt;')
    res = res.replace('"', '&quot;')
    return res
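# e.g. escape_html('<b>"AI" & ML</b>') returns '&lt;b&gt;&quot;AI&quot; &amp; ML&lt;/b&gt;'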
def format_list_as_html_table_row(lst):
txt = '<TR>'
for i in lst:
txt = txt + '<TD>' + i + '</TD>'
txt = txt + '</TR>'
return txt
def format_csv_to_html(csvFile, opHTML):
    """
    reads csvFile and saves it to opHTML as a HTML table
    (previously a stub - the draft code from the old docstring
    has been completed and made runnable)
    """
    import csv   # local import, matching the style used elsewhere in this module
    txt = '<TABLE border=1>'
    with open(csvFile) as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            txt += "<TR>"
            for col in row:
                txt += "<TD>" + escape_html(col) + "</TD>"
            txt += "</TR>"
    txt += "</TABLE>"
    with open(opHTML, 'w') as f:
        f.write(txt)
    return txt
if __name__ == "__main__":
    start_server()

# ===== end of file: aikif/web_app/web_aikif.py (package AIKIF 0.2.2) =====
def TEST():
"""
Modules for testing happiness of 'persons' in 'worlds'
    based on simplistic preferences. Just a toy - don't take it seriously.
----- WORLD SUMMARY for : Mars -----
population = 0
tax_rate = 0.0
tradition = 0.9
equity = 0.0
Preferences for Rover
tax_min = 0.0
equity = 0.0
tax_max = 0.9
tradition = 0.9
Rover is Indifferent in Mars (0)
DETAILS
tax: Economic = 0.1 -> 0.3
tradition: Personal = 0.3 -> 0.9
equity: Personal = 0.1 -> 0.9
growth: Economic = 0.01 -> 0.09
"""
w = World('Mars', [0, 0.0, 0.9, 0.0])
print(w)
p = Person('Rover', {'tax_min':0.0, 'tax_max':0.9,'tradition':0.9, 'equity':0.0})
print(p)
h = Happiness(p,w)
#h.add_factor(HappinessFactors(name, type, min, max))
h.add_factor(HappinessFactors('tax', 'Economic', 0.1, 0.3))
h.add_factor(HappinessFactors('tradition', 'Personal', 0.3, 0.9))
h.add_factor(HappinessFactors('equity', 'Personal', 0.1, 0.9))
h.add_factor(HappinessFactors('growth', 'Economic', 0.01, 0.09))
print(h.show_details())
class World(object):
"""
    define a 'world' that all of the population live in
"""
def __init__(self, nme, params):
self.nme = nme
self.population = params[0]
self.tax_rate = params[1]
self.tradition = params[2]
self.equity = params[3]
self.world_locations = []
def __str__(self):
res = '\n----- WORLD SUMMARY for : ' + self.nme + ' -----\n'
res += 'population = ' + str( self.population) + '\n'
res += 'tax_rate = ' + str( self.tax_rate) + '\n'
res += 'tradition = ' + str( self.tradition) + '\n'
res += 'equity = ' + str( self.equity) #+ '\n'
return res
def add_location(self, loc):
"""
a world can have 0 or many locations - this adds one to the world
"""
self.world_locations.append(loc)
def get_population(self):
pop = 0
for loc in self.world_locations:
pop += loc.population
return pop
class WorldLocations(object):
"""
This is a subsection of the World with its own parameters
to allow people to experience maximum happiness (that's the idea anyway)
"""
def __init__(self, nme, params):
self.nme = nme
self.pos_x = 0 # may not use a grid, would be better as a graph
self.pos_y = 0 # to allow large populations to expand without effect
self.population = params[0]
self.tax_rate = params[1]
self.tradition = params[2]
self.equity = params[3]
def __str__(self):
res = '\n----- WORLD SUMMARY for : ' + self.nme + ' -----\n'
res += 'population = ' + str( self.population) + '\n'
res += 'tax_rate = ' + str( self.tax_rate) + '\n'
res += 'tradition = ' + str( self.tradition) + '\n'
res += 'equity = ' + str( self.equity) #+ '\n'
return res
class WorldFinder(object):
"""
Class to iterate through list of worlds (randomly generated
or using a solver / bit fit algorithm) to try and find the
best set of parameters for a world to make all people happy.
"""
def __init__(self, all_people):
self.all_people = all_people
self.net_happiness = 0
self.num_worlds = 0
self.unhappy_people = 0
self.tax_range = (0,7)
self.tradition_range = (1,9)
self.equity_range = (1,9)
def __str__(self):
res = '\n === World Finder Results ===\n'
res += 'Worlds tested = ' + str(self.num_worlds) + '\n'
res += 'Best happiness = ' + str(self.net_happiness) + '\n'
res += 'Num Unhappy people = ' + str(self.unhappy_people) + '\n'
res += 'Tot People in world = ' + str(len(self.all_people)) + '\n'
res += 'Everyone happy = ' + self.is_everyone_happy() + '\n'
return res
def is_everyone_happy(self):
"""
        returns 'Yes' if everyone is happy, otherwise 'No'
"""
if self.unhappy_people == 0:
return 'Yes'
else:
return 'No'
def solve(self, max_worlds=10000, silent=False):
"""
find the best world to make people happy
"""
self.num_worlds = 0
num_unhappy = 0
for tax_rate in range(self.tax_range[0],self.tax_range[1]):
for equity in range(self.equity_range[0],self.equity_range[1]):
for tradition in range(self.tradition_range[0],self.tradition_range[1]):
self.num_worlds += 1
                    if self.num_worlds > max_worlds:
                        return   # exit the whole search once the world limit is reached (break only left the innermost loop)
w = World(str(self.num_worlds).zfill(6), [5000, tax_rate/10, tradition/10, equity/10])
world_happiness = 0
num_unhappy = 0
for person in self.all_people:
wh = Happiness(person, w)
world_happiness += wh.rating
if wh.rating < 0:
num_unhappy += 1
if world_happiness > self.net_happiness:
self.net_happiness = world_happiness
self.unhappy_people = num_unhappy
if not silent:
print('found better world - ' + w.nme + ' = ' + str(world_happiness) + ' - total unhappy_people = ' + str(self.unhappy_people))
class HappinessFactors(object):
"""
class for parameters used to calculate happiness
h = Happiness(p, w)
    h.add_factor(HappinessFactors('tax rate', 'Economic', 0.2, 0.5))
"""
def __init__(self, name, tpe, mn, mx):
self.name = name
self.type = tpe
self.min = mn
self.max = mx
def __str__(self):
res = self.name.rjust(15) + ': '
res += self.type + ' = '
res += str(self.min) + ' -> '
res += str(self.max) + '\n'
return res
class Happiness(object):
"""
abstract to manage the happiness calculations.
    The purpose of this class is to attempt to assign a number
    to a person's happiness in a (limited parameters) world.
    Note - the original calculation was flat out wrong - just
    because the tax_rate is not ideal doesn't mean the person
    is unhappy; rather that is a desire or preference.
    It does have an influence, but the influence needs to be
    scaled right back.
    Options:
    need a class of preferences and their weightings, so that
    things like death by starvation carry a high unhappiness cost but
    wishing you were a flying dragon has a low impact on happiness
"""
def __init__(self, person, world):
self.person = person
self.world = world
self.factors = []
self.rating = 0
self.calculate()
def __str__(self):
"""
return happiness rating as description
"""
res = self.person.nme + ' is '
if self.rating > 50:
res += 'Very Happy'
elif self.rating > 25:
res += 'Happy'
elif self.rating > 5:
res += 'Slightly Happy'
elif self.rating > -5:
res += 'Indifferent'
elif self.rating > -25:
res += 'Slightly Unhappy'
elif self.rating > -50:
res += 'Unhappy'
else:
res += 'Very Unhappy'
res += ' in ' + self.world.nme + ' (' + str(self.rating) + ')'
return res
def show_details(self):
"""
extended print details of happiness parameters
"""
res = str(self)
res += '\nDETAILS\n'
for f in self.factors:
res += str(f)
return res
def add_factor(self, factor):
self.factors.append(factor)
    def calculate(self):
        """
        calculates the estimated happiness of a person
        living in a world. Each factor is compared against the
        world attribute of the same name where one exists,
        falling back to tax_rate (the old code compared every
        factor against tax_rate only, which looked like a bug)
        """
        self.rating = 0
        for f in self.factors:
            cur = getattr(self.world, f.name, self.world.tax_rate)
            self._update_pref(f.min, f.max, cur)
def _update_pref(self, lmin, lmax, cur):
"""
update the self rating based on the parameters.
If min max is a range (ie not equal) then add fixed value
to rating depending if current value is in range, otherwise
compare distance away from min/max (same value)
"""
rate_of_change_positive = 10
rate_of_change_negative = 2
add_positive = 10
add_negative = 2
if lmin == lmax:
self.rating -= int(abs(lmin - cur)*100) / 10
else:
if lmin <= cur:
self.rating += (int(abs(lmin - cur)*rate_of_change_positive)) + add_positive
else:
self.rating -= (int(abs(lmin - cur)*rate_of_change_negative)) + add_negative
if lmax >= cur:
self.rating += (int(abs(lmax - cur)*rate_of_change_positive)) + add_positive
else:
self.rating -= (int(abs(lmax - cur)*rate_of_change_negative)) + add_negative
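    # Worked example of the arithmetic above: with lmin=0.1, lmax=0.3 and a
    # current value of 0.2 (inside the range), both branches add roughly
    # +10 plus a small distance bonus; a current value of 0.5 (above lmax)
    # still gains on the lmin test but loses ~2 on the lmax test, so values
    # inside the preferred range score much higher than values outside it.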
class Person(object):
def __init__(self, nme, prefs):
self.prefs = prefs
self.nme = nme
def __str__(self):
res = 'Preferences for ' + self.nme + '\n'
for k in self.prefs:
res += k + ' = ' + str(self.prefs[k]) + '\n'
return res
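# Example (sketch) of searching for a liveable world with WorldFinder -
# the people and preference values here are illustrative only:
#   people = [Person('Ann', {'tax_min': 0.1, 'tax_max': 0.4, 'tradition': 0.5, 'equity': 0.5})]
#   finder = WorldFinder(people)
#   finder.solve(max_worlds=500, silent=True)
#   print(finder)
# (note: as written, solve() creates Happiness objects with no factors added,
#  so ratings stay at 0 unless factors are wired in)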
if __name__ == '__main__':
    TEST()

# ===== end of file: aikif/environments/happiness.py (package AIKIF 0.2.2) =====
import random
import environment as mod_env
class Internet(mod_env.Environment):
"""
    Simulated internet environment built on the base Environment class. Example output:
1 127.0.222.159:3674 (6 pages)
2 127.0.38.18:3218 (8 pages)
3 127.0.164.219:3963 (6 pages)
4 127.0.105.73:3106 (5 pages)
5 127.0.193.200:3862 (6 pages)
"""
def __init__(self, name, desc):
"""
Note that the base class handles the following
self.name = name
self.log = aikif.cls_log.Log(aikif.config.fldrs['log_folder'])
self.log.record_command('enviroment.py', 'Initilising base environment - ' + self.name)
"""
super(Internet, self).__init__(name)
self.websites = []
self.desc = desc
def __str__(self):
res = super(Internet, self).__str__()
res += ' Internet class \n'
for num, w in enumerate(self.websites):
res += str(num+1).ljust(3) + str(w) + ' (' + str(len(w.pages)) + ' pages)\n'
return res
def create(self, num_sites):
"""
Creates the environment
Code in Base class = self.log.record_process('enviroment.py', 'Creating environment - ' + self.name)
"""
super(Internet, self).create(num_sites)
#print('building websites')
for _ in range(0,num_sites):
self.websites.append(Website())
def destroy(self):
"""
Call this when the environment is no longer needed
Code in Base class = self.log.record_process('enviroment.py', 'Destroying environment - ' + self.name)
"""
super(Internet, self).destroy()
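# Example (sketch) of simulating a small internet:
#   net = Internet('test net', 'simulated web for agent testing')
#   net.create(5)      # builds 5 random websites
#   print(net)
#   net.destroy()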
class Website(object):
"""
manage the creation of a simulated website
"""
def __init__(self):
self.url = '127.0.' + str(random.randint(1,255)) + '.' + str(random.randint(1,255)) + ':' + str(random.randint(3000,4000))
self.pages = []
for _ in range(0,random.randint(3,8)):
w = WebPage()
#print(w)
self.pages.append(w)
def __str__(self):
return self.url
class WebPage(object):
"""
this is a random page in a site
"""
def __init__(self):
self.text = '<H1>Test</H1>this is a test'
self.title = 'Test page'
def __str__(self):
        return self.title

# ===== end of file: aikif/environments/internet.py (package AIKIF 0.2.2) =====
from rdflib import Graph
#from rdflib import URIRef, Literal, Namespace #, ConjunctiveGraph
from rdflib import RDF
from rdflib import RDFS
def main():
ip_folder = 'S:\\DATA\\opendata\\ontology\\OpenCyc\\'
fname = ip_folder + 'open-cyc.n3' # 770,166 tuples
#create_sample_file('open-cyc.n3', 'sample_open_cyc.n3', 5000)
#small_fname = 'sample_open_cyc.n3' # 6618 tuples
g = load_graph_from_rdf(fname)
show_graph_summary(g)
export(g, fname + ".CSV")
def load_graph_from_rdf(fname):
""" reads an RDF file into a graph """
print("reading RDF from " + fname + "....")
store = Graph()
store.parse(fname, format="n3")
print("Loaded " + str(len(store)) + " tuples")
return store
def show_graph_summary(g):
""" display sample data from a graph """
sample_data = []
print("list(g[RDFS.Class]) = " + str(len(list(g[RDFS.Class]))))
# Get Subject Lists
num_subj = 0
for subj in g.subjects(RDF.type):
num_subj += 1
if num_subj < 5:
sample_data.append("subjects.subject: " + get_string_from_rdf(subj))
print("g.subjects(RDF.type) = " + str(num_subj))
# Get Sample of Subjects, Predicates, Objects
num_subj = 0
for subj, pred, obj in g:
num_subj += 1
if num_subj < 5:
sample_data.append("g.subject : " + get_string_from_rdf(pred))
sample_data.append("g.predicate : " + get_string_from_rdf(subj))
sample_data.append("g.object : " + get_string_from_rdf(obj))
print("g.obj(RDF.type) = " + str(num_subj))
print ("------ Sample Data ------")
for line in sample_data:
print(line)
def export(g, csv_fname):
""" export a graph to CSV for simpler viewing """
with open(csv_fname, "w") as f:
num_tuples = 0
f.write('"num","subject","predicate","object"\n')
for subj, pred, obj in g:
num_tuples += 1
f.write('"' + str(num_tuples) + '",')
f.write('"' + get_string_from_rdf(subj) + '",')
f.write('"' + get_string_from_rdf(pred) + '",')
f.write('"' + get_string_from_rdf(obj) + '"\n')
print("Finished exporting " , num_tuples, " tuples")
def get_string_from_rdf(src):
    """ extracts the real content from an RDF info object """
    parts = src.split("/")
    return parts[-1].replace('"', '""')   # last URI segment, with CSV quote escaping
def create_sample_file(ip, op, num_lines):
""" make a short version of an RDF file """
with open(ip, "rb") as f:
with open(op, "wb") as fout:
for _ in range(num_lines):
fout.write(f.readline() )
if __name__ == "__main__":   # guard added so importing this module doesn't run the export
    main()

# ===== end of file: aikif/ontology/cyc_extract.py (package AIKIF 0.2.2) =====
# This script lists the sources and a short comment of each
# as well as functions to download and extract samples of each.
ontologyList = [ # links to various upper ontologies - http://en.wikipedia.org/wiki/Upper_ontology
{'name': 'WordNet',
'url': 'http://wordnet.princeton.edu/wordnet/download/',
'data': 'http://wordnetcode.princeton.edu/wn3.1.dict.tar.gz',
'localFile': 'S:\\DATA\\opendata\\ontology\\WordNet\\dict\\data.noun',
'rating': """Best word list, not in OWL format, though qualifies as an upper ontology by including
the most general concepts as well as more specialized concepts, related to each other not only by
the subsumption relations, but by other semantic relations as well, such as part-of and cause.
However, unlike Cyc, it has not been formally axiomatized so as to make the logical relations
between the concepts precise. It has been widely used in Natural language processing research""",
'tested': 'Untested'},
{'name': 'OpenCyc',
'url': 'http://en.wikipedia.org/wiki/Cyc#OpenCyc',
'data': 'http://sourceforge.net/projects/texai/files/open-cyc-rdf/1.1/open-cyc.rdf.ZIP/download',
'localFile': 'S:\\DATA\\opendata\\ontology\\OpenCyc\\open-cyc.rdf',
'rating': 'Was proprietry, now open source. Fairly precise, this is the best bet for AI applications',
'tested': 'Work in Progress'},
{'name': 'SUMO - Suggested Upper Merged Ontology',
'url': 'http://www.ontologyportal.org/',
'data': 'http://sigmakee.cvs.sourceforge.net/viewvc/sigmakee/KBs/?view=tar',
'localFile': 'S:\\DATA\\opendata\\ontology\\SUMO\\KBs\\Mid-level-ontology.kif',
'rating': 'Created by the IEEE working group P1600.1 - has multiple files by subject area which includes an upper ontology (which file?)',
'tested': 'Untested'},
{'name': 'DOLCE - Descriptive Ontology for Linguistic and Cognitive Engineering ',
'url': 'http://www.loa.istc.cnr.it/',
'data': 'http://www.loa-cnr.it/ontologies/DOLCE-Lite.owl',
'localFile': 'S:\\DATA\\opendata\\ontology\\DOLCE\\DOLCE-Lite.owl',
'rating': """Not an active project on website, but has a clear cognitive bias, in that it aims
at capturing the ontological categories underlying natural language and human common sense""",
'tested': 'Untested'},
{'name': 'DBPedia',
'url': 'http://wiki.dbpedia.org/Datasets',
'data': 'http://wiki.dbpedia.org/Downloads39',
'localFile': 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\dbpedia-ontology.owl.bz2.owl.bz2.owl',
'rating': 'The most comprehensive set of data based on Wikipedia (470M facts)',
'tested': 'Untested'},
{'name': 'BFO - Basic Formal Ontology',
'url': 'http://www.ifomis.org/bfo',
'data': 'http://www.ifomis.org/bfo/1.1',
'localFile': 'S:\\DATA\\opendata\\ontology\\BFO\\bfo-1.1.owl',
'rating': """Incorporates both three-dimensionalist and four-dimensionalist perspectives on
reality within a single framework. Has over 100 other ontologies build based on this""",
'tested': 'Untested'},
{'name': 'UMBEL',
'url': 'http://structureddynamics.com/resources.html#Ontologies',
'data': 'https://github.com/structureddynamics/UMBEL/blob/master/Ontology/umbel.n3',
'localFile': 'S:\\DATA\\opendata\\ontology\\UMBEL\\umbel.n3',
'rating': 'Maps to a simplified subset of the OpenCyc ontology (28,000 entries)',
'tested': 'Untested'},
{'name': 'DnS - Descriptions and Situations (implementation of DOLCE+DnS-Ultralite abbreviated to DUL) ',
'url': 'http://stlab.istc.cnr.it/stlab/The_Semantic_Technology_Laboratory_%28STLab%29',
'data': 'http://www.ontologydesignpatterns.org/ont/dul/DUL.owl',
'localFile': 'S:\\DATA\\opendata\\ontology\\DnS\\DUL.owl',
'rating': """constructivist ontology that pushes DOLCEs descriptive stance even
further allowing for context-sensitive redescriptions of the types and relations
postulated by other given ontologies""",
'tested': 'Untested'},
{'name': 'GFO - General Formal Ontology',
'url': 'http://www.onto-med.de/ontologies/index.jsp',
'data': 'http://www.onto-med.de/ontologies/gfo.owl',
'localFile': 'S:\\DATA\\opendata\\ontology\\GFO\\gfo-ato.owl',
'rating': 'have developed a top level ontology and a biological core ontology. OWL file is copyright, but redistribution allowed',
'tested': 'Untested'},
{'name': 'UFO - Unified Foundation Ontology',
'url': 'https://oxygen.informatik.tu-cottbus.de/drupal7/ufo/',
'data': '',
'localFile': '',
     'rating': 'new, pretty good. tested for complex domains, combines DOLCE and GFO. Could not find single download OWL file',
'tested': 'Untested'},
{'name': 'CIDOC Conceptual Reference Model',
'url': 'http://en.wikipedia.org/wiki/CIDOC_Conceptual_Reference_Model',
'data': 'http://www.cidoc-crm.org/rdfs/cidoc_crm_v5.0.4_official_release.rdfs',
'localFile': 'S:\\DATA\\opendata\\ontology\\CIDOC\\cidoc_crm_v5.0.4_official_release.rdfs',
'rating': """provides an extensible ontology for concepts and information in cultural heritage
and museum documentation. Includes its own version of an upper ontology in its core classes""",
'tested': 'Untested'},
{'name': 'COSMO - COmmon Semantic MOdel',
'url': 'http://ontolog.cim3.net/cgi-bin/wiki.pl?COSMO',
'data': 'http://www.micra.com/COSMO/COSMO.owl',
'localFile': 'S:\\DATA\\opendata\\ontology\\COSMO\\COSMO.owl',
'rating': 'The current (May 2009) OWL version of COSMO has over 6400 types (OWL classes), over 700 relations, and over 1400 restrictions',
'tested': 'Untested'},
{'name': 'YAMATO - Yet Another More Advanced Top Ontology',
'url': 'http://www.ei.sanken.osaka-u.ac.jp/hozo/onto_library/upperOnto.htm',
'data': 'http://www.ei.sanken.osaka-u.ac.jp/hozo/onto_library/download.php?filename=YAMATO20120714owl.zip',
'localFile': 'S:\\DATA\\opendata\\ontology\\YAMATO\\YAMATO20120714.owl',
'rating': 'complex but very advanced',
'tested': 'Untested'},
{'name': 'PROTON',
'url': 'http://www.ontotext.com/proton-ontology',
'data': 'http://www.ontotext.com/sites/default/files/proton/protontop.ttl',
'localFile': 'S:\\DATA\\opendata\\ontology\\PROTON\\protontop.ttl',
'rating': """basic subsumption hierarchy which provides coverage of most of the upper-level concepts
necessary for semantic annotation, indexing, and retrieval""",
'tested': 'Untested'},
{'name': 'IDEAS',
'url': 'http://www.ideasgroup.org/7Documents/',
'data': 'http://www.ideasgroup.org/file_download/5/IDEAS+Foundation+v1_0+Released+2009-04-24.xmi.zip',
'localFile': 'S:\\DATA\\opendata\\ontology\\IDEAS\\IDEAS Foundation v1_0 Released 2009-04-24.xmi',
'rating': 'The most common usage of IDEAS will be in direct exchange of information between architectural modelling tools are repositories',
'tested': 'Untested'},
{'name': 'MarineTLO',
'url': 'http://www.ics.forth.gr/isl/MarineTLO/',
'data': 'http://www.ics.forth.gr/isl/MarineTLO/v3/core_v3.owl',
'localFile': 'S:\\DATA\\opendata\\ontology\\MarineTLO\\core_v3.owl',
'rating': 'MarineTLO is a top-level ontology for the marine domain (also applicable to the terrestrial domain)',
'tested': 'Untested'},
{'name': 'MindOntology (OpenCog)',
'url': 'http://wiki.opencog.org/w/MindOntology',
'data': 'http://wiki.opencog.org/wikihome/index.php?title=Special:AllPages&namespace=104',
'localFile': 'S:\\DATA\\opendata\\ontology\\MindOntology\\mindOntology.csv',
'rating': 'Focussed for AGI use, but no single download found (arranged as wiki pages, converted to CSV 27/3/2014)',
'tested': 'Untested'},
{'name': 'DIY - eg build your own Ontology',
'url': 'http://localhost',
'data': '',
'localFile': '',
'rating': 'Not ideal - better to use existing sets, but best method to get a concise set of tuples',
'tested': 'Untested'}
]
documentList = [ # links to various documents, tools and general ontology related notes
{'title' : 'A Comparison of Upper Ontologies (Technical Report DISI-TR-06-21)',
'url' : 'http://www.disi.unige.it/person/MascardiV/Download/DISI-TR-06-21.pdf',
'author' : 'Viviana Mascardi1, Valentina Cordi, Paolo Rosso - Universita degli Studi di Genova, Italy',
'year' : '',
'localSavedFile': 'comparison-of-ontologies-DISI-TR-06-21.pdf',
'comment' : 'Summary of ontologies - compares BFO, cyc, DOLCE, GFO, PROTON, Sowas, SUMO'},
{'title': 'Ontology-based data integration',
'url': 'http://en.wikipedia.org/wiki/Ontology-based_data_integration',
'author': 'Wikipedia', 'year': '2013', 'localSavedFile': '',
'comment': 'short article with examples of approaches to use'},
{'title': 'Python RDF library',
'url': 'https://github.com/RDFLib/rdflib',
'author': '', 'year': '2013', 'localSavedFile': '',
'comment': 'good simple examples for using RDFLIB at https://rdflib.readthedocs.org/en/latest/'},
{'title': 'Protege - ontology editor',
'url': 'http://protege.stanford.edu/products.php#desktop-protege',
'author': 'Stanford', 'year': '', 'localSavedFile': '',
'comment': """feature rich ontology editing environment with full support for the OWL 2 Web
Ontology Language, and direct in-memory connections to description logic reasoners like HermiT and Pellet"""},
{'title': 'ontogenesis - upper level ontologies',
'url': 'http://ontogenesis.knowledgeblog.org/740',
'author': 'Robert Hoehndorf', 'year': '2010',
'localSavedFile': 'ontogenesis.html',
'comment': 'basic overview of ontologies with descriptions of time/space and objects/processes'},
{'title': 'DAML Ontology Library',
'url': 'http://www.daml.org/ontologies/',
'author': 'http://www.daml.org/ontologies/uri.html', 'year':'',
'localSavedFile': 'S:\\DATA\\opendata\\ontology\\DAML\\uri.html',
'comment': 'Not a single ontology, but has links to other ontologies of various grains'},
{'title': 'Toward the Use of an Upper Ontology for U.S. Government and U.S. Military Domains: An Evaluation',
'url': 'http://oai.dtic.mil/oai/oai?verb=getRecord&metadataPrefix=html&identifier=ADA459575',
'author': 'MITRE Corp report for US Government', 'year': '2004',
'localSavedFile': 'ADA459575.pdf',
'comment': 'very good explanation of the types of ontological choices such as 3d/4d, descriptive vs revisionary, mult vs reduct, ...'},
{'title': 'ROMULUS - A repository of foundational ontologies',
'url': 'http://www.thezfiles.co.za/ROMULUS/downloads.html',
'author': 'Zubeida Casmod Dawood', 'year': '2014',
'localSavedFile': 'ROMULUS.html',
'comment': 'Very good list of links to ontologies'},
{'title': 'Ontological realism: A methodology for coordinated evolution of scientic ontologies',
'url': 'http://iospress.metapress.com/content/1551884412214u67/fulltext.pdf',
'author': 'Barry Smith and Werner Ceusters - University of Buffalo', 'year': '2010',
'localSavedFile': 'ontological-realisation.pdf',
'comment': 'technical focus on biological and biomedical ontologies within the framework of the OBO (Open Biomedical Ontologies) Foundry initiative' },
{'title': 'Some Ideas and Examples to Evaluate Ontologies',
'author': 'Asuncion Gomez-Perez, Knowledge Systems Laboratory, Stanford University', 'year': '199?',
'url': 'http://oa.upm.es/6242/1/Some_Ideas_and_Examples_to_Evaluate_Ontologies.pdf',
'localSavedFile': 'Some_Ideas_and_Examples_to_Evaluate_Ontologies.pdf',
'comment': 'Paper shows ideas and methods to review ontologies'},
{'title': 'Ontology Development 101: A Guide to Creating Your First Ontology',
'author': 'Natalya F. Noy and Deborah L. McGuinness - Stanford University', 'year': '2000?',
'url': 'http://protege.stanford.edu/publications/ontology_development/ontology101-noy-mcguinness.html',
'localSavedFile': 'ontology101-noy-mcguinness.html',
'comment': """Good introduction and examples of building an ontology - key points: reuse parts if possible,
but build yourself to keep it short and valid"""},
{'title': 'KSL Ontonlingua',
'url' : 'http://www.ksl.stanford.edu/software/ontolingua/',
'author': 'Standford',
'year': '2012',
'localSavedFile': '',
'comment': """Ontolingua provides a distributed collaborative environment to browse, create,
edit, modify, and use ontologies. The server supports over 150 active users, some of whom
have provided us with descriptions of their projects. """},
{'title': 'OCHRE',
'url': 'http://en.wikipedia.org/wiki/Object-centered_high-level_reference_ontology',
'author' : '',
'year' : '',
'localSavedFile': 'S:\\DATA\\opendata\\ontology\\OCHRE\\ki2003epaper.pdf',
'comment': """Descriptive document, not an actual ontology - has a focus on conceptual simplicity,
so that the number of basic (primitive) concepts and relations is as small as possible in order to
simplify the theory"""},
{'title' : 'Onto-Med Report Nr. 8',
'url' : 'http://www.onto-med.de/publications/2010/gfo-basic-principles.pdf',
'author' : 'Onto-Med in Leipzig',
'year' : '',
'localSavedFile': 'gfo-basic-principles.pdf',
'comment' : 'basic principles of GFO'}
]
commentsList = [
{'src': 'comment', 'comment': 'there is no single correct ontology for any domain',
'url': 'http://protege.stanford.edu/publications/ontology_development/ontology101-noy-mcguinness.html'},
{'src': 'conclusion', 'comment': 'use an existing top level ontology as a starting point, but base remaining ones on DOLCE',
'url': 'n/a'}
]
import os
def main():
ShowStatistics()
SaveHTML_File_Samples('review_file_samples.html')
SaveHTML_Review('review_ontology.html')
SaveAsMarkup('review_ontology.txt')
#os.system('start review_ontology.html')
#os.system('start review_ontology.txt')
ShowConclusion()
def ShowStatistics():
print('Ontologies = ' + str(len(ontologyList)))
print('Documents = ' + str(len(documentList)))
print('Comments = ' + str(len(commentsList)))
def ShowConclusion():
print('Conclusion: ')
for i in commentsList:
#print(i['src'], i['comment'])
if i['src'] == 'conclusion':
print(i['comment'])
def ShowData():
print('------ Ontologies-------\n')
for i in ontologyList:
#print(i['name'], i['url'])
print(i)
print('------ Documents-------\n')
for i in documentList:
print(i['title'], i['url'])
print('------ COMMENTS-------\n')
for i in commentsList:
print(i['comment'])
def SaveHTML_Review(htmlFile):
#print(' saving results to ' + htmlFile)
deleteFile(htmlFile)
AppendToFile(htmlFile, BuildHTMLHeader('Ontology Review', '\r\n', '0'))
AppendToFile(htmlFile, '</TABLE>')
AppendToFile(htmlFile, 'Updated 3/12/2014 - list of upper ontologies with comments/ratings for possible use in AI applications.<BR><BR>\r\n')
AppendToFile(htmlFile, '<H2>Ontology Datasets</h2>\r\n')
for i in ontologyList:
AppendToFile(htmlFile, '<B>' + i['name'] + '</B><BR>')
AppendToFile(htmlFile, 'page = <a href=' + i['url'] + '>' + i['url'] + '</a><BR>')
AppendToFile(htmlFile, 'data = <a href=' + i['data'] + '>' + i['data'] + '</a><BR>\r\n')
AppendToFile(htmlFile, i['rating'] + '<BR>\r\n')
AppendToFile(htmlFile, TestLocalFile(i['localFile']) + '<BR><BR>\r\n')
AppendToFile(htmlFile, '<BR><BR>\r\n')
# show document list of RHS
AppendToFile(htmlFile, '<H2>Useful Links for Ontological development</H2>\r\n')
for i in documentList:
AppendToFile(htmlFile, '<B>' + i['title'] + '</B><BR>' + '<a href=' + i['url'] + '>' + i['url'] + '</a><BR>\r\n')
AppendToFile(htmlFile, i['comment'] + '<BR><BR>\r\n')
AppendToFile(htmlFile, '<BR><BR></BODY></HTML>')
def SaveAsMarkup(htmlFile):
deleteFile(htmlFile)
    AppendToFile(htmlFile, 'Updated 25/3/2014 - list of upper ontologies with comments/ratings for possible use in AI applications.\n\n')
AppendToFile(htmlFile, '## Ontology Datasets\n')
for i in ontologyList:
AppendToFile(htmlFile, '### ' + i['name'] + '\n' )
AppendToFile(htmlFile, 'page = [' + i['url'] + '](' + i['url'] + ')\n')
AppendToFile(htmlFile, 'data = [' + i['data'] + '](' + i['data'] + ')\n')
AppendToFile(htmlFile, i['rating'] + '\n')
AppendToFile(htmlFile, TestLocalFile(i['localFile']) + '\n')
AppendToFile(htmlFile, '## Useful Links for Ontological development')
for i in documentList:
AppendToFile(htmlFile, '### ' + i['title'] + '\n' + '[' + i['url'] + '](' + i['url'] + ')\n')
AppendToFile(htmlFile, i['comment'] + '\n')
AppendToFile(htmlFile, '\n')
AppendToFile(htmlFile, '\n\n This report generated via ')
AppendToFile(htmlFile, '[https://github.com/acutesoftware/AIKIF/blob/master/aikif/ontology/review_ontology.py]')
AppendToFile(htmlFile, '(https://github.com/acutesoftware/AIKIF/blob/master/aikif/ontology/review_ontology.py)')
def BuildHTMLHeader(title, linefeed='\n', border='1'):
res = "<HTML><HEAD><title>" + linefeed
res = res + title + "</title>" + linefeed
res = res + CreateCssString("Arial", "10pt", linefeed ) + linefeed
res = res + "</HEAD><BODY>"
res = res + "Back to <a href=http://www.acutesoftware.com.au>Acute Software homepage</a> "
res = res + "or <a href=http://www.acutesoftware.com.au/aikif>AIKIF home</a><BR>" + linefeed
res = res + "<H1>" + title + "</H1><TABLE border=" + border + ">"
return res
def CreateCssString(fontFamily, baseFontSize, linefeed='\n'):
css = "<STYLE>" + linefeed
css = css + "BODY { font-size:" + baseFontSize + "; FONT-FAMILY:" + fontFamily + "; }" + linefeed
css = css + "A:link { font-size:" + baseFontSize + "; COLOR: blue;TEXT-DECORATION:none}" + linefeed
css = css + "A:visited { color: #003399; font-size:" + baseFontSize + ";TEXT-DECORATION:none }" + linefeed
css = css + "A:hover { color:#FF3300;TEXT-DECORATION:underline}" + linefeed
css = css + "TD { font-size:" + baseFontSize + "; valign=top; FONT-FAMILY:Arial; padding: 1px 2px 2px 1px; }" + linefeed
css = css + "H1 { font-size:200%; padding: 1px 0px 0px 0px; margin:0px; }" + linefeed
css = css + "H2 { font-size:160%; FONT-WEIGHT:NORMAL; margin:0px 0px 0px 0px; padding:0px; }" + linefeed
css = css + "H3 { font-size:100%; FONT-WEIGHT:BOLD; padding:1px; letter-spacing:0.1em; }" + linefeed
css = css + "H4 { font-size:140%; FONT-WEIGHT:NORMAL; margin:0px 0px 0px 0px; padding:1px; }" + linefeed
css = css + "</STYLE>" + linefeed
return css
def deleteFile(f):
    if f != "":
        try:
            os.remove(f)
        except Exception:
            print("Can't delete ", f)
def AppendToFile(fname, txt):
with open(fname, "a") as myfile:
myfile.write(txt)
def GetFileSize(localFile):
return os.path.getsize(localFile)
def GetTotalNumFiles(localFile):
fldr = os.path.dirname(localFile)
fileList = os.listdir(fldr)
numfiles = len([name for name in fileList if os.path.isfile(fldr + os.sep + name)])
return numfiles
def GetTotalFileSizesForFolder(localFile):
fldr = os.path.dirname(localFile)
fileList = os.listdir(fldr)
num = sum(os.path.getsize(fldr + os.sep + f) for f in fileList if os.path.isfile(fldr + os.sep + f))
return num
def DoesFileExist(localFile):
success = False
try:
if os.path.isfile(localFile):
success = True
except Exception:
pass
return success
def TestLocalFile(localFile):
result = 'Not downloaded'
if DoesFileExist(localFile):
print('Collecting stats for ' + localFile)
result = '<I>Sample file saved to ' + localFile + ' (' + format(GetFileSize(localFile), ',d') + ' bytes)<BR>'
result = result + '' + format(GetTotalNumFiles(localFile), ',d')
result = result + ' files in folder, totalling ' + format(GetTotalFileSizesForFolder(localFile), ',d') + ' bytes</I>'
return result
def SaveHTML_Review_as_table(htmlFile):
#print(' saving results to ' + htmlFile)
deleteFile(htmlFile)
AppendToFile(htmlFile, BuildHTMLHeader('Ontology Review', '\r\n', '2'))
AppendToFile(htmlFile, '<TR>\r\n')
AppendToFile(htmlFile, '<TD valign=top><TABLE border=1 valign=top width=96%>\r\n')
AppendToFile(htmlFile, '<TH>Ontology</TH><TH>Test Comments</th><TH>Rating for AI</TH></TR>\r\n')
for i in ontologyList:
txtRow = '<TR>'
#print(i['name'], i['url'])
txtRow = txtRow + '<TD><a href=' + i['url'] + '>' + i['name'] + '</a></TD>\r\n'
txtRow = txtRow + '<TD>' + i['tested'] + '</TD>\r\n'
txtRow = txtRow + '<TD>' + i['rating'] + '</TD>\r\n'
txtRow = txtRow + '</TR>\r\n'
#fle.AppendToFile(htmlFile, net.FormatListAsHTMLTableRow(dat.dict2list(i)))
AppendToFile(htmlFile, txtRow)
AppendToFile(htmlFile, '</TABLE></TD valign=top><TD>\r\n')
# show document list of RHS
AppendToFile(htmlFile, '<TABLE border=1 valign=top width=96%>\r\n')
AppendToFile(htmlFile, '<TH>Document Title</TH><TH>Comments</th></TR>\r\n')
for i in documentList:
txtRow = '<TR>'
#print(i['name'], i['url'])
txtRow = txtRow + '<TD><a href=' + i['url'] + '>' + i['title'] + '</a></TD>\r\n'
txtRow = txtRow + '<TD>' + i['comment'] + '</TD>\r\n'
txtRow = txtRow + '</TR>\r\n'
#AppendToFile(htmlFile, net.FormatListAsHTMLTableRow(dat.dict2list(i)))
AppendToFile(htmlFile, txtRow)
AppendToFile(htmlFile, '</TABLE></TD><TD>\r\n')
AppendToFile(htmlFile, '</TD></TR></TABLE><BR><BR><BR><BR></BODY></HTML>')
def SaveHTML_File_Samples(htmlFile):
# extracts samples of the ontology files into one single HTML file
# has bookmarked index at top of page
# get the first 10 lines + last 5 lines
deleteFile(htmlFile)
AppendToFile(htmlFile, "Back to <a href=http://www.acutesoftware.com.au>Acute Software homepage</a> or ")
AppendToFile(htmlFile, "<a href=http://www.acutesoftware.com.au/aikif>AIKIF home</a><BR>")
AppendToFile(htmlFile, '<H1>Ontology File Samples</h1>\n')
AppendToFile(htmlFile, 'Generated via <a href=https://github.com/acutesoftware/AIKIF/blob/master/aikif/ontology/review_ontology.py>')
AppendToFile(htmlFile, 'https://github.com/acutesoftware/AIKIF/blob/master/aikif/ontology/review_ontology.py</a><BR>\n')
for i in ontologyList:
ontFile = i['localFile']
AppendToFile(htmlFile, '<h2>' + i['name'] + '</h2>\n' )
AppendToFile(htmlFile, 'Page info = <a href=' + i['url'] + '>' + i['url'] + '</a><BR>\n')
AppendToFile(htmlFile, 'source data = <a href=' + i['data'] + '>' + i['data'] + '</a><BR>\n')
AppendToFile(htmlFile, 'Sample from ' + ontFile + '<BR>\n')
AppendToFile(htmlFile, '<PRE>' + GetSampleData(ontFile) + '</PRE><BR><BR>')
AppendToFile(htmlFile, "<BR><BR><BR>Back to <a href=http://www.acutesoftware.com.au>Acute Software homepage</a> or ")
AppendToFile(htmlFile, "<a href=http://www.acutesoftware.com.au/aikif>AIKIF home</a><BR>")
def GetSampleData(fname):
    """ returns up to ~100 lines / 20,000 chars of fname, HTML escaped """
    res = ''
    numLine = 0
    numChars = 0
    try:
        with open(fname, "r") as myfile:
            for rawLine in myfile:
                line = rawLine[0:254]
                res = res + line
                if len(rawLine) > 254:
                    res = res + '...\n'
                numLine = numLine + 1
                numChars = numChars + len(line)
                if numLine > 100 or numChars > 20000:
                    break
    except Exception:
        res = '[no local file saved]'
    return res.replace('<', '&lt;')
if __name__ == '__main__':
    main()

# ===== end of file: aikif/ontology/review_ontology.py (package AIKIF 0.2.2) =====
import os
import redis
#ip_folder = os.path.dirname(os.path.abspath(__file__)) # leave src out of git - too big
ip_folder = 'S:\\DATA\\opendata\\ontology\\OpenCyc'
op_folder = os.path.abspath(ip_folder + os.sep + ".." + os.sep + ".." + os.sep + "data" )
print ('ip = ', ip_folder)
print ('op = ', op_folder)
#files = ['open-cyc.n3', 'open-cyc.rdf', 'open-cyc.trig']
#files = ['open-cyc.n3.csv']    # full export - uncomment to load everything
files = ['open-cyc_sample.n3.csv']
lookup = ['gauge', 'mind', 'post']
def main():
#create_html_summary()
load_data(ip_folder + os.sep + files[0])
def load_data(fname):
""" loads previously exported CSV file to redis database """
print('Loading ' + fname + ' to redis')
    r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
with open(fname, 'r') as f:
for line_num, row in enumerate(f):
            if row.strip() != '':   # skip blank lines ( strip('') was a no-op )
if line_num < 100000000:
l_key, l_val = parse_n3(row, 'csv')
if line_num % 1000 == 0:
print('loading line #', line_num, 'key=', l_key, ' = ', l_val)
if l_key != '':
r.set(l_key, l_val)
def parse_n3(row, src='csv'):
"""
takes a row from an n3 file and returns the triple
NOTE - currently parses a CSV line already split via
cyc_extract.py
"""
if row.strip() == '':
return '',''
l_root = 'opencyc'
key = ''
val = ''
if src == 'csv':
cols = row.split(',')
if len(cols) < 3:
#print('PARSE ISSUE : ', row)
return '',''
key = ''
val = ''
        key = l_root + ':' + cols[1].strip().strip('"') + ':' + cols[2].strip().strip('"')
        try:
            val = cols[3].strip().strip('"')   # strip whitespace/newline first, then quotes
except Exception:
val = "Error parsing " + row
elif src == 'n3':
pass
return key, val
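# Example: a CSV line exported by cyc_extract.py such as
#   '"1","subj","pred","obj"'
# parses to a key of 'opencyc:subj:pred' and a value of 'obj':
#   parse_n3('"1","subj","pred","obj"')  ->  ('opencyc:subj:pred', 'obj')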
def create_html_summary():
txt = '<HTML><BODY>'
for f in files:
txt += summarise_file_as_html(f)
txt += '</BODY></HTML>'
with open('open_cyc_summary.html', 'w') as fop:
fop.write(txt)
print('Done')
def escape_html(s):
    """ escape HTML special characters ('&' must be replaced first) """
    res = s.replace('&', '&amp;')
    res = res.replace('>', '&gt;')
    res = res.replace('<', '&lt;')
    res = res.replace('"', '&quot;')
    return res
def summarise_file_as_html(fname):
"""
takes a large data file and produces a HTML summary as html
"""
txt = '<H1>' + fname + '</H1>'
num_lines = 0
print('Reading OpenCyc file - ', fname)
with open(ip_folder + os.sep + fname, 'r') as f:
txt += '<PRE>'
for line in f:
if line.strip() != '':
num_lines += 1
if num_lines < 80:
txt += str(num_lines) + ': ' + escape_html(line) + ''
txt += '</PRE>'
txt += 'Total lines = ' + str(num_lines) + '<BR><BR>'
return txt
if __name__ == "__main__":
    main()

# ===== end of file: aikif/ontology/read_opencyc.py (package AIKIF 0.2.2) =====
# NOTE - this file is not part of the coverage stats, and doesn't
# have any tests, but needs to stay here as it is linked
# externally.
baseURL = 'http://wiki.opencog.org'
srcURL = 'http://wiki.opencog.org/w/MindOntology'
AllPagesURL = 'http://wiki.opencog.org/wikihome/index.php?title=Special:AllPages&namespace=104'
searchStringOntology = '/w/Category:MindOntology'
searchStringPages = '/w/MindOntology:'
searchStringWebText = 'mw-content-text'
searchStringCategoryLink = 'mw-normal-catlinks'
csvOutput = 'mindOntology.csv'
owlOutput = 'mindOntology.owl' # NOT implemented properly
txtOutput = 'mindOntology.txt'
xmlOutput = 'mindOntology.xml'
subOutput = 'mindOntology_subCategory.csv'
subOntology = []
allPages = []
import urllib.request
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
def main():
print('Converts OpenCog wiki pages "MindOntology" to single file')
subOntology = ExtractPageLinks(srcURL, searchStringOntology)
allPages = ExtractPageLinks(AllPagesURL, searchStringPages)
currentWebText = ''
for p in allPages:
fixedURL = p['url'].replace('%26', ' ') # not needed - the actual page returned 404 anyway
print('Reading ... ' + fixedURL)
currentWebText = GetWebPage(fixedURL)
if currentWebText != '404':
p['html'], p['txt'] = ExtractContent(currentWebText, searchStringWebText)
p['catList'] = ExtractCategoryLinks(currentWebText, searchStringCategoryLink)
SaveAsTXT(allPages, txtOutput)
SaveAsXML(allPages, xmlOutput)
SaveAsOWL(allPages, owlOutput) # not implemented
SaveAsCSV(allPages, csvOutput)
SaveAsCSV(subOntology, subOutput)
print('Done')
def ExtractPageLinks(url, searchText):
links = []
rawText = GetWebPage(url)
soup = BeautifulSoup(rawText)
for link in soup.findAll('a'):
l = str(link.get('href'))
if searchText in l:
#a = link.attrs
content = str(link.string)
t = str(link.get('title'))
links.append({'name': content, 'url': baseURL + l, 'title': t, 'txt': '', 'html': '', 'catList': []})
return links
def ExtractCategoryLinks(txt, divID):
lCatList = []
soup = BeautifulSoup(txt)
textBlob = soup.find("div", {"id": divID})
if textBlob is not None:
for link in textBlob.findAll('a'):
l = str(link.get('title'))
content = str(link.string)
#t = str(link.get('title')) # link not needed here, as many pages dont exist (??)
if l != 'Special:Categories': #this is the category heading so ignore
curCat = content.replace('(page does not exist)', '')
#links.append({'name': content, 'url': baseURL + l, 'title': t, 'txt': '', 'html': ''})
lCatList.append({'catList': curCat})
return lCatList
def GetWebPage(url):
txtString = '404'
try:
rawText = urllib.request.urlopen(url).read()
txtString = str( rawText, encoding='utf8' )
except Exception:
pass
return txtString
def ExtractContent(rawText, divID):
html = ''
soup = BeautifulSoup(rawText)
results = soup.find("div", {"id": divID})
txt = results.getText() # gives results without List items
print(str(len(txt)) + ' bytes read\n')
for line in results.contents:
html = html + str(line) + '\n'
return html, txt
def SaveAsTXT(lst, fname):
def formatListItemText(itm):
txt = '[BEGIN_ROW]\n'
txt = txt + 'NAME: ' + itm['name'] + '\n'
txt = txt + 'URL: ' + itm['url'] + '\n'
txt = txt + 'TEXT: ' + itm['txt'] + '\n'
txt = txt + 'HTML: ' + itm['html'] + '\n'
for i in itm['catList']:
txt = txt + 'CATEGORY: ' + str(i['catList']) + '\n'
txt = txt + '[END_ROW]\n\n'
#print(txt)
return txt
with open(fname, "w") as myfile:
for dct in lst:
myfile.write(formatListItemText(dct))
def SaveAsCSV(lst, fname):
def formatListItemCSV(itm):
def StripLineBreaks(txt):
# also double quotes "
result = ''
try:
result = txt.replace('\n', ' ').replace('\r', ' ').replace('"', '""')
except Exception:
pass
return result
txt = '"'
txt = txt + itm['name'] + '","'
txt = txt + itm['url'] + '","'
txt = txt + itm['title'] + '","'
txt = txt + StripLineBreaks(itm['txt']) + '","'
txt = txt + StripLineBreaks(itm['html']) + '","'
for i in itm['catList']:
txt = txt + str(i['catList']) + ' ; '
txt = txt + '"\n'
return txt
op = open(fname, 'w')
op.write('"name","url","title","txt","html","catList"\n')
for dct in lst:
op.write(formatListItemCSV(dct))
def SaveAsXML(lst, fname):
def formatListItemXML(itm):
"""
        def FixTextForXML(txt):
            res = txt.replace('&', '&amp;') # must be done first
            return res.replace('"', '&quot;').replace('\'', '&apos;').replace('<', '&lt;').replace('>', '&gt;')
"""
        def StripHexChars(txt):
            txt2 = txt.replace('\x93', '"')    # Windows-1252 left double quote ( '\0x93' was a typo )
            return txt2.replace('\x94', '"')   # Windows-1252 right double quote
txt = '<MindOntology_Definition>\n'
txt = txt + ' <COL_NAME><![CDATA[' + itm['name'] + ']]></COL_NAME>\n'
txt = txt + ' <COL_URL><![CDATA[' + itm['url'] + ']]></COL_URL>\n'
txt = txt + ' <COL_TXT>\n<![CDATA[\n' + StripHexChars(itm['txt']) + '\n]]>\n</COL_TXT>\n'
txt = txt + ' <COL_HTML>\n<![CDATA[\n' + StripHexChars(itm['html']) + '\n]]>\n</COL_HTML>\r\n'
for i in itm['catList']:
txt = txt + ' <COL_CATEGORY><![CDATA[' + str(i['catList']) + ']]></COL_CATEGORY>\n'
txt = txt + '</MindOntology_Definition>\r\n'
#print(FixTextForXML(txt))
return txt
with open(fname, "w") as op:
op.write('<?xml version="1.0"?>' + "\n")
op.write('<mindOntology>' + "\n")
for l in lst:
op.write(formatListItemXML(l))
op.write('</mindOntology>')
def SaveAsOWL(lst, fname):
    def formatListItemOWL(itm):
        txt = '<RDF_ROW>\n'
        txt = txt + '    <COL_NAME>' + itm['name'] + '</COL_NAME>\n'
        txt = txt + '    <COL_URL>' + itm['url'] + '</COL_URL>\n'
        txt = txt + '    <COL_TXT>' + itm['txt'] + '</COL_TXT>\n'
        txt = txt + '    <COL_HTML>' + itm['html'] + '</COL_HTML>\n'
        for i in itm['catList']:
            txt = txt + '    <COL_CATEGORY>' + str(i['catList']) + '</COL_CATEGORY>\n'
        txt = txt + '</RDF_ROW>\r\n'   # close the row AFTER the category columns
        return txt
with open(fname, "w") as op:
op.write('<RDF>')
for l in lst:
op.write(formatListItemOWL(l))
op.write('</RDF>')
if __name__ == '__main__':
    main()

# ===== end of file: aikif/ontology/createMindOntology.py (package AIKIF 0.2.2) =====
ipFolder = 'S:\\DATA\\opendata\\ontology\\WordNet\\dict'
opFolder = '..//..//data//' # os.getcwd()
files = ['data.noun', 'data.verb']
lookup = ['gauge', 'mind', 'post']
def main():
allWords = []
for f in files:
numLines = 0
print('Reading WordNet file - ', f)
fullName = ipFolder + '\\' + f
for line in open(fullName,'r'):
if line[0:2] != ' ':
ndx, words = ParseLine(line)
# works sort of rec = [f, ndx, [w for w in words]]
for w in words:
rec = [f, ndx, w, line]
allWords.append(rec)
numLines = numLines + 1
if numLines < 40:
#print(line[0:75] + '...')
#print('ndx=', ndx, ' words = ', words)
#print(rec)
pass
print('Finished reading ' + str(numLines) + ' lines\n')
print('allWords = ', str(len(allWords)))
# Do a lookup
for i in lookup:
results = Lookup(i, allWords)
for r in results:
print(r)
print('Result for ' + i + ' - ' + r[0] + ' ' + r[1] + '\n ' , r[2], '\n')
# TESTING - show lines for index
PrintLineForIndex('00469029', allWords)
PrintLineForIndex('05619057', allWords)
PrintLineForIndex('01033142', allWords)
def Lookup(txt, wrdList):
res = []
for i in wrdList:
src = i[0]
ndx = i[1]
for wrd in i[2]:
if txt == wrd:
res.append([src, ndx, wrd])
res.append([src, ndx, GetWordForIndex(ndx, wrdList)])
return res
def GetWordForIndex(ndxToFind, lst):
wrdResult = []
for i in lst:
src = i[0]
ndx = i[1]
if ndxToFind == ndx:
for wrd in i[2]:
wrdResult.append([src, ndx, wrd])
return wrdResult
def PrintLineForIndex(i, wrdList):
    """ prints the raw data line for a given WordNet synset index
        (rewritten - the old version was flagged '# bug here - TODO') """
    print('looking for index - ', i)
    for rec in wrdList:    # rec = [source_file, index, word, raw_line]
        if rec[1] == i:
            print(rec[3][0:75] + '...')
            print('found index ', i, '\n')
            break
def ParseLine(line):
    """ parses a WordNet data file line into (index, [words]) """
    wrds = []
    cols = line.split(' ')
    ndx = cols[0]
    numWords = int(cols[3], 16)   # the word count field is in hex
    for i in range(numWords):
        # words appear as word/lex_id pairs starting at column 4, so step
        # by 2 ( the old cols[5+i-1] picked up the lex_ids as well )
        wrds.append(cols[4 + i*2])
    return ndx, wrds
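# A data.noun line starts with: synset_offset lex_filenum ss_type w_cnt
# followed by w_cnt word/lex_id pairs, e.g. (illustrative):
#   00001740 03 n 01 entity 0 003 ~ 00001930 n 0000 ...
# which ParseLine returns as ('00001740', ['entity'])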
if __name__ == '__main__':
    main()

# ===== end of file: aikif/ontology/read_wordnet.py (package AIKIF 0.2.2) =====
import os
import sys
import codecs
from datetime import datetime
#img_file = ImageFile('..\\..\\doc\\web-if-v02.jpg')
#print(img_file)
#aud_file = AudioFile(r"E:\backup\music\Music\_Rock\Angels\Red Back Fever\07 Red Back Fever.mp3")
#print(aud_file)
class File(object):
"""
handles various file conversions, reading, writing
as well as general file operations (delete, copy, launch)
"""
def __init__(self, fname):
self.fullname = os.path.abspath(fname)
self.name = fname
self.path = ''
self.size = 0
self.date_modified = None # self.GetDateAsString(os.path.getmtime(fname))
try:
self.fullname = os.path.abspath(fname)
self.name = os.path.basename(self.fullname)
self.path = os.path.dirname(self.fullname)
self.size = os.path.getsize(self.fullname)
self.date_modified = os.path.getmtime(self.fullname) # self.GetDateAsString(os.path.getmtime(fname))
except Exception as ex:
print('problem accessing ' + fname + ' ' + str(ex))
def __str__(self):
# when printing a file class it should print the name, size, date
# as well as top 10 lines (first 80 chars in each line)
txt = '=============================================\n'
txt += '| name = ' + self.name + '\n'
txt += '| size = ' + str(self.size) + ' bytes\n'
txt += '| folder = ' + self.path + '\n'
txt += '| modified = ' + self.GetDateAsString(self.date_modified) + '\n'
txt += '=============================================\n'
return txt
    def exists(self):
        return os.path.exists(self.fullname)
def launch(self):
""" launch a file - used for starting html pages """
#os.system(self.fullname) # gives permission denied seeing it needs to be chmod +x
import subprocess
try:
retcode = subprocess.call(self.fullname, shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
return False
else:
print("Child returned", retcode, file=sys.stderr)
return True
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
return False
def delete(self):
""" delete a file, don't really care if it doesn't exist """
if self.fullname != "":
try:
os.remove(self.fullname)
except IOError:
print("Cant delete ",self.fullname)
def GetDateAsString(self, t):
res = ''
try:
res = str(datetime.fromtimestamp(t).strftime("%Y-%m-%d %H:%M:%S"))
except Exception as ex:
print('problem converting time ' + str(t) + ' ' + str(ex))
return res
class TextFile(File):
"""
handles various file conversions, reading, writing
as well as general file operations (delete, copy, launch)
"""
def __init__(self, fname):
super(TextFile, self).__init__(fname)
self.lines = self.count_lines_in_file(fname)
def __str__(self):
""" display the text file sample """
txt = super(TextFile, self).__str__()
txt += 'TextFile contains ' + str(self.lines) + ' lines\n'
txt += self.get_file_sample()
return txt
    def count_lines_in_file(self, fname=''):
        """ counts the number of lines in a file (including blank lines) """
i = 0
if fname == '':
fname = self.fullname
try:
#with open(fname, encoding="utf8") as f:
with codecs.open(fname, "r",encoding='utf8', errors='ignore') as f:
for i, _ in enumerate(f):
pass
return i + 1
except Exception as ex:
print('cant count lines in file in "', fname, '":', str(ex))
return 0
def count_lines_of_code(self, fname=''):
""" counts non blank lines """
if fname == '':
fname = self.fullname
loc = 0
try:
with open(fname) as f:
for l in f:
if l.strip() != '':
loc += 1
return loc
except Exception as ex:
print('cant count lines of code in "', fname, '":', str(ex))
return 0
def get_file_sample(self, numLines=10):
""" retrieve a sample of the file """
res = ''
try:
with open(self.fullname, 'r') as f:
for line_num, line in enumerate(f):
res += str(line_num).zfill(5) + ' ' + line
if line_num >= numLines-1:
break
return res
except Exception as ex:
print('cant get_file_sample in "', self.fullname, '":', str(ex))
return res
def append_text(self, txt):
""" adds a line of text to a file """
with open(self.fullname, "a") as myfile:
myfile.write(txt)
    def convert_to_csv(self, op_csv_file, delim):
        """ converts this delimited text file to a standard quoted CSV file """
        import csv
        with open(self.fullname, "r") as fin:                 # context managers close both
            in_txt = csv.reader(fin, delimiter=delim)         # handles (the old version
            with open(op_csv_file, 'w', newline='') as ofile: # leaked the input handle)
                out_csv = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
                out_csv.writerows(in_txt)
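    # Example usage (illustrative filenames only):
    #   TextFile('diary.txt').convert_to_csv('diary.csv', ';')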
def load_file_to_string(self):
""" load a file to a string """
try:
with open(self.fullname, 'r') as f:
txt = f.read()
return txt
except IOError:
return ''
def load_file_to_list(self):
""" load a file to a list """
lst = []
try:
with open(self.fullname, 'r') as f:
for line in f:
lst.append(line)
return lst
except IOError:
return lst
class ImageFile(File):
"""
handles various image file metadata collection
(by calling toolbox/image_tools.py)
"""
def __init__(self, fname):
import aikif.toolbox.image_tools as img
super(ImageFile, self).__init__(fname)
self.meta = img.get_metadata_as_dict(fname)
def __str__(self):
""" display the text file sample """
#txt = self.name + '\n'
txt = super(ImageFile, self).__str__()
txt += 'Image size = ' + str(self.meta['width']) + ' x ' + str(self.meta['height']) + '\n'
return txt
class AudioFile(File):
"""
handles various audio file metadata collection
(by calling toolbox/audio_tools.py)
"""
def __init__(self, fname):
import aikif.toolbox.audio_tools as aud
super(AudioFile, self).__init__(fname)
self.meta = aud.get_audio_metadata(fname)
#print(self.meta)
def __str__(self):
""" display the meta data from the audio file """
txt = super(AudioFile, self).__str__()
txt += 'Song = ' + str(self.meta['title'][0]) + ' by ' + str(self.meta['artist'][0]) + '\n'
        return txt

# ===== end of file: aikif/lib/cls_file.py (package AIKIF 0.2.2) =====
import datetime
class Plan_BDI(object):
"""
class for handling various plans for AIKIF using
Belief | Desires | Intentions
"""
def __init__(self, name, dependency):
self.name = name
self.id = 1
self.dependency = dependency
self.plan_version = "v0.10"
self.success = False
self.start_date = datetime.datetime.now().strftime("%I:%M%p %d-%B-%Y")
self.resources = []
self.constraint = []
self.beliefs = Beliefs(self)
self.desires = Desires(self)
self.intentions = Intentions(self)
def __str__(self):
res = "---== Plan ==---- \n"
res += "name : " + self.name + "\n"
res += "version : " + self.plan_version + "\n"
for i in self.beliefs.list():
res += "belief : " + i + "\n"
for i in self.desires.list():
res += "desire : " + i + "\n"
for i in self.intentions.list():
res += "intention : " + i + "\n"
return res
def get_name(self):
return self.name
def generate_plan(self):
"""
Main logic in class which generates a plan
"""
print("generating plan... TODO")
def load_plan(self, fname):
""" read the list of thoughts from a text file """
with open(fname, "r") as f:
for line in f:
if line != '':
tpe, txt = self.parse_plan_from_string(line)
#print('tpe= "' + tpe + '"', txt)
if tpe == 'name':
self.name = txt
elif tpe == 'version':
self.plan_version = txt
elif tpe == 'belief':
self.beliefs.add(txt)
elif tpe == 'desire':
self.desires.add(txt)
elif tpe == 'intention':
self.intentions.add(txt)
def save_plan(self, fname):
with open(fname, "w") as f:
f.write("# AIKIF Plan specification \n")
f.write("name :" + self.name + "\n")
f.write("version :" + self.plan_version + "\n")
for txt in self.beliefs.list():
f.write("belief :" + txt + "\n")
for txt in self.desires.list():
f.write("desire :" + txt + "\n")
for txt in self.intentions.list():
f.write("intention :" + txt + "\n")
    def parse_plan_from_string(self, line):
        tpe = ''
        txt = ''
        line = line.strip()
        if line != '' and line[0:1] != '#':
            parts = line.split(':', 1)   # split on the first colon only, so values may contain ':'
            if len(parts) == 2:          # guard against lines with no colon (the old code crashed here)
                tpe = parts[0].strip()
                txt = parts[1].strip()
        return tpe, txt
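    # A plan file read by load_plan (and written by save_plan) looks like
    # the following sketch:
    #   # AIKIF Plan specification
    #   name :new plan
    #   version :v0.10
    #   belief :belief0
    #   desire :desire0
    #   intention :intention0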
def add_resource(self, name, tpe):
"""
add a resource available for the plan. These are text strings
of real world objects mapped to an ontology key or programs
from the toolbox section (can also be external programs)
"""
self.resources.append([name, tpe])
def add_constraint(self, name, tpe, val):
"""
adds a constraint for the plan
"""
self.constraint.append([name, tpe, val])
class Thoughts(object):
""" base class for beliefs, desires, intentions simply
to make it easier to manage similar groups of objects """
def __init__(self, thought_type):
#print("Thoughts - init: thought_type = " + thought_type + "\n")
self._thoughts = []
self._type = thought_type
def __str__(self):
res = ' -- Thoughts --\n'
for i in self._thoughts:
res += i + '\n'
return res
def add(self, name):
self._thoughts.append(name)
def list(self, print_console=False):
lst = []
for i, thought in enumerate(self._thoughts):
if print_console is True:
print(self._type + str(i) + ' = ' + thought)
lst.append(thought)
return lst
class Beliefs(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Beliefs, self).__init__('belief')
class Desires(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Desires, self).__init__('desire')
class Intentions(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Intentions, self).__init__('intention')
def TEST():
myplan = Plan_BDI('new plan', '')
myplan.beliefs.add('belief0')
myplan.beliefs.add('belief1')
myplan.beliefs.add('belief2')
myplan.desires.add('desire0')
myplan.desires.add('desire1')
myplan.intentions.add('intention0')
myplan.beliefs.list()
myplan.desires.list()
myplan.intentions.list()
#myplan.save_plan("test_plan.txt")
#myplan.load_plan("test_plan.txt")
print(str(myplan))
if __name__ == '__main__':
    TEST()

# ===== end of file: aikif/lib/cls_plan_BDI.py (package AIKIF 0.2.2) =====
import os
#import shutil
#import csv
#import glob
import fnmatch
import time
from datetime import datetime
#import aikif.cls_log as mod_log
class FileListGroup(object):
"""
not sure about the point of this class - might be simpler
to just get cls_filelist to do all the work. Will leave it in
in case I remember the original idea
"""
def __init__(self, name, src_folder, dest_folder):
self.name = name
self.filelist = [] # contains a list of the filelist class instances
self.dest_folder = dest_folder
def __str__(self):
""" display the filelist group details """
txt = 'FileListGroup : ' + self.name + '\n'
txt += 'dest_folder : ' + self.dest_folder + '\n'
return txt
class FileList(object):
def __init__(self, paths, xtn, excluded, output_file_name = 'my_files.csv'):
self.output_file_name = output_file_name
self.filelist = [] # list of full filenames
self.fl_metadata = [] # dictionary of all file metadata
self.paths = paths
self.xtn = xtn
self.excluded = excluded
self.get_file_list(self.paths, self.xtn, self.excluded)
def get_list(self):
return self.filelist
def get_metadata(self):
return self.fl_metadata
def get_file_list(self, lstPaths, lstXtn, lstExcluded, VERBOSE = False):
"""
builds a list of files and returns as a list
"""
if VERBOSE:
print("Generating list of Files...")
print("Paths = ", lstPaths)
print("Xtns = ", lstXtn)
print("exclude = ", lstExcluded)
numFiles = 0
self.filelist = []
self.fl_metadata = []
for rootPath in lstPaths:
if VERBOSE:
print(rootPath)
for root, dirs, files in os.walk(rootPath):
if VERBOSE:
print(dirs)
for basename in files:
for xtn in lstXtn:
if fnmatch.fnmatch(basename, xtn):
                        filename = os.path.join(root, basename)
                        include_this_file = True
                        for exclude in lstExcluded:
                            if filename.find(exclude) != -1:
                                include_this_file = False
                        if include_this_file:
                            numFiles = numFiles + 1
                            self.filelist.append(filename)
                            self.add_file_metadata(filename)  # keeps the parallel metadata list in step with filelist
if VERBOSE:
print("Found ", numFiles, " files")
return self.filelist
def add_file_metadata(self, fname):
"""
        collects the file's metadata - note that this can fail
        with strange errors if the network connection to a shared
        folder drops out. Errors are printed rather than silently
        swallowed, otherwise you would end up with an incomplete
        set of files without knowing it.
"""
file_dict = {}
file_dict["fullfilename"] = fname
try:
file_dict["name"] = os.path.basename(fname)
file_dict["date"] = self.GetDateAsString(fname)
file_dict["size"] = os.path.getsize(fname)
file_dict["path"] = os.path.dirname(fname)
except IOError:
            print('Error getting metadata for file ' + fname)
self.fl_metadata.append(file_dict)
def print_file_details_in_line(self, fname, col_headers):
"""
makes a nice display of filename for printing based on columns passed
print('{:<30}'.format(f["name"]), '{:,}'.format(f["size"]))
"""
line = ''
for fld in col_headers:
if fld == "fullfilename":
line = line + fname
if fld == "name":
line = line + '{:<30}'.format(os.path.basename(fname)) + ' '
if fld == "date":
line = line + self.GetDateAsString(fname) + ' '
if fld == "size":
line = line + '{:,}'.format(os.path.getsize(fname)) + ' '
if fld == "path":
line = line + os.path.dirname(fname) + ' '
#line += '\n'
return line
def print_file_details_as_csv(self, fname, col_headers):
""" saves as csv format """
line = ''
qu = '"'
d = ','
for fld in col_headers:
if fld == "fullfilename":
line = line + qu + fname + qu + d
if fld == "name":
line = line + qu + os.path.basename(fname) + qu + d
if fld == "date":
line = line + qu + self.GetDateAsString(fname) + qu + d
if fld == "size":
line = line + qu + self.get_size_as_string(fname) + qu + d
if fld == "path":
try:
line = line + qu + os.path.dirname(fname) + qu + d
except IOError:
line = line + qu + 'ERROR_PATH' + qu + d
return line
def get_size_as_string(self, fname):
res = ''
try:
res = str(os.path.getsize(fname))
except Exception:
res = 'Unknown size'
return res
def GetDateAsString(self, fname):
res = ''
try:
t = os.path.getmtime(fname)
res = str(datetime.fromtimestamp(t).strftime("%Y-%m-%d %H:%M:%S"))
except Exception:
res = 'Unknown Date'
return res
def TodayAsString(self):
"""
returns current date and time like oracle
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
"""
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def save_filelist(self, opFile, opFormat, delim=',', qu='"'):
"""
        uses the list of files, collects metadata on them and saves
        to a text file as a list or with metadata depending on opFormat.
"""
with open(opFile,'w') as fout:
fout.write("fullFilename" + delim)
for colHeading in opFormat:
fout.write(colHeading + delim)
fout.write('\n')
for f in self.filelist:
line = qu + f + qu + delim
try:
for fld in opFormat:
if fld == "name":
line = line + qu + os.path.basename(f) + qu + delim
if fld == "date":
line = line + qu + self.GetDateAsString(f) + qu + delim
if fld == "size":
line = line + qu + str(os.path.getsize(f)) + qu + delim
if fld == "path":
line = line + qu + os.path.dirname(f) + qu + delim
except IOError:
line += '\n' # no metadata
try:
fout.write (str(line.encode('ascii', 'ignore').decode('utf-8')))
fout.write ('\n')
except IOError:
#print("Cant print line - cls_filelist line 304")
pass | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/lib/cls_filelist.py | cls_filelist.py |
import heapq
import queue
PriorityQueue = queue.PriorityQueue
import aikif.config as mod_cfg
import aikif.cls_log as mod_log
class Problem(object):
"""
Defines the planning problem. Must contain:
- goal state
- start state
- set of actions with outcomes
- path cost function
"""
def __init__(self, goal, start, actions, name):
"""
The Search Problem is defined by an initial state, a successor function,
and a goal state. In lieu of a path cost function, a default one is
provided that depends on the depth of the node in the tree.
"""
self.name = name
self.actions = actions
self.start = start
self.goal = goal
def __str__(self):
        res = ' --- Problem Definition ---\n'
res += 'Problem Name : ' + self.name + '\n'
res += 'Start State : ' + list_2_str(self.start) + '\n'
res += 'Goal State : ' + list_2_str(self.goal) + '\n'
res += 'Actions List : ' + '\n'
if self.actions:
for num, action in enumerate(self.actions):
res += ' ' + str(num+1).zfill(2) + ' : ' + action + '\n'
else:
res += 'No actions specified\n'
return res
def path_cost(self):
"""
return the cost of the function - this needs to be subclassed
"""
print('TODO: cls_plan_search.path_cost not implemented')
return 1
def goal_test(self, state_to_check):
"""
Checks for success
"""
if isinstance(state_to_check, type(self.goal)):
print('TODO - cls_plan_search.goal_test : isinstance(state_to_check, type(self.goal))')
#return False
if state_to_check == self.goal:
return True
else:
return False
def get_successors(self):
"""
expand the children on the node (or successors)
to get the list of next nodes and their cost,
[[x1, 50], [x2, 24], [x3,545], [x5,32.1]]
"""
print('TODO: cls_plan_search.get_successors not implemented')
return [['x1', 50], ['x2', 24], ['x3',545], ['x5',32.1]]
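# --- Illustrative sketch (not part of the original module) ----------------
# Shows how a Problem definition might be built; the states and actions are
# invented example data. goal_test prints its TODO note, then returns True.
def _demo_problem():
    prob = Problem(goal=['at_home'], start=['at_work'],
                   actions=['walk to stop', 'catch tram'], name='get home')
    print(prob)
    print('goal reached =', prob.goal_test(['at_home']))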
class Plan(object):
"""
base class for AI planners to implement standard logging
"""
def __init__(self, nme, environ, target, start):
self.nme = nme
self.environ = environ
self.start = start
self.target = target
self.method = 'No method defined'
self.lg = mod_log.Log(mod_cfg.fldrs['log_folder']) #'T:\\user\\AIKIF')
self.lg.record_command('CLS_PLAN_SEARCH - starting Plan', nme)
def __str__(self):
res = 'Plan : ' + self.nme + '\n'
res += 'Method : ' + self.method + '\n'
res += 'Start : ' + ', '.join(str(p) for p in self.start) + '\n'
res += 'Target : ' + ', '.join(str(p) for p in self.target) + '\n'
return res
class PlanSearchAStar(Plan):
"""
Search algorithm using AStar.
"""
def __init__(self, nme, environ, target, start):
Plan.__init__(self, nme, environ, target, start)
self.opened = []
heapq.heapify(self.opened)
self.closed = set()
self.came_from = []
self.current = start
self.method = 'A*'
self.num_loops = 0
self.lg.record_source(','.join(str(p) for p in self.start), 'CLS_PLAN_SEARCH : Source = ')
self.lg.record_source(','.join(str(p) for p in self.target), 'CLS_PLAN_SEARCH : Target = ')
def heuristic_cost(self, start, target):
""" assumes start and target are an (x,y) grid """
(x1, y1) = start
(x2, y2) = target
return abs(x1 - x2) + abs(y1 - y2)
def get_min_f_score(self):
print('TODO: cls_plan_search.get_min_f_score not implemented')
return 1
def search(self):
print('TODO: not implemented - cls_plan_search.search()')
self.lg.record_process('CLS_PLAN_SEARCH', 'running A* search')
if self.target == self.current:
print('starting point is target')
self.lg.record_result('CLS_PLAN_SEARCH', 'Success - start == Target')
return 0
while self.opened:
self.num_loops += 1
self.lg.record_command('CLS_PLAN_SEARCH - Finished Plan', self.nme)
#Utilities and Search Algorithms
#(used by examples/ folder)
def list_2_str(lst):
return ', '.join(str(i) for i in lst)
def find_path_BFS(Graph,n,m):
"""
Breadth first search
"""
if m not in Graph:
return None
if n == m:
return [m]
path = [[n]]
searched = []
    while path:   # frontier empties out if m is unreachable (was: while True)
j = len(path)
#k = len(Graph[n])
for i in range(j):
node = path[i][-1]
for neighbor in Graph[node]:
if neighbor not in searched:
path.append(path[i]+[neighbor])
searched.append(neighbor)
if neighbor==m:
return path[-1]
for i in range(j):
path.pop(0)
    return None | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/lib/cls_plan_search.py | cls_plan_search.py
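# --- Illustrative sketch for cls_plan_search.py (not part of the original module)
# Small invented graph for find_path_BFS; note the function assumes every
# node appears as a key in the graph dictionary.
def _demo_find_path_bfs():
    graph = {'A': ['B', 'E'],
             'B': ['H', 'C', 'D'],
             'E': ['M'],
             'H': [], 'C': [], 'D': [], 'M': []}
    print(find_path_BFS(graph, 'A', 'M'))   # -> ['A', 'E', 'M']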
import os
#####################################################
# Change User Settings below (currently hard coded)
hosts = [
dict(type='Work PC', name='xxxxxxxxx'),
dict(type='Home PC', name='Treebeard'),
dict(type='Laptop', name='Ent'),
dict(type='Server', name='Fangorn'),
dict(type='Phone', name='GT-19001T'),
]
users = [
dict(type='Developer', name='Duncan'),
dict(type='User', name='murraydj'),
dict(type='Tester', name='test'),
dict(type='web_user', name='web*'),
]
transport = [
dict(type='None', name='inside'),
dict(type='Walk', name='Walk'),
dict(type='Car', name='Car'),
dict(type='Public',name='Tram'), # <-- edit your preferred public transport
]
#####################################################
physical = ['home', 'work', 'travelling']
files = []
usage = []
mode = ['work', 'business', 'email', 'web', 'games', 'media', 'nonPC']
tpe = ['passive', 'typing', 'clicking']
class Context(object):
"""
    This class makes a best guess to return a plain English version
    of what the user (you), this software (aikif) and the computer are doing.
"""
def __init__(self):
self.user = ''
self.username = ''
self.host = ''
self.hostname = ''
try:
self.user, self.username = self.get_user()
except Exception as ex:
print('Error:cls_context cant identify user ' + str(ex))
try:
self.host, self.hostname = self.get_host()
except Exception as ex:
print('Error:cls_context cant identify host ' + str(ex))
self.transport = self.inspect_phone()
self.summary = self.summarise()
self.host_cpu_pct, self.host_num_processes, self.host_mem_available, self.host_mem_total = self.get_host_usage()
def __str__(self):
return 'Hello, ' + self.username + '! You are a ' + self.user + ' using the ' + self.host + ' "' + self.hostname + '"'
def dump_all(self, silent='NO'):
"""
prints all attributes and returns them as a dictionary
(mainly for testing how it will all integrate)
"""
all_params = []
all_params.append(dict(name='phone', val=self.transport))
all_params.append(dict(name='username', val=self.username))
all_params.append(dict(name='user', val=self.user))
all_params.append(dict(name='hostname', val=self.hostname))
all_params.append(dict(name='host', val=self.host))
all_params.append(dict(name='cpu_pct', val=self.host_cpu_pct))
all_params.append(dict(name='num_proc', val=self.host_num_processes))
all_params.append(dict(name='mem_avail', val=self.host_mem_available))
all_params.append(dict(name='mem_total', val=self.host_mem_total))
        if silent == 'NO':   # silent defaults to 'NO', meaning do print
for a in all_params:
print(a['name'].ljust(14) + '= ' + a['val'])
return all_params
def summarise(self):
""" extrapolate a human readable summary of the contexts """
res = ''
if self.user == 'Developer':
if self.host == 'Home PC':
res += 'At Home'
else:
res += 'Away from PC'
elif self.user == 'User' and self.host == 'Home PC':
res += 'Remote desktop into home PC'
res += '\n'
res += self.transport
return res
def is_user_busy(self):
""" determines from user details if user is busy or not """
if self.phone_on_charge is True and self.user == 'Developer':
return False
else:
return True
def is_host_busy(self):
""" determines from host details if computer is busy or not """
        if float(self.host_cpu_pct) > 25 or int(self.host_mem_available) < 500000:
            return True    # high CPU load or low free memory means the host is busy
        else:
            return False
def get_host(self):
"""
returns the host computer running this program
"""
import socket
host_name = socket.gethostname()
        for h in hosts:
            if h['name'] == host_name:
                return h['type'], h['name']
        return 'Unknown', host_name    # host not found in the lookup table above
def get_user(self):
"""
returns the username on this computer
"""
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.environ.get(name)
if user:
break
        for u in users:
            if u['name'] == user:
                return u['type'], u['name']
        return 'Unknown', user    # user not found in the lookup table above
def inspect_phone(self, gps_lat_long = [137.0000,100.0000], moving = False, move_dist_2_mn = 4, on_charge = True, screen_saver = False):
"""
FUNCTION STUB - TODO
The intention is to get data from the mobile in the format:
gps_lat = 137.000
gps_lng = 100.000
moving = True | False
move_dist_10_sc = 0
move_dist_2_mn = 4
move_dist_10_mn = 4
move_dist_2_hr = 4
screen_saver = True | False
on_charge = True | False
"""
self.phone_gps_lat = gps_lat_long[0]
self.phone_gps_lng = gps_lat_long[1]
self.phone_moving = moving
self.phone_move_dist_2_mn = move_dist_2_mn
self.phone_on_charge = on_charge
self.screen_saver = screen_saver
#-------------------------------
phone_status = ''
if self.phone_on_charge is True:
phone_status += 'Phone is charging'
if self.phone_moving is True:
phone_status += ', driving in Car'
else:
phone_status += ', sitting still'
else:
if self.screen_saver is False:
phone_status += 'Phone is being used'
else:
phone_status += 'Phone is off'
if self.phone_moving is True:
if self.phone_move_dist_2_mn < 5:
phone_status += ', going for Walk'
elif self.phone_move_dist_2_mn > 500:
phone_status += ', flying on Plane'
else:
phone_status += ', riding on public transport'
return phone_status
def get_host_usage(self):
""" get details of CPU, RAM usage of this PC """
import psutil
process_names = [proc.name for proc in psutil.process_iter()]
cpu_pct = psutil.cpu_percent(interval=1)
mem = psutil.virtual_memory()
return str(cpu_pct), str(len(process_names)), str(mem.available), str(mem.total) | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/lib/cls_context.py | cls_context.py |
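# --- Illustrative sketch for cls_context.py (not part of the original module)
# Context() inspects the local machine, so this only gives meaningful output
# when the hosts/users tables above match your environment (psutil required).
def _demo_context():
    c = Context()
    print(c)
    c.dump_all(silent='NO')   # print every collected attribute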
import os
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + "..") # used by tests
def count_lines_in_file(src_file):
    """
    counts the lines in src_file and returns a short status string.
    """
tot = 0
res = ''
try:
with open(src_file, 'r') as f:
for line in f:
tot += 1
res = str(tot) + ' recs read'
    except Exception:
        res = 'ERROR -couldnt open file'
return res
def load_txt_to_sql(tbl_name, src_file_and_path, src_file, op_folder):
"""
    creates the SQL*Loader scripts (CREATE table, backout script and
    control file) needed to load a text file into a database table.
    Note that src_file is only used to read the column headings.
"""
if op_folder == '':
pth = ''
else:
pth = op_folder + os.sep
fname_create_script = pth + 'CREATE_' + tbl_name + '.SQL'
fname_backout_file = pth + 'BACKOUT_' + tbl_name + '.SQL'
fname_control_file = pth + tbl_name + '.CTL'
cols = read_csv_cols_to_table_cols(src_file)
create_script_staging_table(fname_create_script, tbl_name, cols)
create_file(fname_backout_file, 'DROP TABLE ' + tbl_name + ' CASCADE CONSTRAINTS;\n')
create_CTL(fname_control_file, tbl_name, cols, 'TRUNCATE')
def create_BAT_file(fname_batch_file, tbl_name, src_file_and_path, src_file, par_file):
with open(fname_batch_file, 'w') as f:
f.write('REM Loads ' + tbl_name + ' from ' + src_file + '\n')
f.write("sqlldr parfile='" + par_file + "'" + get_CTL_log_string(tbl_name, src_file_and_path))
######################################
# Internal Functions and Classes
######################################
def create_script_staging_table(fname_create, output_table, col_list):
ddl_text = '---------------------------------------------\n'
ddl_text += '-- CREATE Table - ' + output_table + '\n'
ddl_text += '---------------------------------------------\n'
ddl_text += ''
ddl_text += 'CREATE TABLE ' + output_table + ' (\n '
ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in col_list])
ddl_text += ' REC_EXTRACT_DATE DATE \n' # + src_table + '; \n'
ddl_text += ');\n'
with open(fname_create, "w") as f:
f.write(ddl_text)
def create_CTL(fname, tbl_name, col_list, TRUNC_OR_APPEND, delim=','):
"""
create_CTL(fname_control_file, tbl_name, src_file, cols, 'TRUNCATE')
"""
with open(fname, 'w') as ct:
ct.write('LOAD DATA\n')
ct.write(TRUNC_OR_APPEND + '\n')
ct.write('into table ' + tbl_name + '\n')
ct.write("fields terminated by '" + delim + "'\n")
ct.write('optionally Enclosed by \'"\'\n')
ct.write('TRAILING NULLCOLS\n')
ct.write('(\n')
ct.write(',\n'.join(c for c in col_list ))
ct.write(')\n')
def get_CTL_log_string(tbl_name, fname):
ctl_details = ''
ctl_details += " log='" + tbl_name + ".log'"
ctl_details += " bad='" + tbl_name + ".bad'"
ctl_details += " discard='" + tbl_name + ".discard'"
ctl_details += " control=" + tbl_name + '.CTL'
ctl_details += " data='" + fname + "'\n"
return ctl_details
def get_cols(fname):
with open(fname, 'r') as f:
cols = f.readline().strip('\n').split('|')
return cols
def read_csv_cols_to_table_cols(fname):
with open(fname, 'r') as f:
cols = f.readline().strip('\n').split(',')
return [c.upper().strip(' ').replace(' ', '_') for c in cols ]
def create_file(fname, txt):
with open(fname, 'w') as f:
f.write(txt + '\n') | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/sql_tools.py | sql_tools.py |
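# --- Illustrative sketch for sql_tools.py (not part of the original module)
# Builds the CREATE, BACKOUT and CTL loader scripts for a small invented CSV;
# the table name STG_PEOPLE is hypothetical.
def _demo_load_txt_to_sql():
    import tempfile
    fldr = tempfile.mkdtemp()
    src = os.path.join(fldr, 'people.csv')
    create_file(src, 'first name,last name,age\nAlice,Smith,42')
    load_txt_to_sql('STG_PEOPLE', src, src, fldr)
    print(os.listdir(fldr))   # CREATE_STG_PEOPLE.SQL, BACKOUT_STG_PEOPLE.SQL, STG_PEOPLE.CTL, people.csv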
import os
import sys
import aikif.cls_log as mod_log
import aikif.config as mod_cfg
class Toolbox(object):
"""
Class to manage the functional tools (programs or functions) that the AI can use
The toolbox starts as a subset of the Programs class (Programs manage the users
list of written programs and applications), and its purpose is to have an interface
that an AI can use to run its own tasks.
The toolbox is basically detailed documentation and interfaces for any program or
function that is robust enough to be used.
The first use of this will be the dataTools 'identify columns' function which calls
a solver from this managed list
"""
def __init__(self, fldr=None, lst=None):
self.fldr = fldr
self.lg = mod_log.Log(mod_cfg.fldrs['log_folder'])
if lst is None:
self.lstTools = []
else:
self.lstTools = lst
self.lg.record_command('Toolbox')
self.lg.record_source(fldr)
def __str__(self):
"""
returns list of tools
"""
res = ''
for tool in self.lstTools:
res += self._get_tool_str(tool)
return res
def _get_tool_str(self, tool):
"""
get a string representation of the tool
"""
res = tool['file']
try:
res += '.' + tool['function']
        except KeyError:
            print('Warning - no function defined for tool ' + str(tool))
res += '\n'
return res
def get_tool_by_name(self, nme):
"""
get the tool object by name or file
"""
for t in self.lstTools:
if 'name' in t:
if t['name'] == nme:
return t
if 'file' in t:
if t['file'] == nme:
return t
return None
def add(self, tool):
"""
        Adds a Tool to the list and logs the reference
"""
self.lstTools.append(tool)
self.lg.record_process(self._get_tool_str(tool))
def list(self):
"""
Display the list of items
"""
for i in self.lstTools:
print (i)
return self.lstTools
def tool_as_string(self, tool):
"""
return the string of the filename and function to call
"""
return self._get_tool_str(tool)
def save(self, fname=''):
"""
        Save the list of tools to the local file fname, if given
        (saving to AIKIF core is not yet implemented)
"""
if fname != '':
with open(fname, 'w') as f:
for t in self.lstTools:
self.verify(t)
f.write(self.tool_as_string(t))
def verify(self, tool):
"""
check that the tool exists
"""
if os.path.isfile(tool['file']):
print('Toolbox: program exists = TOK :: ' + tool['file'])
return True
else:
print('Toolbox: program exists = FAIL :: ' + tool['file'])
return False
def run(self, tool, args, new_import_path=''):
"""
import the tool and call the function, passing the args.
"""
if new_import_path != '':
#print('APPENDING PATH = ', new_import_path)
sys.path.append(new_import_path)
#if silent == 'N':
print('main called ' + tool['file'] + '->' + tool['function'] + ' with ', args, ' = ', tool['return'])
mod = __import__( os.path.basename(tool['file']).split('.')[0]) # for absolute folder names
# mod = __import__( tool['file'][:-2]) # for aikif folders (doesnt work)
func = getattr(mod, tool['function'])
tool['return'] = func(args)
return tool['return'] | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/Toolbox.py | Toolbox.py |
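# --- Illustrative sketch for Toolbox.py (not part of the original module) -
# The tool dictionary layout (file / function / return keys) is inferred from
# the methods above; the script name is hypothetical, so verify() prints FAIL
# unless it exists. Assumes the aikif log folder is configured.
def _demo_toolbox():
    tb = Toolbox(fldr='.')
    tool = {'file': 'solve_happiness.py', 'function': 'solve', 'return': ''}
    tb.add(tool)
    tb.verify(tool)
    print(tb)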
import os
import zipfile
import gzip
import tarfile
import fnmatch
def extract_all(zip_file_name, dest_folder):
"""
reads the zip file, determines compression
and unzips recursively until source files
are extracted
"""
    z = ZipFile(zip_file_name)   # arg renamed so it does not shadow the zipfile module
print(z)
z.extract(dest_folder)
def create_zip_from_file(zip_file, fname):
"""
add a file to the archive
"""
with zipfile.ZipFile(zip_file, 'w') as myzip:
myzip.write(fname)
def create_gz_from_content(gz_file, binary_content):
with gzip.open(gz_file, 'wb') as f:
f.write(binary_content)
def create_tar_from_files(tar_file, fl):
with tarfile.open(tar_file, "w:gz") as tar:
for name in fl:
tar.add(name)
def create_zip_from_folder(zip_file, fldr, mode="r"):
"""
add all the files from the folder fldr
to the archive
"""
#print('zip from folder - adding folder : ', fldr)
zipf = zipfile.ZipFile(zip_file, 'w')
for root, dirs, files in os.walk(fldr):
for file in files:
fullname = os.path.join(root, file)
#print('zip - adding file : ', fullname)
zipf.write(fullname)
zipf.close()
class ZipFile(object):
def __init__(self, fname):
self.fname = fname
self.type = self._determine_zip_type()
def __str__(self):
return self.fname + ' is type ' + self.type
def _determine_zip_type(self):
xtn = self.fname[-3:].upper()
#print('_' + xtn + '_', self.fname)
if xtn == 'ZIP':
return 'ZIP'
elif xtn == '.GZ':
return 'GZ'
elif xtn == 'TAR':
return 'TAR'
else:
print('Unknown file type - TODO, examine header')
return 'Unknown'
    def _extract_zip(self, fldr, password=''):
        z = zipfile.ZipFile(self.fname)
        if type(password) is str:
            password = password.encode('utf-8')   # pwd must be bytes, eg b'SECRET'
        z.extractall(fldr, pwd=password if password else None)
def _extract_gz(self, fldr, password=''):
with gzip.open(self.get_file_named(fldr, '*.gz'), 'rb') as fip:
file_content = fip.read()
with open(fldr + os.sep + 'temp.tar', 'wb') as fop:
fop.write(file_content)
    def _extract_tar(self, fldr, tar_file=''):
        if tar_file == '':
            tar_file = fldr + os.sep + 'temp.tar'   # was missing this assignment
        tar = tarfile.open(tar_file)
        for item in tar:
            tar.extract(item, path=fldr)
        tar.close()
def extract(self, dest_fldr, password=''):
"""
unzip the file contents to the dest_folder
        (created if it doesn't exist)
"""
#print('extracting to ' + dest_fldr)
if self.type == 'ZIP':
self._extract_zip(dest_fldr, password)
elif self.type == 'GZ':
self._extract_gz(dest_fldr, password)
elif self.type == 'TAR':
self._extract_tar(dest_fldr, self.fname)
else:
            raise ValueError('Unknown archive file type')   # raising a plain string is invalid in Python 3
def get_file_named(self, fldr, xtn):
"""
scans a directory for files like *.GZ or *.ZIP and returns
        the filename of the first one found (there should only be
        one of each file here)
"""
        res = []  # list of matching filenames
for root, _, files in os.walk(fldr):
for basename in files:
if fnmatch.fnmatch(basename, xtn):
filename = os.path.join(root, basename)
res.append(filename)
if len(res) > 0:
return res[0]
else:
return None | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/zip_tools.py | zip_tools.py |
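# --- Illustrative sketch for zip_tools.py (not part of the original module)
# Round trip: zip one small file, then extract it again via the ZipFile class.
def _demo_zip_roundtrip():
    import tempfile
    fldr = tempfile.mkdtemp()
    src = os.path.join(fldr, 'note.txt')
    with open(src, 'w') as f:
        f.write('hello zip')
    archive = os.path.join(fldr, 'note.zip')
    create_zip_from_file(archive, src)
    ZipFile(archive).extract(fldr)
    print('extracted under', fldr)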
import os
import urllib
import urllib.request
import aikif.config as mod_cfg
import aikif.cls_log as mod_log
import getpass
import socket
lg = mod_log.Log(os.getcwd()) # TODO - fix this. not the best way
def load_username_password(fname):
"""
use the config class to read credentials
"""
username, password = mod_cfg.read_credentials(fname)
return username, password # load_username_password
def get_user_name():
"""
get the username of the person logged on
"""
return getpass.getuser()
def get_host_name():
"""
get the computer name
"""
return socket.gethostname()
def get_web_page(url):
    """ fetch a web page and return its text, or '404' on failure """
    txtString = '404'
try:
rawText = urllib.request.urlopen(url).read()
txtString = str( rawText, encoding='utf8' )
except UnicodeError:
pass
return txtString
def download_file_no_logon(url, filename):
"""
download a file from a public website with no logon required
output = open(filename,'wb')
output.write(request.urlopen(url).read())
output.close()
"""
    request = urllib.request.Request(url)
try:
response = urllib.request.urlopen(request)
with open(filename,'wb') as f:
#print (response.read().decode('utf-8'))
f.write(response.read())
except Exception as ex:
lg.record_result("Error - cant download " + url + str(ex))
def get_protected_page(url, user, pwd, filename):
"""
having problems with urllib on a specific site so trying requests
"""
import requests
r = requests.get(url, auth=(user, pwd))
print(r.status_code)
if r.status_code == 200:
print('success')
with open(filename, 'wb') as fd:
for chunk in r.iter_content(4096):
fd.write(chunk)
lg.record_result("Success - downloaded " + url)
else:
lg.record_result('network_tools.get_protected_page:Failed to downloaded ' + url + ' (status code = ' + str(r.status_code) + ')')
def download_file(p_realm, p_url, p_op_file, p_username, p_password):
"""
Currently not working...
# https://docs.python.org/3/library/urllib.request.html#examples
# Create an OpenerDirector with support for Basic HTTP Authentication...
"""
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(realm=p_realm,
uri=p_url,
user=p_username,
passwd=p_password)
opener = urllib.request.build_opener(auth_handler)
# ...and install it globally so it can be used with urlopen.
urllib.request.install_opener(opener)
web = urllib.request.urlopen(p_url)
with open(p_op_file, 'w') as f:
f.write(web.read().decode('utf-8'))
def download_file_proxy(p_url, p_op_file, p_username, p_password, proxies):
"""
Currently fails behind proxy...
# https://docs.python.org/3/library/urllib.request.html#examples
"""
chunk_size=4096
import requests
r = requests.get(p_url, auth=(p_username, p_password), proxies=proxies)
#print(r.status_code)
with open(p_op_file, 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
return r.status_code | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/network_tools.py | network_tools.py |
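# --- Illustrative sketch for network_tools.py (not part of the original module)
# Needs a live internet connection; example.com is just a safe, public test URL.
def _demo_get_page():
    html = get_web_page('http://example.com')
    print(html[:80])
    download_file_no_logon('http://example.com', 'example.html')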
import os
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageFilter
from PIL.ExifTags import TAGS, GPSTAGS
from PIL import ImageStat
from PIL import ImageOps
try:
from PIL import ImageGrab
except Exception as ex:
print('cant load ImageGrab (running on Linux)' + str(ex))
def screenshot(fname):
"""
takes a screenshot of the users desktop (Currently Win only)
"""
try:
im = ImageGrab.grab()
im.save(fname)
except Exception as ex:
print('image_tools.screenshot:cant create screenshot ' + str(ex))
def get_exif_data(image):
"""
Returns a dictionary from the exif data of
    a PIL Image item. Also converts the GPS tags
"""
exif_data = {}
info = image._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
return exif_data
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _convert_to_degress(value):
"""
Helper function to convert the GPS coordinates
    stored in the EXIF to degrees in float format
"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
def get_lat_lon(exif_data):
"""
Returns the latitude and longitude, if available, from the
provided exif_data (obtained through get_exif_data above)
"""
lat = None
lon = None
if "GPSInfo" in exif_data:
gps_info = exif_data["GPSInfo"]
print("IN GET_LAT_LONG - GPSInfo = ", gps_info)
gps_latitude = _get_if_exist(gps_info, "GPSLatitude")
gps_latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')
gps_longitude = _get_if_exist(gps_info, 'GPSLongitude')
gps_longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = _convert_to_degress(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
lon = _convert_to_degress(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
return lat, lon
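# --- Illustrative sketch (not part of the original module) ----------------
# Typical flow for the two helpers above; 'photo.jpg' is a placeholder for
# any JPEG that carries EXIF GPS tags.
def _demo_gps_from_photo(fname='photo.jpg'):
    img = Image.open(fname)
    exif_data = get_exif_data(img)
    lat, lon = get_lat_lon(exif_data)
    print('lat =', lat, 'lon =', lon)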
def resize(fname, basewidth, opFilename):
""" resize an image to basewidth """
if basewidth == 0:
basewidth = 300
img = Image.open(fname)
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth,hsize), Image.ANTIALIAS)
img.save(opFilename)
#print("Resizing ", fname, " to ", basewidth, " pixels wide to file " , opFilename)
def print_stats(img):
""" prints stats, remember that img should already have been loaded """
stat = ImageStat.Stat(img)
print("extrema : ", stat.extrema)
print("count : ", stat.count)
print("sum : ", stat.sum)
print("sum2 : ", stat.sum2)
print("mean : ", stat.mean)
print("median : ", stat.median)
print("rms : ", stat.rms)
print("var : ", stat.var)
print("stddev : ", stat.stddev)
#def print_exif_data(img):
# """ NOTE - the img is ALREADY opened by calling function """
# try:
# exif_data = {
# TAGS[k]: v
# for k, v in img._getexif().items()
# if k in TAGS
# }
# for k,v in exif_data.items():
# if k:
# if type(v) is str:
# #if v[1:] != 'b':
# print (k , " : ", v)
# elif type(v) is int:
# print (k , " : ", v)
# elif type(v) is tuple:
# print (k , " : ", v)
# else:
# if k == "GPSInfo":
# pass
# except Exception:
# print ("Error - ", sys.exc_info()[0])
def print_all_metadata(fname):
""" high level that prints all as long list """
print("Filename :", fname )
print("Basename :", os.path.basename(fname))
print("Path :", os.path.dirname(fname))
print("Size :", os.path.getsize(fname))
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
# get the largest dimension
#max_dim = max(img.size)
print("Width :", width)
print("Height :", height)
print("Format :", img.format)
print("palette :", img.palette )
print_stats(img)
#print_exif_data(img)
exif_data = get_exif_data(img)
(lat, lon) = get_lat_lon(exif_data)
print("GPS Lat :", lat )
print("GPS Long :", lon )
def metadata_header():
hdr = [
'Filename',
'Basename',
'Path',
'Size',
'Width',
'Height',
'Format',
'palette',
'count',
'sum',
'sum2',
'mean',
'median',
'rms',
'var',
'stddev',
'GPS_Lat',
'GPS_Long'
]
return hdr
def get_metadata_as_dict(fname):
""" Gets all metadata and puts into dictionary """
imgdict = {}
try:
imgdict['filename'] = fname
imgdict['size'] = str(os.path.getsize(fname))
imgdict['basename'] = os.path.basename(fname)
imgdict['path'] = os.path.dirname(fname)
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
imgdict['width'] = str(width)
imgdict['height'] = str(height)
imgdict['format'] = str(img.format)
imgdict['palette'] = str(img.palette)
stat = ImageStat.Stat(img)
#res = res + q + str(stat.extrema) + q + d
imgdict['count'] = List2String(stat.count, ",")
imgdict['sum'] = List2String(stat.sum, ",")
imgdict['sum2'] = List2String(stat.sum2, ",")
imgdict['mean'] = List2String(stat.mean, ",")
imgdict['median'] = List2String(stat.median, ",")
imgdict['rms'] = List2String(stat.rms, ",")
imgdict['var'] = List2String(stat.var, ",")
imgdict['stddev'] = List2String(stat.stddev, ",")
exif_data = get_exif_data(img)
print('exif_data = ', exif_data)
(lat, lon) = get_lat_lon(exif_data)
print('(lat, lon)', (lat, lon))
imgdict['lat'] = str(lat)
imgdict['lon'] = str(lon)
except Exception as ex:
print('problem reading image file metadata in ', fname, str(ex))
imgdict['lat'] = 'ERROR'
imgdict['lon'] = 'ERROR'
return imgdict
def get_metadata_as_csv(fname):
""" Gets all metadata and puts into CSV format """
q = chr(34)
d = ","
res = q + fname + q + d
res = res + q + os.path.basename(fname) + q + d
res = res + q + os.path.dirname(fname) + q + d
try:
res = res + q + str(os.path.getsize(fname)) + q + d
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
res = res + q + str(width) + q + d
res = res + q + str(height) + q + d
res = res + q + str(img.format) + q + d
res = res + q + str(img.palette) + q + d
stat = ImageStat.Stat(img)
#print(fname, width, height)
#res = res + q + str(stat.extrema) + q + d
res = res + q + List2String(stat.count, ",") + q + d
res = res + q + List2String(stat.sum, ",") + q + d
res = res + q + List2String(stat.sum2, ",") + q + d
res = res + q + List2String(stat.mean, ",") + q + d
res = res + q + List2String(stat.median, ",") + q + d
res = res + q + List2String(stat.rms, ",") + q + d
res = res + q + List2String(stat.var, ",") + q + d
res = res + q + List2String(stat.stddev, ",") + q + d
exif_data = get_exif_data(img)
(lat, lon) = get_lat_lon(exif_data)
res = res + q + str(lat) + q + d
res = res + q + str(lon) + q + d
except Exception as ex:
print('problem reading image file metadata in ', fname, str(ex))
return res
def List2String(l, delim):
res = ""
for v in l:
if is_number(v):
res = res + str(v) + delim
else:
res = res + v + delim
return res
def Dict2String(d):
res = ","
for k, v in d.items(): # .iteritems():
res = res + k + ',' + str(v) + ','
return res
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
#def auto_contrast(img, opFile):
# """ run the autocontrast PIL function to a new opFile """
# imgOp = ImageOps.autocontrast(img)
# imgOp.save(opFile)
def add_text_to_image(fname, txt, opFilename):
""" convert an image by adding text """
ft = ImageFont.load("T://user//dev//src//python//_AS_LIB//timR24.pil")
#wh = ft.getsize(txt)
print("Adding text ", txt, " to ", fname, " pixels wide to file " , opFilename)
im = Image.open(fname)
draw = ImageDraw.Draw(im)
draw.text((0, 0), txt, fill=(0, 0, 0), font=ft)
del draw
im.save(opFilename)
def add_crosshair_to_image(fname, opFilename):
""" convert an image by adding a cross hair """
im = Image.open(fname)
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill=(255, 255, 255))
draw.line((0, im.size[1], im.size[0], 0), fill=(255, 255, 255))
del draw
im.save(opFilename)
def filter_contour(imageFile, opFile):
""" convert an image by applying a contour """
im = Image.open(imageFile)
im1 = im.filter(ImageFilter.CONTOUR)
im1.save(opFile)
def detect_face(fname, opFile):
"""
TODO - not implemented
storage = cv.CreateMemStorage()
haar=cv.LoadHaarClassifierCascade('haarcascade_frontalface_default.xml')
detected = cv.HaarDetectObjects(fname, haar, storage, 1.2, 2,cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))
if detected:
for face in detected:
print (face, 'saving to ', opFile)
"""
print("detect_face NOT IMPLEMENTED: ", fname, opFile)
def check_image_duplicates(file_list):
""" Checking Images for duplicates (despite resizing, colours, etc) """
master_hash = ''
ham_dist = 0
results = []
print("Checking Images for duplicates (despite resizing, colours, etc) " )
for ndx, fname in enumerate(file_list):
#img = Image.open(fname)
img = load_image(fname)
hsh = get_img_hash(img)
if ndx == 0: # this is the test MASTER image
master_hash = hsh
else:
# compare hamming distance against image 1
#print("hash=" + hash + " MASTER_HASH=" + master_hash)
ham_dist = hamming_distance(hsh, master_hash)
#print(hsh + " <- " + fname + " , hamming dist to img1 = " + str(ham_dist))
#results.append(hsh + " <- " + fname + " , hamming dist to img1 = " + str(ham_dist))
results.append({'hsh':hsh, 'fname':fname, 'dist_to_img1':str(ham_dist)})
return results
def hamming_distance(s1, s2):
"""
Return the Hamming distance between equal-length sequences
(http://en.wikipedia.org/wiki/Hamming_distance )
"""
if len(s1) != len(s2):
raise ValueError("Undefined for sequences of unequal length")
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
def get_img_hash(image, hash_size = 8):
""" Grayscale and shrink the image in one step """
image = image.resize((hash_size + 1, hash_size), Image.ANTIALIAS, )
pixels = list(image.getdata())
#print('get_img_hash: pixels=', pixels)
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2**(index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
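# --- Illustrative sketch (not part of the original module) ----------------
# Two identical uniform images hash to the same string, so their hamming
# distance is 0; real photos diverge as their content differs.
def _demo_image_hashes():
    img1 = Image.new('L', (32, 32), color=200)
    img2 = Image.new('L', (32, 32), color=200)
    print(hamming_distance(get_img_hash(img1), get_img_hash(img2)))   # -> 0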
def load_image(fname):
    """ read an image from file and force a full load so the
        file handle can be closed (PIL reads lazily by default) """
    with open(fname, "rb") as f:
        i = Image.open(f)
        i.load()
    return i
def dump_img(fname):
    """ output the image as text, one row of pixels per line """
    img = Image.open(fname)
    width, height = img.size
    txt = ''
    pixels = list(img.getdata())
    for row in range(height):
        txt += str(pixels[row * width:(row + 1) * width]) + '\n'
    return txt | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/image_tools.py | image_tools.py
import os
import random
import sys
cur_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
lib_folder = os.path.abspath(cur_folder )
aikif_folder = os.path.abspath(cur_folder + os.sep + ".." )
import aikif.toolbox.cls_grid as mod_grid #
class GameOfLife(mod_grid.Grid):
"""
Extend Grid class to support Game of Life
"""
def update_gol(self):
"""
Function that performs one step of the Game of Life
"""
updated_grid = [[self.update_cell(row, col) \
for col in range(self.get_grid_width())] \
for row in range(self.get_grid_height())]
self.replace_grid(updated_grid)
def update_cell(self, row, col):
"""
Function that computes the update for one cell in the Game of Life
"""
# compute number of living neighbors
neighbors = self.eight_neighbors(row, col)
living_neighbors = 0
for neighbor in neighbors:
if not self.is_empty(neighbor[0], neighbor[1]):
living_neighbors += 1
# logic for Game of life
if (living_neighbors == 3) or (living_neighbors == 2 and not self.is_empty(row, col)):
return mod_grid.FULL
else:
return mod_grid.EMPTY
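# --- Illustrative sketch (not part of the original module) ----------------
# Steps a blinker oscillator once; Grid takes (height, width, pieces).
def _demo_game_of_life():
    gol = GameOfLife(5, 5, [mod_grid.FULL, mod_grid.FULL])
    for col in (1, 2, 3):
        gol.set_full(2, col)      # horizontal blinker
    print(gol)
    gol.update_gol()
    print(gol)                    # the blinker is now vertical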
class GameOfLifePatterns(object):
"""
class to generate patterns on a grid for Game of Life
All patterns have a start pos of 2,2 so needs to be offset
by calling program or random screen generation
"""
def __init__(self, num_patterns, max_x=77, max_y=21):
self.patterns = []
self.max_x = max_x
self.max_y = max_y
self.pattern_list = ['block','beehive','loaf', # stationary
'boat','blinker','toad','beacon', # oscillators
'glider' # gliders
]
for _ in range(num_patterns):
pattern_to_add = random.choice(self.pattern_list)
            methodToCall = getattr(sys.modules[__name__], pattern_to_add)   # __name__ works even when imported as aikif.toolbox.cls_grid_life
result = methodToCall()
self.patterns.extend(self.random_offset(self.bitmap_pattern_to_list(result)))
def get_patterns(self):
""" return the list of patterns """
return self.patterns
def bitmap_pattern_to_list(self, bmp_pat):
"""
takes a list of bitmap points (drawn via Life screen)
and converts to a list of full coordinates
"""
        res = []
        y = 1
        lines = bmp_pat.split('\n')
        for line in lines:
            y += 1
            x = 1    # reset the column counter at the start of each line
            for char in line:
                x += 1
                if char == 'X':
                    res.append([y, x])
return res
def random_offset(self, lst):
"""
offsets a pattern list generated below to a random
position in the grid
"""
res = []
x = random.randint(4,self.max_x - 42)
y = random.randint(4,self.max_y - 10)
for itm in lst:
res.append([itm[0] + y, itm[1] + x])
return res
# Patterns are below outside the class to allow for simpler importing
# still lifes
def block():
return '\n'.join([
'.XX.......',
'.XX.......',
'..........',
'..........',
'..........'])
def beehive():
return '\n'.join([
'..........',
'..XX......',
'.X..X.....',
'..XX......',
'..........'])
def loaf():
return '\n'.join([
'..........',
'...XX.....',
'..X..X....',
'...X.X....',
'....X.....'])
def boat():
return '\n'.join([
'..........',
'.XX.......',
'.X.X......',
'..X.......',
'..........'])
# Oscillators
def blinker():
return '\n'.join([
'..........',
'...XXX....',
'..........',
'..........',
'..........'])
def toad():
return '\n'.join([
'..........',
'..XXX.....',
'.XXX......',
'..........',
'..........'])
def beacon():
return '\n'.join([
'.XX.......',
'.XX.......',
'...XX.....',
'...XX.....',
'..........'])
# Spaceships
def glider():
return '\n'.join([
'..........',
'......X...',
'....X.X...',
'.....XX...',
'..........'])
def _BLANK():
return '\n'.join([
'..........',
'..........',
'..........',
'..........',
'..........']) | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/cls_grid_life.py | cls_grid_life.py |
import os
import sys
from xml.sax import parse
from xml.sax.saxutils import XMLGenerator
from xml.etree.ElementTree import iterparse
import aikif.lib.cls_file as mod_file
# Public facing functions
###############################
def get_xml_stats(fname):
"""
return a dictionary of statistics about an
    XML file including size in bytes, number of
    lines and the date modified
"""
f = mod_file.TextFile(fname)
res = {}
res['shortname'] = f.name
res['folder'] = f.path
res['filesize'] = str(f.size) + ' bytes'
res['num_lines'] = str(f.lines) + ' lines'
res['date_modified'] = f.GetDateAsString(f.date_modified)
return res
def make_random_xml_file(fname, num_elements=200, depth=3):
"""
makes a random xml file mainly for testing the xml_split
"""
with open(fname, 'w') as f:
f.write('<?xml version="1.0" ?>\n<random>\n')
for dep_num, _ in enumerate(range(1,depth)):
f.write(' <depth>\n <content>\n')
#f.write('<depth' + str(dep_num) + '>\n')
for num, _ in enumerate(range(1, num_elements)):
f.write(' <stuff>data line ' + str(num) + '</stuff>\n')
#f.write('</depth' + str(dep_num) + '>\n')
f.write(' </content>\n </depth>\n')
f.write('</random>\n')
def split_xml(fname, element, num_elements):
parse(fname, XMLBreaker(element, break_after=num_elements, out=CycleFile(fname)))
def count_elements(fname, element):
"""
returns (511, 35082) for ANC__WhereToHongKong.xml
"""
num = 0
tot = 0
for event, elem in iterparse(fname):
tot += 1
if elem.text != '':
#print(' tag = ', elem.tag)
#print(' event = ', event # always end
#print(' text = ', elem.text)
pass
if element in elem.tag:
#print(elem.xpath( 'description/text( )' ))
#print(elem.text)
num += 1
elem.clear()
return num, tot
def count_via_minidom(fname, tag_name):
from xml.dom.minidom import parseString
print('count_via_minidom : reading ' + fname)
    with open(fname, 'r') as f:
        data = f.read()
dom = parseString(data)
return len(dom.getElementsByTagName(tag_name))
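# --- Illustrative sketch (not part of the original module) ----------------
# Generates a small random XML file and reports element counts using the
# helpers above; the file location is invented.
def _demo_xml_counts():
    import tempfile
    fname = os.path.join(tempfile.mkdtemp(), 'random.xml')
    make_random_xml_file(fname, num_elements=10, depth=2)
    num, tot = count_elements(fname, 'stuff')
    print(num, 'stuff elements out of', tot, 'elements in total')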
# internal classes for toolkit
###############################
"""
def fast_iter(context, func, *args, **kwargs):
#http://lxml.de/parsing.html#modifying-the-tree
#Based on Liza Daly's fast_iter
#http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
#See also http://effbot.org/zone/element-iterparse.htm
for event, elem in context:
func(elem, *args, **kwargs)
# It's safe to call clear() here because no descendants will be
# accessed
elem.clear()
# Also eliminate now-empty references from the root node to elem
for ancestor in elem.xpath('ancestor-or-self::*'):
while ancestor.getprevious() is not None:
del ancestor.getparent()[0]
del context
def process_element(elem):
print(elem.xpath( 'description/text( )' ))
"""
class CycleFile(object):
def __init__(self, filename):
self.basename, self.ext = os.path.splitext(filename)
self.index = 0
self.open_next_file()
def open_next_file(self):
self.index += 1
filename = self.basename + str(self.index) + self.ext
print('CycleFile:open_next_file: filename = ', filename)
self.file = open(filename, 'w', encoding='utf-8', errors='ignore' )
def cycle(self):
print('CycleFile:cycle: self.index = ', self.index)
self.file.close()
self.open_next_file()
    def write(self, txt):
        """ write str or bytes content to the current file """
        if type(txt) is str:
            self.file.write(txt)
        else:
            self.file.write(txt.decode(encoding='utf-8'))
def close(self):
self.file.close()
class XMLBreaker(XMLGenerator):
def __init__(self, break_into=None, break_after=200, out=None, *args, **kwargs):
XMLGenerator.__init__(self, out, *args, **kwargs)
self.out_file = out
self.break_into = break_into
self.break_after = break_after
self.context = []
self.count = 0
print('XMLBreaker __init__ : ', break_into, break_after)
def start_element(self, name, attrs):
XMLGenerator.start_element(self, name, attrs)
self.context.append((name, attrs))
def end_element(self, name):
XMLGenerator.end_element(self, name)
self.context.pop()
print('end_element:name = ', name, ', self.break_into = ', self.break_into)
if name == self.break_into:
self.count += 1
if self.count >= self.break_after:
self.count = 0
for element in reversed(self.context):
self.out_file.write("\n")
XMLGenerator.end_element(self, element[0])
self.out_file.cycle()
for element in self.context:
XMLGenerator.start_element(self, *element)
class XmlFile(mod_file.TextFile):
"""
Xml specific details derived from File object
"""
def __init__(self, fname):
super(XmlFile, self).__init__(fname)
self.element_count = self.count_elements_in_file()
self.lines = self.count_lines_in_file()
def __str__(self):
""" display the text file sample """
txt = super(mod_file.TextFile, self).__str__()
txt += '| TextFile = ' + str(self.lines) + ' lines\n'
txt += '| XmlFile = ' + str(self.element_count) + ' elements\n'
return txt
def count_elements_in_file(self, tag_name='MindOntology_Definition'):
return count_via_minidom(self.fullname , tag_name) | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/xml_tools.py | xml_tools.py |
import os
try:
from PIL import Image
except ImportError:
print("--------------------------------------------------------------------------")
print("Error: Cant import PIL")
print("you need to run 'easy_install pillow' for PIL functionality in 3.4")
print("--------------------------------------------------------------------------\n")
"""
SCIPY examples with PIL
https://scipy-lectures.github.io/advanced/image_processing/
http://docs.scipy.org/doc/scipy/reference/tutorial/ndimage.html
http://docs.scipy.org/doc/scipy/reference/ndimage.html
http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.measurements.find_objects.html
TODO
=====
Attempt to use a path finding / line trace method to detect objects
drawn on a map rather than traditional pattern match against existing
set of training images.
The reason is that it would be easier to define map objects mathematically
and it should be similar to the path finding routine that follows the
roads
"""
def TEST(fname):
"""
Test function to step through all functions in
order to try and identify all features on a map
This test function should be placed in a main
section later
"""
m = MapObject(fname, os.path.join(os.getcwd(), 'img_prog_results'))
m.add_layer(ImagePathFollow('border'))
m.add_layer(ImagePathFollow('river'))
m.add_layer(ImagePathFollow('road'))
m.add_layer(ImageArea('sea', col='Blue', density='light'))
m.add_layer(ImageArea('desert', col='Yellow', density='med'))
    m.add_layer(ImageArea('forest', col='Dark Green', density='light'))
m.add_layer(ImageArea('fields', col='Green', density='light'))
m.add_layer(ImageObject('mountains'))
m.add_layer(ImageObject('trees'))
m.add_layer(ImageObject('towns'))
##############################################
# Main Functions to call from external apps #
##############################################
def get_roads(fname):
"""
takes a filename and returns a vector map
(or possibly an image layer) of roads on
the map
"""
return []
##############################################
# Utility functions #
##############################################
def img_to_array(fname):
"""
    takes a JPG file name and loads it into a PIL Image object
"""
img = Image.open(fname)
return img
##############################################
# Classes used for managing image detection #
##############################################
class MapObject(object):
"""
base class used to hold objects detected on a map.
Takes a filename which it loads into the first layer
of an array.
Additional layers are added mapped to same [x,y]
points for each additional feature extraction done.
"""
def __init__(self, fname='', op_folder=''):
self.arr = []
self.arr.append(img_to_array(fname))
def add_layer(self, layer):
"""
creates a new layer in the self.arr and sets
to zeros,ready for next image processing task
"""
self.arr.append(layer)
class ImagePathFollow(object):
"""
class to handle the long lines line
drawn on the map - eg borders, rivers, roads,
"""
def __init__(self, nme):
#super(ImagePathFollow, self).__init__(fname, op_folder)
self.nme = nme
self.arr = []
class ImageObject(object):
"""
class to handle the list of small objects that are
drawn on the map - eg mountains, trees, towns
"""
def __init__(self, nme):
#super(ImagePathFollow, self).__init__(fname, op_folder)
self.nme = nme
self.arr = []
class ImageArea(object):
"""
class to handle the list of AREAS on the map.
These are the regions for sea, land, forest,
desert and are mainly based on colours or
styles.
mountains, trees
"""
def __init__(self, nme, col, density):
#super(ImagePathFollow, self).__init__(fname, op_folder)
self.nme = nme
self.arr = []
self.col = col
self.density = density | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/image_detection_tools.py | image_detection_tools.py |
#----------------------------
# Classes for Data Structures
class Node(object):
"""
Node data structure
"""
def __init__(self, name, data=None):
"""
takes a name and optional data
"""
self.name = name
self.data = data
self.parent = None
self.depth = 0
self.links = []
def __str__(self):
res = self.name + ' : ( ' + str(len(self.links)) + ' links)\n'
for link in self.links:
            res += ' ' + link.name
return res
def __eq__(self, other):
if not isinstance(other, Node):
return False
return self.name == other.name
def add_link(self, node):
self.links.append(node)
node.parent = self
def get_children(self):
""" returns list of child nodes """
return self.links
def get_parent(self):
""" returns parent node """
return self.parent
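# --- Illustrative sketch (not part of the original module) ----------------
# Builds a tiny two-level tree with the Node class above.
def _demo_nodes():
    root = Node('A')
    for child_name in ('B', 'E'):
        root.add_link(Node(child_name))
    print(root)                                        # A : ( 2 links) ...
    print(root.get_children()[0].get_parent().name)    # -> A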
class Graph(object):
def __init__(self, graph):
""" takes a graph definition as input
e.g. the following tree is encoded as follows:
A
/ \
B E
/ | \ \
H C D M
would be entered as
{ 'A': ['B', 'E'],
'B': ['H', 'C', 'D'],
'E': ['M'] }
"""
self.graph = graph
self.nodes = []
self.links = []
self.adj_matrix = []
def __str__(self):
""" display as raw data """
return str(self.graph)
def get_adjacency_matrix(self, show_in_console=False):
""" return the matrix as a list of lists
raw graph = {'1': ['2', '3', '4'], '2': ['6', '7']}
6 nodes: ['1', '2', '3', '4', '6', '7']
5 links: [['1', '2'], ['1', '3'], ['1', '4'], ['2', '6'], ['2', '7']]
[0, 1, 1, 1, 0, 0]
[1, 0, 0, 0, 1, 1]
[1, 0, 0, 0, 0, 0]
[1, 0, 0, 0, 0, 0]
[0, 1, 0, 0, 0, 0]
[0, 1, 0, 0, 0, 0]
"""
self.links = [[i,j] for i in self.graph for j in self.graph[i]]
all_nodes = []
op = []
for node in self.graph:
all_nodes.append(node) # to get the root node
for connection in self.graph[node]:
all_nodes.append(connection)
self.nodes = sorted(list(set(all_nodes)))
        if show_in_console:
print (len(self.nodes), 'nodes:', self.nodes)
print (len(self.links), 'links:', self.links)
for y in range(len(self.nodes)):
row = []
for x in range(len(self.nodes)):
match = False
for l in self.links:
if self.nodes[x] == l[0] and self.nodes[y] == l[1]:
match = True
if self.nodes[x] == l[1] and self.nodes[y] == l[0]:
match = True
if match is True:
row.append(1)
else:
row.append(0)
op.append(row)
        if show_in_console:
for row in op:
print(row)
return op | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/data_structures.py | data_structures.py |
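# --- Illustrative sketch for data_structures.py (not part of the original module)
# Reuses the example graph from the Graph docstring above.
def _demo_graph_matrix():
    g = Graph({'A': ['B', 'E'],
               'B': ['H', 'C', 'D'],
               'E': ['M']})
    matrix = g.get_adjacency_matrix(show_in_console=True)
    print(len(matrix), 'x', len(matrix[0]))   # -> 7 x 7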
import os
import glob
import shutil
import aikif.lib.cls_filelist as mod_fl
exclude_folders = [ os.sep + 'venv',
os.sep + 'venv2',
os.sep + '__pycache__',
os.sep + 'htmlcov'
]
def get_filelist(fldr):
"""
extract a list of files from fldr
"""
lst = mod_fl.FileList([fldr], ['*.*'], exclude_folders, '')
return lst.get_list()
def delete_file(f, ignore_errors=False):
"""
delete a single file
"""
try:
os.remove(f)
except Exception as ex:
if ignore_errors:
return
print('ERROR deleting file ' + str(ex))
def delete_files_in_folder(fldr):
"""
delete all files in folder 'fldr'
"""
fl = glob.glob(fldr + os.sep + '*.*')
for f in fl:
delete_file(f, True)
def copy_file(src, dest):
"""
copy single file
"""
try:
shutil.copy2(src , dest)
    except Exception as ex:
        print('ERROR copying file ' + str(ex))
def copy_files_to_folder(src, dest, xtn='*.txt'):
"""
copies all the files from src to dest folder
"""
try:
all_files = glob.glob(os.path.join(src,xtn))
for f in all_files:
copy_file(f, dest)
    except Exception as ex:
        print('ERROR copy_files_to_folder ' + str(ex))
def copy_all_files_and_subfolders(src, dest, base_path_ignore, xtn_list):
"""
    gets a list of all subfolders and copies each file to
    its own folder under 'dest', e.g.
    copy_all_files_and_subfolders(src, dest, backup_path, ['*.*'])
"""
fl = mod_fl.FileList([src], xtn_list, exclude_folders, '')
all_paths = list(set([p['path'] for p in fl.fl_metadata]))
fl.save_filelist(os.path.join(dest,'files_backed_up.csv'), ["name", "path", "size", "date"])
for p in all_paths:
dest_folder = os.path.join(dest, p[len(base_path_ignore):])
if not os.path.exists(dest_folder):
try:
os.makedirs(dest_folder) # create all directories, raise an error if it already exists
            except OSError as ex:
                print('Error - cant create directory ' + str(ex))
copy_files_to_folder(p, dest_folder, xtn='*') | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/file_tools.py | file_tools.py |
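# --- Illustrative sketch for file_tools.py (not part of the original module)
# Copies every .txt file between two temporary folders.
def _demo_copy_txt_files():
    import tempfile
    src = tempfile.mkdtemp()
    dest = tempfile.mkdtemp()
    with open(os.path.join(src, 'a.txt'), 'w') as f:
        f.write('sample')
    copy_files_to_folder(src, dest, xtn='*.txt')
    print(os.listdir(dest))   # -> ['a.txt']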
try:
import mutagenx
import mutagenx.id3
except ImportError:
print("Error: cant import mutagen")
def TEST():
""" local test to demo usage - see unittests for full functionality """
print("Local test of audio_tools.py")
fname = r"E:\backup\music\Music\_Rock\Angels\Red Back Fever\07 Red Back Fever.mp3"
#res = get_audio_metadata(fname)
#print(res)
res = get_audio_metadata(fname)
print(res)
#{'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']}
def get_audio_metadata(fname):
""" collects basic MP3 metadata
Works, once you use mutagenx (buried deep in issues page)
['Angels']
['Red Back Fever']
['Red Back Fever']
{'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']}
"""
from mutagenx.easyid3 import EasyID3
audio = EasyID3(fname)
audio_dict = {}
try:
artist = audio["artist"]
except KeyError:
artist = ''
try:
title = audio["title"]
    except KeyError:
        title = ''    # avoid a NameError below when the tag is missing
        print("Cant get title")
try:
album = audio["album"]
except KeyError:
album = ''
audio_dict['album'] = album
audio_dict['title'] = title
audio_dict['artist'] = artist
return audio_dict
def get_audio_metadata_old(fname):
""" retrieve the metadata from an MP3 file """
audio_dict = {}
print("IDv2 tag info for %s:" % fname)
try:
audio = mutagenx.id3.ID3(fname, translate=False)
    except Exception as err:    # StandardError does not exist in Python 3
        print("ERROR = " + str(err))
        return audio_dict       # cant read the tags at all, return empty dict
#else:
#print(audio.pprint().encode("utf-8", "replace"))
#for frame in audio.values():
# print(repr(frame))
try:
audio_dict["title"] = audio["title"]
except KeyError:
print("No title")
try:
audio_dict["artist"] = audio["artist"] # tags['TPE1']
except KeyError:
print("No artist")
try:
audio_dict["album"] = audio["album"]
except KeyError:
print("No album")
try:
audio_dict["length"] = audio["length"]
except KeyError:
print("No length")
#pprint.pprint(audio.tags)
return audio_dict
if __name__ == '__main__':
TEST() | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/audio_tools.py | audio_tools.py |
import random
EMPTY = '.' # TODO - need to change this in multiple places (see worlds.py, cls_grid, world_generator)
FULL = 'X'
class Grid(object):
"""
Class to run the game logic.
"""
def __init__(self, grid_height, grid_width, pieces, spacing=6):
self.grid_height = grid_height
self.grid_width = grid_width
self.spacing = spacing
self.pieces = pieces
self.reset()
self.grid = [[EMPTY for dummy_col in range(self.grid_width)]
for dummy_row in range(self.grid_height)]
#print(self.grid)
def reset(self):
"""
Reset the game so the grid is zeros (or default items)
"""
self.grid = [[0 for dummy_l in range(self.grid_width)] for dummy_l in range(self.grid_height)]
def clear(self):
"""
Clears grid to be EMPTY
"""
self.grid = [[EMPTY for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]
def __str__(self):
"""
Return a string representation of the grid for debugging.
"""
output_string = ''
for row in range(self.grid_height):
for col in range(self.grid_width):
output_string += str(self.grid[row][col]).rjust(self.spacing)
output_string += "\n"
output_string += "\n"
return output_string
def save(self, fname):
""" saves a grid to file as ASCII text """
try:
with open(fname, "w") as f:
f.write(str(self))
except Exception as ex:
print('ERROR = cant save grid results to ' + fname + str(ex))
def load(self, fname):
""" loads a ASCII text file grid to self """
# get height and width of grid from file
self.grid_width = 4
self.grid_height = 4
# re-read the file and load it
self.grid = [[0 for dummy_l in range(self.grid_width)] for dummy_l in range(self.grid_height)]
with open(fname, 'r') as f:
for row_num, row in enumerate(f):
if row.strip('\n') == '':
break
for col_num, col in enumerate(row.strip('\n')):
self.set_tile(row_num, col_num, col)
#print('loaded grid = \n', str(self))
def get_grid_height(self):
"""
Get the height of the board.
"""
return self.grid_height
def get_grid_width(self):
"""
Get the width of the board.
"""
return self.grid_width
def is_empty(self, row, col):
"""
Checks whether cell with index (row, col) is empty
"""
return self.grid[row][col] == EMPTY
def extract_col(self, col):
"""
get column number 'col'
"""
new_col = [row[col] for row in self.grid]
return new_col
def extract_row(self, row):
"""
get row number 'row'
"""
new_row = []
for col in range(self.get_grid_width()):
new_row.append(self.get_tile(row, col))
return new_row
def replace_row(self, line, ndx):
"""
replace a grids row at index 'ndx' with 'line'
"""
for col in range(len(line)):
self.set_tile(ndx, col, line[col])
def replace_col(self, line, ndx):
"""
replace a grids column at index 'ndx' with 'line'
"""
for row in range(len(line)):
self.set_tile(row, ndx, line[row])
def reverse_line(self, line):
"""
helper function
"""
return line[::-1]
def new_tile(self, num=1):
"""
Create a new tile in a randomly selected empty
square. The tile should be 2 90% of the time and
4 10% of the time.
"""
for _ in range(num):
if random.random() > .5:
new_tile = self.pieces[0]
else:
new_tile = self.pieces[1]
# check for game over
blanks = self.count_blank_positions()
if blanks == 0:
print ("GAME OVER")
else:
res = self.find_random_blank_cell()
row = res[0]
col = res[1]
self.set_tile(row, col, new_tile)
def count_blank_positions(self):
"""
return a count of blank cells
"""
blanks = 0
        for row_ndx in range(self.grid_height):
            for col_ndx in range(self.grid_width):
if self.get_tile(row_ndx, col_ndx) == EMPTY:
blanks += 1
return blanks
def count_filled_positions(self):
"""
return a count of blank cells
"""
filled = 0
for row_ndx in range(self.grid_height - 0):
for col_ndx in range(self.grid_width - 0):
if self.get_tile(row_ndx, col_ndx) != EMPTY:
filled += 1
return filled
    def find_random_blank_cell(self):
        """
        returns [row, col] of a randomly chosen empty cell,
        or [-1, -1] if there are no blanks left
        """
        if self.count_blank_positions() == 0:
            return [-1, -1]
        row = random.randrange(0, self.grid_height)
        col = random.randrange(0, self.grid_width)
        while self.grid[row][col] != EMPTY:
            row = random.randrange(0, self.grid_height)
            col = random.randrange(0, self.grid_width)
        return [row, col]
def set_tile(self, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
#print('set_tile: y=', row, 'x=', col)
if col < 0:
print("ERROR - x less than zero", col)
col = 0
#return
if col > self.grid_width - 1 :
print("ERROR - x larger than grid", col)
col = self.grid_width - 1
#return
if row < 0:
print("ERROR - y less than zero", row)
row = 0
#return
if row > self.grid_height - 1:
print("ERROR - y larger than grid", row)
row = self.grid_height - 1
self.grid[row][col] = value
def get_tile(self, row, col):
"""
Return the value of the tile at position row, col.
"""
#print('attempting to get_tile from ', row, col)
return self.grid[row][col]
def set_empty(self, row, col):
"""
Set cell with index (row, col) to be empty
"""
self.grid[row][col] = EMPTY
def set_full(self, row, col):
"""
Set cell with index (row, col) to be full
"""
self.grid[row][col] = FULL
def four_neighbors(self, row, col):
"""
Returns horiz/vert neighbors of cell (row, col)
"""
ans = []
if row > 0:
ans.append((row - 1, col))
if row < self.grid_height - 1:
ans.append((row + 1, col))
if col > 0:
ans.append((row, col - 1))
if col < self.grid_width - 1:
ans.append((row, col + 1))
return ans
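    # e.g. on a 3x3 grid, four_neighbors(0, 0) returns [(1, 0), (0, 1)]
    # because a corner cell only has two in-bounds orthogonal neighbours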
def eight_neighbors(self, row, col):
"""
Returns horiz/vert neighbors of cell (row, col) as well as
diagonal neighbors
"""
ans = []
if row > 0:
ans.append((row - 1, col))
if row < self.grid_height - 1:
ans.append((row + 1, col))
if col > 0:
ans.append((row, col - 1))
if col < self.grid_width - 1:
ans.append((row, col + 1))
if (row > 0) and (col > 0):
ans.append((row - 1, col - 1))
if (row > 0) and (col < self.grid_width - 1):
ans.append((row - 1, col + 1))
if (row < self.grid_height - 1) and (col > 0):
ans.append((row + 1, col - 1))
if (row < self.grid_height - 1) and (col < self.grid_width - 1):
ans.append((row + 1, col + 1))
return ans
def get_index(self, point, cell_size):
"""
Takes point in screen coordinates and returns index of
containing cell
"""
        return (point[1] // cell_size, point[0] // cell_size)
def replace_grid(self, updated_grid):
"""
replace all cells in current grid with updated grid
"""
for col in range(self.get_grid_width()):
for row in range(self.get_grid_height()):
if updated_grid[row][col] == EMPTY:
self.set_empty(row, col)
else:
self.set_full(row, col)
def find_safe_starting_point(self):
"""
finds a place on the grid which is clear on all sides
to avoid starting in the middle of a blockage
"""
y = random.randint(2,self.grid_height-4)
x = random.randint(2,self.grid_width-4)
return y, x | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/cls_grid.py | cls_grid.py |
from collections import namedtuple
Item = namedtuple("Item", ['index', 'value', 'weight', 'density'])
def describe_problem(capacity, items):
print('\nAIKIF Knapsack Solver - 9/3/2014')
print('This attempts to maximise the number of items that can fit into a Knapsack')
print('--------------------------------------------------------------------------')
print('PROBLEM : ' )
print('Knapsack Capacity = ' + str(capacity))
print('Total Items = ' + str(len(items)))
for item in items:
print ('item [' + str(item.index) + '] value = ' + str(item.value) + ', item.weight = ' + str(item.weight) + ' density = ' + str(item.density))
print('--------------------------------------------------------------------------')
def solve_greedy_trivial(capacity, items):
    # a trivial greedy algorithm for filling the knapsack
    # it takes items in-order until the knapsack is full
    value = 0
    weight = 0
    taken = [0]*len(items)
for item in items:
if weight + item.weight <= capacity:
taken[item.index] = 1
value += item.value
weight += item.weight
#density =
return value, taken
def solve_smallest_items_first(capacity, items):
    value = 0
    weight = 0
    taken = [0]*len(items)
    sortedList = sorted(items, key=lambda itm: itm.weight, reverse=False)
for item in sortedList:
if weight + item.weight <= capacity:
taken[item.index] = 1
value += item.value
weight += item.weight
return value, taken
def solve_expensive_items_first(capacity, items):
    value = 0
    weight = 0
    taken = [0]*len(items)
    sortedList = sorted(items, key=lambda itm: itm.value, reverse=True)
for item in sortedList:
#print ('item [' + str(item.index) + '] value = ' + str(item.value) + ', item.weight = ' + str(item.weight) + ' density = ' + str(item.density))
if weight + item.weight <= capacity:
taken[item.index] = 1
value += item.value
weight += item.weight
#print('Adding [' + str(item.index) + '], value = ' + str(item.value) + ' wght = ' + str(item.weight) )
return value, taken
def solve_value_density(capacity, items):
value = 0
weight = 0
taken = [0]*len(items)
    valueDensity = sorted(items, key=lambda itm: itm.density, reverse=True)
for item in valueDensity:
if weight + item.weight <= capacity:
taken[item.index] = 1
value += item.value
weight += item.weight
return value, taken
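# e.g. for the items in main() (capacity 19) the greedy density fill ends
# up taking the 8, 10 and 15 value items (weight 4 + 5 + 8 = 17) for a
# total value of 33, leaving the value-4 item behind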
def main():
# Modify this code to run your optimization algorithm
    #Item = namedtuple("Item", ['index', 'value', 'weight', 'density'])
    # note: indexes must be unique (they index the 'taken' list) and
    # density = value / weight
    capacity = 19
    items = []
    items.append(Item(0, 8, 4, 8 / 4))
    items.append(Item(1, 10, 5, 10 / 5))
    items.append(Item(2, 15, 8, 15 / 8))
    items.append(Item(3, 4, 3, 4 / 3))
describe_problem(capacity, items)
value, taken = solve_expensive_items_first(capacity, items)
print('solve_expensive_items_first = ', value, taken)
value, taken = solve_smallest_items_first(capacity, items)
print('solve_smallest_items_first = ', value, taken)
value, taken = solve_value_density(capacity, items)
print('solve_value_density = ', value, taken)
if __name__ == '__main__':
main() | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/solve_knapsack.py | solve_knapsack.py |
import os
try:
import win32gui
except Exception as ex:
print('Cant import win32gui (probably CI build on linux)' + str(ex))
try:
    import win32con
except Exception as ex:
    print('Cant import win32con (probably CI build on linux)' + str(ex))
try:
    import win32api
except Exception as ex:
    print('Cant import win32api (probably CI build on linux)' + str(ex))
try:
    import win32com.client
except Exception as ex:
    print('Cant import win32com.client (probably CI build on linux)' + str(ex))
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + "..")
print(root_folder)
def get_window_by_caption(caption):
"""
finds the window by caption and returns handle (int)
"""
try:
hwnd = win32gui.FindWindow(None, caption)
return hwnd
except Exception as ex:
print('error calling win32gui.FindWindow ' + str(ex))
return -1
def send_text(hwnd, txt):
"""
sends the text 'txt' to the window handle hwnd using SendMessage
"""
try:
for c in txt:
if c == '\n':
win32api.SendMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)
win32api.SendMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0)
else:
win32api.SendMessage(hwnd, win32con.WM_CHAR, ord(c), 0)
except Exception as ex:
print('error calling SendMessage ' + str(ex))
def launch_app(app_path, params=None, time_before_kill_app=15):
    """
    start an app, passing an optional list of parameters
    """
    import subprocess
    if params is None:
        params = []   # avoid a mutable default argument
    try:
        res = subprocess.call([app_path] + params, timeout=time_before_kill_app, shell=True)
        print('res = ', res)
        if res == 0:
            return True
        else:
            return False
    except Exception as ex:
        print('error launching app ' + str(app_path) + ' with params ' + str(params) + '\n' + str(ex))
        return False
def app_activate(caption):
"""
use shell to bring the application with caption to front
"""
try:
shell = win32com.client.Dispatch("WScript.Shell")
shell.AppActivate(caption)
except Exception as ex:
        print('error calling win32com.client.Dispatch (AppActivate) ' + str(ex))
def close_app(caption):
"""
close an app
"""
pass
def send_keys(key_string):
"""
sends the text or keys to the active application using shell
Note, that the imp module shows deprecation warning.
Examples:
shell.SendKeys("^a") # CTRL+A
shell.SendKeys("{DELETE}") # Delete key
shell.SendKeys("hello this is a lot of text with a //")
"""
try:
shell = win32com.client.Dispatch("WScript.Shell")
shell.SendKeys(key_string)
except Exception as ex:
        print('error calling win32com.client.Dispatch (SendKeys) ' + str(ex)) | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/interface_windows_tools.py | interface_windows_tools.py
def parse_text_to_table(txt):
"""
takes a blob of text and finds delimiter OR guesses
the column positions to parse into a table.
input: txt = blob of text, lines separated by \n
output: res = table of text
"""
res = [] # resulting table
delim = identify_delim(txt)
if delim == '' or delim == ' ':
fixed_split = identify_col_pos(txt)
if fixed_split == []:
res = []
else:
res = parse_text_by_col_pos(txt, fixed_split)
else:
res = parse_text_by_delim(txt, delim)
return res
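# e.g. parse_text_to_table('a,b\nc,d') detects ',' as the delimiter and
# returns [['a', 'b'], ['c', 'd']]; text with no usable delimiter falls
# back to identify_col_pos() to guess fixed column boundaries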
def identify_col_pos(txt):
    """
    assume no delimiter in this file, so guess the best
    fixed column widths to split by, returning the starting
    positions of each column after the first
    """
    res = []
    lines = txt.split('\n')
    if not lines or lines[0] == '':
        return res
    prev_ch = lines[0][0]
    for col_pos, ch in enumerate(lines[0]):
        if _is_white_space(ch) is False and _is_white_space(prev_ch) is True:
            res.append(col_pos)
        prev_ch = ch
    return res
def _is_white_space(ch):
if ch in [' ', '\t']:
return True
else:
return False
def save_tbl_as_csv(t, fname):
with open(fname, 'w') as f:
for row in t:
for col in row:
f.write('"' + col + '",')
f.write('\n')
def parse_text_by_col_pos(txt, fixed_split):
tbl = []
cur_pos = 0
lines = txt.split('\n')
for line in lines:
if line.strip('\n') != '':
cols = []
prev_spacing = 0
for cur_pos in fixed_split:
cols.append(line[prev_spacing:cur_pos])
prev_spacing = cur_pos
cols.append(line[cur_pos:])
tbl.append(cols)
return tbl
def parse_text_by_delim(txt, delim):
tbl = []
lines = txt.split('\n')
for line in lines:
if line.strip('\n') != '':
tbl.append(line.split(delim))
return tbl
def _get_dict_char_count(txt):
"""
reads the characters in txt and returns a dictionary
of all letters
"""
dct = {}
for letter in txt:
if letter in dct:
dct[letter] += 1
else:
dct[letter] = 1
return dct
def identify_delim(txt):
    """
    identifies the most likely delimiter by counting candidate
    (non-alphanumeric) characters in the text and returning the
    most frequent one, or '' if there is no candidate. A stricter
    version would also confirm the count is consistent in each row.
    """
    char_counts = _get_dict_char_count(txt)  # e.g. {'a': 4, ',': 6, '\n': 3}
    candidates = {k: v for k, v in char_counts.items()
                  if not k.isalnum() and k not in ('\n', '\r')}
    if not candidates:
        return ''
    delim = max(candidates.keys(), key=(lambda k: candidates[k]))
return delim | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/text_tools.py | text_tools.py |
import os
import sys
root_fldr = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." )
sys.path.append(root_fldr)
import cls_log as mod_log
import config as mod_cfg
agent_status = [ 'NONE', # agent was never instantiated (then how would it report this?)
'RUNNING', # agent is running
'STOPPED', # agent was stopped - not sure if it finished or not
'FINISHED', # agent has finished performing its task, ready to report
'READY', # agent has reported its results back, and is ready for next command
'WAITING', # agent is running, but waiting on other input
'ERROR', # agent has encountered an error it cannot handle
]
class Agent(object):
"""
Class for Agents in AIKIF, all agents base class this
"""
def __init__(self, name='', fldr='', running=False):
self.name = name
self.fldr = fldr
self.running = running
self.characteristics = None # this optionally can be a vais.Character, skillpoints, rules
self.ai = None # this is the external AI object that controls this agent
self.results = []
self.coords = {}
self.coords['x']=0
self.coords['y']=0
self.coords['z']=0
self.coords['t']=0
self.status = 'READY'
if fldr == '':
fldr = mod_cfg.fldrs['log_folder']
if fldr == '':
print('ERROR - no log folder found')
exit(1)
self.mylog = mod_log.Log(fldr)
        self.mylog.record_command('agent', self.name + ' - initialising')
# log agent name to get list of all agents
self._log_agent_name('list_agent_names.txt')
if self.running is True:
self.start()
def __str__(self):
"""
returns an agent summary for console mainly
"""
txt = '\n--------- Agent Summary ---------\n'
txt += 'Name : ' + self.name + '\n'
txt += 'Folder : ' + self.fldr + '\n'
txt += 'Status : ' + self.status + '\n'
if self.running is True:
txt += 'Running : True\n'
else:
txt += 'Running : False\n'
for c,v in self.coords.items():
txt += ' coord ' + str(c) + ' = ' + str(v) + '\n'
return txt
def _get_instance(self):
"""
returns unique class and name for logging
"""
return self.__class__.__name__ + ":" + self.name
def _log_agent_name(self, unique_name_file):
"""
logs the agent details to logfile
unique_name_file (list_agents.txtlist_agents_names.txt) = list of all instances of all agents
"""
agt_list = os.path.join(root_fldr, 'data', unique_name_file)
        if os.path.exists(agt_list):
            with open(agt_list, 'r') as f:
                agents_logged = f.read()
        else:
            agents_logged = ''
print('agents_logged = ', agents_logged)
if self._get_instance() not in agents_logged:
with open(agt_list, 'a') as f:
f.write(self._get_instance() + '\n')
def start(self):
"""
Starts an agent with standard logging
"""
self.running = True
self.status = 'RUNNING'
self.mylog.record_process('agent', self.name + ' - starting')
def set_coords(self, x=0, y=0, z=0, t=0):
"""
set coords of agent in an arbitrary world
"""
self.coords = {}
self.coords['x'] = x
self.coords['y'] = y
self.coords['z'] = z
self.coords['t'] = t
def get_coords(self):
"""
Agent to keep track of its coordinates in an unknown world or
simulation. There are no tests here for viability or safety of
locations, rather it makes sense for each agent to keep its
location and have the simulation, or world track locations of
*all* agents.
"""
return self.coords
def do_your_job(self):
"""
Main method which does the actual work required.
This method needs to be sub-classed in your agents
code, but should also call this for the logging and
status updates.
"""
self.mylog.record_process(self.name, 'agent.py')
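    # do_your_job subclass sketch (illustrative only):
    #     class MyAgent(Agent):
    #         def do_your_job(self):
    #             super().do_your_job()   # keep the standard logging
    #             ...                     # then do the real work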
def stop(self):
"""
Stops an agent with standard logging
"""
self.running = False
self.status = 'STOPPED'
def check_status(self):
"""
Requests an agent to report its status as a single string
(see allowed strings in agent_status
"""
return self.status
def report(self):
"""
Requests an agent to report its results as a dictionary
"""
return self.results | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/agents/agent.py | agent.py |
import os
import random
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." + os.sep + ".." + os.sep + "..")
import aikif.agents.agent as agt
class ExploreAgent(agt.Agent):
"""
agent that explores a world (2D grid)
"""
def __init__(self, name, fldr, running, LOG_LEVEL):
#agt.Agent.__init__(self, *arg)
agt.Agent.__init__(self, name, fldr, running)
self.LOG_LEVEL = LOG_LEVEL
self.num_steps = 0
self.num_climbs = 0
def set_world(self, grd, start_y_x, y_x):
"""
tell the agent to move to location y,x
Why is there another grd object in the agent? Because
this is NOT the main grid, rather a copy for the agent
to overwrite with planning routes, etc.
The real grid is initialised in World.__init__() class
"""
self.grd = grd
self.start_y = start_y_x[0]
self.start_x = start_y_x[1]
self.current_y = start_y_x[0]
self.current_x = start_y_x[1]
self.target_y = y_x[0]
self.target_x = y_x[1]
self.backtrack = [0,0] # set only if blocked and agent needs to go back
self.prefer_x = 0 # set only if backtracked as preferred direction x
self.prefer_y = 0 # set only if backtracked as preferred direction y
def do_your_job(self):
"""
the goal of the explore agent is to move to the
target while avoiding blockages on the grid.
This function is messy and needs to be looked at.
It currently has a bug in that the backtrack oscillates
so need a new method of doing this - probably checking if
previously backtracked in that direction for those coords, ie
keep track of cells visited and number of times visited?
"""
y,x = self.get_intended_direction() # first find out where we should go
if self.target_x == self.current_x and self.target_y == self.current_y:
#print(self.name + " : TARGET ACQUIRED")
if len(self.results) == 0:
self.results.append("TARGET ACQUIRED")
self.lg_mv(2, self.name + ": TARGET ACQUIRED" )
return
self.num_steps += 1
# first try is to move on the x axis in a simple greedy search
accessible = ['\\', '-', '|', '/', '.']
# randomly move in Y direction instead of X if all paths clear
if y != 0 and x != 0 and self.backtrack == [0,0]:
if random.randint(1,10) > 6:
if self.grd.get_tile(self.current_y + y, self.current_x) in accessible:
self.current_y += y
self.lg_mv(3, self.name + ": randomly moving Y axis " + str(self.num_steps) )
return
if x == 1:
if self.grd.get_tile(self.current_y, self.current_x + 1) in accessible:
self.current_x += 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving West" )
return
elif x == -1:
if self.grd.get_tile(self.current_y, self.current_x - 1) in accessible:
self.current_x -= 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving East" )
return
elif y == 1:
if self.grd.get_tile(self.current_y + 1, self.current_x) in accessible:
self.current_y += 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving South" )
return
elif y == -1:
if self.grd.get_tile(self.current_y - 1, self.current_x) in accessible:
self.current_y -= 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving North")
return
self.grd.set_tile(self.start_y, self.start_x, 'A')
self.grd.save(os.path.join(os.getcwd(), 'agent.txt'))
def lg_mv(self, log_lvl, txt):
"""
wrapper for debugging print and log methods
"""
if log_lvl <= self.LOG_LEVEL:
print(txt + str(self.current_y) + "," + str(self.current_x))
def get_intended_direction(self):
"""
returns a Y,X value showing which direction the
agent should move in order to get to the target
"""
x = 0
y = 0
if self.target_x == self.current_x and self.target_y == self.current_y:
return y,x # target already acquired
if self.target_y > self.current_y:
y = 1
elif self.target_y < self.current_y:
y = -1
if self.target_x > self.current_x:
x = 1
elif self.target_x < self.current_x:
x = -1
return y,x
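    # e.g. an agent at (y=2, x=2) with target (y=0, x=5) gets back (-1, 1):
    # step north along y and east along x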
def clear_surroundings(self):
"""
clears the cells immediately around the grid of the agent
(just to make it find to see on the screen)
"""
cells_to_clear = self.grd.eight_neighbors(self.current_y, self.current_x)
for cell in cells_to_clear:
self.grd.set_tile(cell[0], cell[1], ' ')
def show_status(self):
"""
dumps the status of the agent
"""
        txt = 'Agent Status:\n'
        txt += "start_x   = " + str(self.start_x) + "\n"
        txt += "start_y   = " + str(self.start_y) + "\n"
        txt += "target_x  = " + str(self.target_x) + "\n"
        txt += "target_y  = " + str(self.target_y) + "\n"
        txt += "current_x = " + str(self.current_x) + "\n"
        txt += "current_y = " + str(self.current_y) + "\n"
        print(txt)
        print(self.grd)
return txt | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/agents/explore/agent_explore_grid.py | agent_explore_grid.py |
#from cls_dataset import DataSet
import math
import collections
class DataTable(object):
"""
A data table is a single grid of data, such as a
CSV / TXT file or database view or table.
Sample input ( test.csv)
TERM,GENDER,ID,tot1,tot2
5320,M,78,18,66
1310,M,78,10,12
1310,F,78,1,45
1310,F,16,0,2
1310,F,16,5,12
5320,F,16,31,40
1310,F,16,67,83
>> describe_contents()
======================================================================
TERM GENDER ID tot1 tot2
5320 M 78 18 66
1310 M 78 10 12
1310 F 78 1 45
1310 F 16 0 2
1310 F 16 5 12
5320 F 16 31 40
1310 F 16 67 83
Table = 5 cols x 7 rows
HEADER = ['TERM', 'GENDER', 'ID', 'tot1', 'tot2']
arr = [['5320', 'M', '78', '18', '66'], ['1310', 'M', '78', '10', '12']]
"""
def __init__(self, name, dataset_type, col_names = None, delim = ','):
self.name = name
self.delim = delim
self.dataset_type = dataset_type
self.arr = []
self.header = []
if col_names:
self.col_names = col_names
else:
self.col_names = []
        self.header = self.col_names # possible dupe but work in progress
#self.load_to_array()
def __str__(self):
res = ''
for c in self.header:
res += c.ljust(8)
res += '\n'
for row in self.arr:
for c in row:
res += self.force_to_string(c).ljust(8) + self.delim
res += '\n'
return res
def add(self, row):
""" add a row to data table array """
self.arr.append(row)
def describe_contents(self):
""" describes various contents of data table """
print('======================================================================')
print(self)
print('Table = ', str(len(self.header)) + ' cols x ' + str(len(self.arr)) + ' rows')
print('HEADER = ', self.get_header())
print('arr = ', self.arr[0:2])
#for num, itm in enumerate(self.get_header()):
# print('HEADER ', num, itm)
def get_distinct_values_from_cols(self, l_col_list):
"""
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guarenteed to have values in each case.
"""
uniq_vals = []
for l_col_name in l_col_list:
#print('col_name: ' + l_col_name)
uniq_vals.append(set(self.get_col_data_by_name(l_col_name)))
#print(' unique values = ', uniq_vals)
#print(' unique values[0] = ', uniq_vals[0])
#print(' unique values[1] = ', uniq_vals[1])
if len(l_col_list) == 0:
return []
        elif len(l_col_list) == 1:
            return sorted(list(uniq_vals[0]))
elif len(l_col_list) == 2:
res = []
res = [(a, b) for a in uniq_vals[0] for b in uniq_vals[1]]
return res
        else:
            import itertools
            return list(itertools.product(*uniq_vals))
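    # e.g. with distinct TERM values {1310, 5320} and GENDER values
    # {'F', 'M'}, get_distinct_values_from_cols(['TERM', 'GENDER'])
    # returns all four pairs such as ('1310', 'F') ... ('5320', 'M'),
    # whether or not every combination occurs in the data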
def count_unique_values(self, colNum, colText, topN_values=10):
res = []
cols = collections.Counter()
for row in self.arr:
cols[row[colNum]] += 1
print (colText)
for k,v in cols.most_common()[0:topN_values]:
res.append( self.force_to_string(k) + ' (' + self.force_to_string(v) + ')')
return res
def select_where(self, where_col_list, where_value_list, col_name=''):
"""
selects rows from the array where col_list == val_list
"""
res = [] # list of rows to be returned
col_ids = [] # ids of the columns to check
#print('select_where : arr = ', len(self.arr), 'where_value_list = ', where_value_list)
for col_id, col in enumerate(self.header):
if col in where_col_list:
col_ids.append([col_id, col])
#print('select_where : col_ids = ', col_ids) # correctly prints [[0, 'TERM'], [2, 'ID']]
for row_num, row in enumerate(self.arr):
keep_this_row = True
#print('col_ids=', col_ids, ' row = ', row_num, row)
for ndx, where_col in enumerate(col_ids):
#print('type where_value_list[ndx] = ', type(where_value_list[ndx]))
#print('type row[where_col[0]] = ', type(row[where_col[0]]))
if row[where_col[0]] != where_value_list[ndx]:
keep_this_row = False
if keep_this_row is True:
if col_name == '':
res.append([row_num, row])
else: # extracting a single column only
l_dat = self.get_col_by_name(col_name)
if l_dat is not None:
res.append(row[l_dat])
return res
def force_to_string(self,unknown):
result = ''
if type(unknown) is str:
result = unknown
if type(unknown) is int:
result = str(unknown)
if type(unknown) is float:
result = str(unknown)
if type(unknown) is dict:
result = '{' + self.Dict2String(unknown) + '}'
if type(unknown) is list:
result = '[' + ','.join([self.force_to_string(i) for i in unknown]) + ']'
return result
def Dict2String(self, d):
res = ""
for k, v in d.items(): # .iteritems():
res = res + k + '=' + str(v) + ','
return res
#def dict2list(self, dct, keylist):
# return [dct[i] for i in keylist]
def update_where(self, col, value, where_col_list, where_value_list):
"""
updates the array to set cell = value where col_list == val_list
"""
if type(col) is str:
col_ndx = self.get_col_by_name(col)
else:
col_ndx = col
#print('col_ndx = ', col_ndx )
#print("updating " + col + " to " , value, " where " , where_col_list , " = " , where_value_list)
new_arr = self.select_where(where_col_list, where_value_list)
#print('new_arr', new_arr)
for r in new_arr:
self.arr[r[0]][col_ndx] = value
#print(self.arr)
def calc_percentiles(self, col_name, where_col_list, where_value_list):
"""
calculates the percentiles of col_name
WHERE [where_col_list] = [where_value_list]
"""
#col_data = self.get_col_data_by_name(col_name)
col_data = self.select_where(where_col_list, where_value_list, col_name)
#print('calc_percentiles: col_data = ', col_data, ' where_col_list = ', where_col_list, ', where_value_list = ', where_value_list)
if len(col_data) == 0:
#print("Nothing to calculate")
return 0,0,0
else:
first = self.percentile(col_data, .25)
third = self.percentile(col_data, .75)
median = self.percentile(col_data, .50)
#print('CALC_PERCENTILES = first, third, median ', first, third, median )
return first, third, median
def percentile(self, lst_data, percent , key=lambda x:x):
""" calculates the 'num' percentile of the items in the list """
new_list = sorted(lst_data)
#print('new list = ' , new_list)
#n = float(len(lst_data))
k = (len(new_list)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
#print(key(new_list[int(k)]))
return key(new_list[int(k)])
d0 = float(key(new_list[int(f)])) * (c-k)
d1 = float(key(new_list[int(c)])) * (k-f)
return d0+d1
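    # worked example: percentile([1, 2, 3, 4], .50) gives k = 1.5, so the
    # result interpolates between new_list[1] and new_list[2]:
    # 2 * (2 - 1.5) + 3 * (1.5 - 1) = 2.5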
def load(self, filename):
"""
loads a dataset to memory - usually overriden but
default is to load a file as a list of lines
"""
with open(filename, "r") as f:
return f.read()
def save(self, filename, content):
"""
default is to save a file from list of lines
"""
with open(filename, "w") as f:
if hasattr(content, '__iter__'):
f.write('\n'.join([row for row in content]))
else:
                print('WARNING - content is not iterable, saving as string')
f.write(str(content))
def save_csv(self, filename, write_header_separately=True):
"""
save the default array as a CSV file
"""
txt = ''
#print("SAVING arr = ", self.arr)
with open(filename, "w") as f:
if write_header_separately:
f.write(','.join([c for c in self.header]) + '\n')
for row in self.arr:
#print('save_csv: saving row = ', row)
txt = ','.join([self.force_to_string(col) for col in row])
#print(txt)
f.write(txt + '\n')
f.write('\n')
def drop(self, fname):
"""
drop the table, view or delete the file
"""
if self.dataset_type == 'file':
import os
try:
os.remove(fname)
except Exception as ex:
print('cant drop file "' + fname + '" : ' + str(ex))
#elif self.dataset_type == 'view':
# print ("TODO - drop view")
#elif self.dataset_type == 'table':
# print ("TODO - drop table")
def get_arr(self):
return self.arr
def get_header(self):
""" returns a list of the first rows data """
return self.header
def add_cols(self, col_list):
#print("col_list = " , len(col_list))
#print("BEFORE = " , len(self.arr[0]))
self.header.extend(col_list)
for r in self.arr:
r.extend(['0' for _ in col_list])
#print("AFTER = " , len(self.arr[0]))
def load_to_array(self):
self.arr = []
with open (self.name, 'r') as f:
row = f.readline()
            self.header = [r.strip('\n').strip('"') for r in row.split(self.delim)]
#print (self.header)
for row in f:
if row:
                    self.arr.append([r.strip('\n').strip('"') for r in row.split(self.delim)])
#print('loading row : ', row)
#return self.arr
def get_col_by_name(self, col_name):
#print('get_col_by_name: col_name = ', col_name, ' self.header = ', len(self.header))
for num, c in enumerate(self.header):
#print (num, c)
if c == col_name:
#print('found = c =', c, ' num=', num)
return num
        print(col_name, 'NOT found - returning None')
return None
def get_col_data_by_name(self, col_name, WHERE_Clause=''):
""" returns the values of col_name according to where """
print('get_col_data_by_name: col_name = ', col_name, ' WHERE = ', WHERE_Clause)
col_key = self.get_col_by_name(col_name)
if col_key is None:
print('get_col_data_by_name: col_name = ', col_name, ' NOT FOUND')
return []
#print('get_col_data_by_name: col_key =', col_key)
res = []
for row in self.arr:
#print('col_key=',col_key, ' len(row)=', len(row), ' row=', row)
res.append(row[col_key]) # need to convert to int for calcs but leave as string for lookups
return res
def format_rst(self):
"""
return table in RST format
"""
res = ''
num_cols = len(self.header)
col_width = 25
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for c in self.header:
res += c.ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for row in self.arr:
for c in row:
res += self.force_to_string(c).ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
return res
def get_col_width(self, col_name):
vals = self.get_col_data_by_name(col_name)
return max([len(l) for l in vals])
class DataStats(object):
""" class to do statistics on an array """
def __init__(self, arr):
self.arr = arr
# Utility
def TodayAsString():
"""
returns current date and time like oracle
"""
import time
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/dataTools/cls_datatable.py | cls_datatable.py |
# install https://pypi.python.org/pypi/pypyodbc/
# extract folder to D:\install\python\pypyodbc-1.3.1
# shell to folder, run setup.py
# in your main program:
# import lib_data_SQLServer as sql
# sql.CreateAccessDatabase('test.mdb')
try:
import pypyodbc
except ImportError:
print('you need to install https://pypi.python.org/pypi/pypyodbc/ ')
exit(1)
from if_database import Database
def TEST():
#testFile = 'D:\\database.mdb'
print('wrapper for MS SQL Server and Access databases')
d = MSSQL_server(['server', 'database', 'username', 'password'])
d.connect()
print(d.server)
class MSSQL_server(Database):
def CreateAccessDatabase(self, fname):
pypyodbc.win_create_mdb(fname)
connection = pypyodbc.win_connect_mdb(fname)
connection.cursor().execute('CREATE TABLE t1 (id COUNTER PRIMARY KEY, name CHAR(25));').commit()
connection.close()
def CompactAccessDatabase(self, fname):
pypyodbc.win_compact_mdb(fname,'D:\\compacted.mdb')
def SQLServer_to_CSV(self, cred, schema, table, fldr):
opFile = fldr + table + '.CSV'
print ('Saving ' + table + ' to ' + opFile)
#cred = [server, database, username, password]
connection_string ='Driver={SQL Server Native Client 11.0};Server=' + cred[0] + ';Database=' + cred[1] + ';Uid=' + cred[2] + ';Pwd=' + cred[3] + ';'
#print(connection_string)
conn = pypyodbc.connect(connection_string)
cur = conn.cursor()
sqlToExec = 'SELECT * FROM ' + schema + '.' + table + ';'
cur.execute(sqlToExec)
        op = open(opFile, 'w')  # text mode - 'wb' would need bytes in Python 3
# add column headers
txt = ''
for col in cur.description:
txt += '"' + self.force_string(col[0]) + '",'
op.write(txt + '\n')
for row_data in cur: # add table rows .encode('utf-8')
txt = ''
for col in row_data:
txt += '"' + self.force_string(col) + '",'
op.write(txt + '\n')
op.close()
cur.close()
conn.close()
def force_string(self, obj):
if type(obj) is str:
return obj
else:
return str(obj)
if __name__ == '__main__':
TEST() | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/dataTools/if_mssqlserver.py | if_mssqlserver.py |
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import cls_log as mod_log
import config as mod_cfg
#lg = mod_log.Log(mod_cfg.fldrs['log_folder'])
lg = mod_log.Log(os.getcwd())
class Data(object):
"""
This is an experiment to provide a high level interface the
various data conversion functions in AIKIF and the toolbox
methods.
For non critical (low bias rating) information, you use this
to scan scan large numbers of unknown data sources (e.g. twitter
feeds, web sites) and shouldn't have to worry too much about
file formats / data types.
"""
def __init__(self, input_data, name='unamed data', data_type='', src=''):
self.input_data = input_data
self.content = {}
self.name = name
self.data_type = data_type
self.src = src
self.total_records = 0
self.total_nodes = 0
self.total_length = 0
if self.data_type == '':
self._identify_datatype(self.input_data)
self.read_data()
lg.record_source(self.src, self._calc_size_stats())
def __str__(self):
"""
txt = self.name + ' (type=' + self.data_type + ')\n'
txt += str(self.content)
return txt
"""
return str(self.content['data'])
def _identify_datatype(self, input_data):
"""
uses the input data, which may be a string, list, number
or file to work out how to load the data (this can be
overridden by passing the data_type on the command line
"""
if isinstance(input_data, (int, float)) :
self.data_type = 'number'
elif isinstance(input_data, (list)): #, set
self.data_type = 'list'
elif isinstance(input_data, dict):
self.data_type = 'dict'
elif type(input_data) is str:
if self.input_data[0:4] == 'http':
self.data_type = 'url'
elif os.path.exists(input_data):
self.data_type = 'file'
else:
self.data_type = 'str'
lg.record_result('_identify_datatype', self.name + ' is ' + self.data_type)
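    # e.g. Data(42) is detected as 'number', Data('http://example.com')
    # as 'url', an existing path as 'file' and any other string as 'str'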
def read_data(self):
if self.data_type in ['str', 'list', 'number']:
self.content['data'] = self.input_data
elif self.data_type == 'file':
if self.input_data[-3:].upper() == 'CSV':
self._create_from_csv()
elif self.input_data[-3:].upper() == 'OWL':
self._create_from_owl()
elif self.data_type == 'url':
self._create_from_url()
else: # dictionary and others to be specified later
self.content['data'] = self.input_data
def _create_from_csv(self):
"""
create a standard data object based on CSV file
"""
import aikif.dataTools.cls_datatable as cl
        fle = cl.DataTable(self.input_data, 'file', delim=',')
fle.load_to_array()
self.content['data'] = fle.arr
lg.record_process('_create_from_csv', 'read ' + self._calc_size_stats() + ' from ' + self.input_data)
def _create_from_owl(self):
"""
create a standard data object based on CSV file
"""
self.content['data'] = 'TODO - read OWL from ' + self.input_data
lg.record_process('_create_from_owl', 'read ' + self._calc_size_stats() + ' from ' + self.input_data)
def _create_from_url(self):
"""
create a standard data object based on CSV file
"""
import aikif.toolbox.network_tools as mod_net
mod_net.download_file_no_logon(self.input_data, 'temp_file.htm')
with open('temp_file.htm', 'r') as f:
self.content['data'] = f.read()
lg.record_process('_create_from_url', 'read ' + self._calc_size_stats() + ' from ' + self.input_data)
def _calc_size_stats(self):
"""
get the size in bytes and num records of the content
"""
self.total_records = 0
self.total_length = 0
self.total_nodes = 0
if type(self.content['data']) is dict:
self.total_length += len(str(self.content['data']))
self.total_records += 1
self.total_nodes = sum(len(x) for x in self.content['data'].values())
elif hasattr(self.content['data'], '__iter__') and type(self.content['data']) is not str:
self._get_size_recursive(self.content['data'])
else:
self.total_records += 1
self.total_length += len(str(self.content['data']))
return str(self.total_records) + ' records [or ' + str(self.total_nodes) + ' nodes], taking ' + str(self.total_length) + ' bytes'
def _get_size_recursive(self, dat):
"""
recursively walk through a data set or json file
to get the total number of nodes
"""
self.total_records += 1
#self.total_nodes += 1
for rec in dat:
if hasattr(rec, '__iter__') and type(rec) is not str:
self._get_size_recursive(rec)
else:
self.total_nodes += 1
self.total_length += len(str(rec)) | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/dataTools/cls_data.py | cls_data.py |
import os
#import sys
#import datetime
#import csv
try:
import redis
except ImportError:
print('you need to run pip install redis \nand also install the server via https://github.com/ServiceStack/redis-windows')
exit(1)
from if_database import Database
import cls_datatable
def TEST():
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + '..' + os.sep + '..' + os.sep + 'data')
fname = root_folder + os.sep + 'core' + os.sep + 'OBJECT_INFO-COURSE.csv'
print('wrapper for redis databases')
host = '127.0.0.1'
port = 6379
db = 0
d = redis_server(host, port , db)
d.connect()
print(d.server)
d.set('test123', 'this is a test')
print(d.get('test123'))
    dt = cls_datatable.DataTable(fname, 'file', delim=',')
dt.load_to_array()
d.import_datatable(dt, 'aikif', 1)
print(d.get("aikif:OBJECT_INFO-COURSE.csv:categories:Artificial Intelligence Planning"))
#127.0.0.1:6379> get #"aikif:OBJECT_INFO-COURSE.csv:categories:Artificial Intelligence Planning"
# "https://class.coursera.org/aiplan-002/"
class redis_server(Database):
def __init__(self, host, port , db):
""" override the database base class to get a
connection string to a local redis server
(this is not how class will be implemented - just testing for now)
"""
# super(Database, self).__init__([host + ':' + str(port), str(db), '', ''])
super(redis_server, self).__init__([host + ':' + str(port), str(db), '', ''])
        self.connection = redis.StrictRedis(host, port, db)
def get(self, key):
""" get a set of keys from redis """
res = self.connection.get(key)
print(res)
return res
def set(self, key, val):
""" add data """
self.connection.set(key, val)
def import_datatable(self, l_datatable, schema='datatable', col_key=0):
"""
import a datatable (grid) by using the schema:table:column as keys.
e.g. Sample input ( via cls_database.py -> test.csv)
TERM,GENDER,ID,tot1,tot2
5320,M,78,18,66
1310,M,78,10,12
        Loads keys in the form schema:file:key_value:column, e.g.
        datatable:test.csv:5320:GENDER = 'M'
        datatable:test.csv:5320:tot1 = '18'
        """
key = ''
hdr = l_datatable.get_header()
schema_root_key = schema + ':' + os.path.basename(l_datatable.name) + ':'
print(hdr)
row_num = 0
for row_num, row in enumerate(l_datatable.get_arr()):
#print(row)
for col_num, col in enumerate(row):
#print('col_num, col = ', col_num, col)
if col and col_num < len(hdr):
key = schema_root_key + row[col_key] + ':' + hdr[col_num]
self.connection.set(key, col)
#self.connection.lpush(key, col)
        print('loaded ' + str(len(l_datatable.get_arr())) + ' rows')
def export_to_CSV(self, fldr, printHeader = True):
opFile = fldr + 'test' + '.CSV'
if printHeader is True:
print('saving header')
print ('Saving to ' + opFile)
#cred = [server, database, username, password]
print(self.connection)
if __name__ == '__main__':
TEST() | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/dataTools/if_redis.py | if_redis.py |
class SQLCodeGenerator(object):
""" generates SQL based on a table """
def __init__(self, fact_table):
self.fact_table = fact_table
self.sql_text = '' # for the main procedure
self.ddl_text = '' # for the create scripts
self.col_list = []
        self.date_updated_col = 'UPDATE_DATE'   # was REC_EXTRACT_DATE
self.undo_text = '' # for the backout script
def __str__(self):
txt = self.fact_table + '\n'
for col in self.col_list:
txt += 'col : ' + col + '\n'
return txt
def get_sql(self):
return self.sql_text
def save(self, fname):
try:
with open(fname, "w") as f:
f.write(self.sql_text)
except IOError:
print("Error - cant save file " + fname)
def save_ddl(self, fname):
with open(fname, "w") as f:
f.write(self.ddl_text)
def save_undo(self, fname):
with open(fname, "w") as f:
f.write(self.undo_text)
def set_column_list(self, col_list):
"""
opens table or CSV file and collects column names, data samples
set_column_list(['yr', 'institution', 'gender', 'count_completions'])
"""
self.col_list = col_list
def add_to_column_list(self, col_list):
"""
opens table or CSV file and collects column names, data samples
set_column_list(['yr', 'institution', 'gender', 'count_completions'])
"""
self.col_list.extend(col_list)
def get_column_list_from_select(self, txt, col_delim=','):
"""
takes the list of columns as a string (via SELECT statement in DB)
and produces a lst of columns
"""
return [c.strip().strip(',') for c in txt.split(col_delim)]
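    # e.g. get_column_list_from_select('yr, gender, count(*)') returns
    # ['yr', 'gender', 'count(*)'], ready to pass to set_column_list()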
def create_script_fact(self):
"""
appends the CREATE TABLE, index etc to self.ddl_text
"""
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += '-- CREATE Fact Table - ' + self.fact_table + '\n'
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += 'DROP TABLE ' + self.fact_table + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + self.fact_table + ' (\n'
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in self.col_list])
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n'
def create_script_staging_table(self, output_table, col_list):
"""
appends the CREATE TABLE, index etc to another table
"""
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += '-- CREATE Staging Table - ' + output_table + '\n'
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += 'DROP TABLE ' + output_table + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + output_table + ' (\n '
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in col_list])
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n'
def create_index(self, tbl, col_list ):
self.ddl_text += '\nCREATE INDEX ndx_' + tbl + ' ON ' + tbl + ' ('
self.ddl_text += ','.join([c for c in col_list]) + ' );\n'
def trunc_fact_table(self):
""" wipe all records from fact table """
self.sql_text += 'DELETE FROM ' + self.fact_table + ';\n'
self.sql_text += 'COMMIT;\n'
def reverse_pivot_to_fact(self, staging_table, piv_column, piv_list, from_column, meas_names, meas_values, new_line):
"""
For each column in the piv_list, append ALL from_column's using the group_list
e.g.
Input Table
YEAR Person Q1 Q2
2010 Fred Spain 14
2010 Jane Spain 13.995
Output Table
Year Person Question Result
2010 Fred Q1 Spain
2010 Fred Q2 14
2010 Jane Q1 Spain
2010 Jane Q2 13.995
You would use:
from_column = [YEAR, Person]
pivot_column = 'Question'
piv_list = [Q1, Q2]
meas_names = [Result]
meas_values = [Result] # this can be SQL such as count(*) or count(distinct COL_NAME)
To get the result:
INSERT INTO C_UES2014_FT (
YEAR, Person, Question, Result, REC_EXTRACT_DATE) (
SELECT
YEAR, Person, 'Q1', Q1, SYSDATE
FROM S_UES2014_RAW);
INSERT INTO C_UES2014_FT (
YEAR, Person, Question, Result, REC_EXTRACT_DATE) (
SELECT
YEAR, Person, 'Q2', Q2, SYSDATE
FROM S_UES2014_RAW);
COMMIT;
"""
self.sql_text += '\n-----------------------------\n--Reverse Pivot\n--------------------------\n\n'
num_chars_on_line = 0
for piv_num in range(len(piv_list)):
# INSERT columns
self.sql_text += 'INSERT INTO ' + self.fact_table + ' (' + new_line
if piv_column not in from_column:
self.sql_text += piv_column + ', '
#pass
for g in meas_names:
self.sql_text += g + ', '
for c in from_column:
if c not in meas_names:
if c == piv_column: # dont insert the same column twice
print("Error - do NOT specify pivot column in the fact list")
exit(1)
else:
self.sql_text += c + ', '
num_chars_on_line += len(c) + 2
if num_chars_on_line > 100:
self.sql_text += new_line
num_chars_on_line = 0
self.sql_text += '' + self.date_updated_col + ') (\n'
# SELECT columns
self.sql_text += 'SELECT ' + new_line
num_chars_on_line = 0
self.sql_text += "'" + piv_list[piv_num] + "', "
for meas_num, _ in enumerate(meas_names):
if meas_values[meas_num] == '':
self.sql_text += piv_list[piv_num] + ', '
else:
self.sql_text += meas_values[meas_num] + ', '
for c in from_column:
if c not in meas_names: # dont insert the same column twice
self.sql_text += c + ', '
num_chars_on_line += len(c) + 2
if num_chars_on_line > 100:
self.sql_text += new_line
num_chars_on_line = 0
self.sql_text += 'SYSDATE \nFROM ' + staging_table
self.sql_text += ');\n'
def populate_from_staging(self, staging_table, from_column_list, output_table):
"""
generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially)
"""
self.sql_text += 'INSERT INTO ' + output_table + ' (\n'
for c in self.col_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' ' + self.date_updated_col + ') (\n'
self.sql_text += ' SELECT \n'
for c in from_column_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' SYSDATE \n FROM ' + staging_table
self.sql_text += '\n);\n'
def collect_stats(self, tbl):
self.sql_text += "collect_stats('" + tbl + "'); \n"
def comment(self, txt):
self.sql_text += txt + '\n'
def comment_block(self, txt):
self.sql_text += '\n------------------------------------------------------------\n'
self.sql_text += '-- ' + txt + '\n'
self.sql_text += '------------------------------------------------------------\n\n'
def update_old_to_new(self, col, old_val, new_val):
""" simply updates all rows and sets COL to NEW_VAL where col = old_val
e.g. update_old_to_new("NAME", "The University of Adelaide", "University of Adelaide")
will generate
UPDATE table op SET op.NAME = 'University of Adelaide' WHERE op.NAME = 'The University of Adelaide';
"""
self.sql_text += "UPDATE " + self.fact_table + " SET " + col + " = '" + new_val + "' WHERE " + col + " = '" + old_val + "'; \n"
def update_set_where(self, set_sql, where_sql):
self.sql_text += "UPDATE " + self.fact_table + " SET " + set_sql + " WHERE " + where_sql + "; \n"
def commit(self):
self.sql_text += "COMMIT;\n"
def key_to_dimension(self, fact_key, fact_join_col, dimension_name, dimension_join_col, dimension_key):
"""
create SQL to join a fact table key based on "join_col" to a dimension
The fact table is aliased as "op" and the join dimension is aliased as "ip"
meaning you can pass substrings or SQL to match values.
e.g. the command:
aup.key_to_dimension('GENDER_KEY', 'substr(op.GENDER, 1,1)', 'tbl_GENDER', 'gender_code', 'GENDER_KEY')
will generate the code:
UPDATE table op SET op.gender_key = NVL ( (SELECT MAX (ip.gender_key)
FROM tbl_GENDER ip WHERE ip.gender_code = SUBSTR (op.gender, 1, 1)), -1);
"""
self.sql_text += "UPDATE " + self.fact_table + " op SET op." + fact_key + " = NVL(\n"
self.sql_text += " (SELECT MAX (ip." + dimension_key + ")\n"
self.sql_text += " FROM " + dimension_name + " ip WHERE "
self.sql_text += fact_join_col + " = \n ip." + dimension_join_col + "), -1); \n\n"
def extract_dimension(self, dim_name, dim_cols, dim_key, dim_stag_table, src_table, src_cols, grain_cols, where_clause):
"""
selects the src_cols from src_table and groups by dim_grain
then inserts into newly created table dim_name the columns as 'dim_cols
"""
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += '-- CREATE Dimension - ' + dim_name + '\n'
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += 'DROP TABLE ' + dim_stag_table + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + dim_stag_table + ' (\n'
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in dim_cols])
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n'
self.ddl_text += 'DROP TABLE ' + dim_name + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + dim_name + ' (\n'
self.ddl_text += ' ' + dim_key + ' NUMBER, \n'
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in dim_cols])
self.ddl_text += ' REC_SOURCE_SYSTEM VARCHAR2(100), \n' # + src_table + '; \n'
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n'
self.ddl_text += 'CREATE OR REPLACE VIEW U' + dim_name[1:] + ' AS SELECT * FROM ' + dim_name + ';\n'
self.ddl_text += 'GRANT SELECT ON U' + dim_name[1:] + ' TO ALL_USERS;\n'
self.ddl_text += '\n'
self.ddl_text += 'DROP SEQUENCE SEQ_' + dim_name + ';\n'
self.ddl_text += 'CREATE SEQUENCE SEQ_' + dim_name + ';\n\n'
self.sql_text += '---------------------------------------------\n'
self.sql_text += '-- Populate Dimension - ' + dim_name + '\n'
self.sql_text += '---------------------------------------------\n'
self.sql_text += "DELETE FROM " + dim_stag_table + ";\n"
self.sql_text += "COMMIT;\n"
self.sql_text += "INSERT INTO " + dim_stag_table + " (\n"
self.sql_text += ", ".join([col for col in dim_cols])
self.sql_text += ")\n (SELECT \n"
self.sql_text += ", ".join([col for col in src_cols])
self.sql_text += "\nFROM " + src_table + "\n"
if where_clause != '':
self.sql_text += "WHERE " + where_clause + "\n"
if len(grain_cols) > 0:
self.sql_text += "GROUP BY " + ", ".join([col for col in grain_cols]) + "\n"
self.sql_text += "); \n"
self.sql_text += "COMMIT;\n"
self.sql_text += "DELETE FROM " + dim_name + ";\n"
self.sql_text += "COMMIT;\n"
self.sql_text += "INSERT INTO " + dim_name + " (\n"
self.sql_text += ", ".join([col for col in dim_cols])
self.sql_text += ", REC_SOURCE_SYSTEM, " + self.date_updated_col + " "
self.sql_text += ") \n(SELECT \n"
self.sql_text += ", ".join([col for col in src_cols])
self.sql_text += ", '" + src_table + "', sysdate "
self.sql_text += "\nFROM " + dim_stag_table + "\n"
self.sql_text += "); \n"
self.sql_text += "COMMIT;\n"
self.sql_text += "UPDATE " + dim_name + " SET " + dim_key + " = SEQ_" + dim_name + ".nextval;\n"
self.sql_text += "COMMIT;\n\n"
print(self.ddl_text)
print(self.sql_text)
def aggregate(self, opTable, group_by_cols, meas):
"""
Create an aggregate table grouped by col showing meas
The meas is something like "sum(in)" or "count(*)"
RETURNS:
DROP TABLE C_AGG_PRODUCT;
CREATE TABLE C_AGG_PRODUCT AS (
SELECT PRODUCT, sum(AMOUNT) AS result
FROM C_SALES GROUP BY PRODUCT
);
"""
self.sql_text += "DROP TABLE " + opTable + ";\n"
self.sql_text += "CREATE TABLE " + opTable + " AS (\n"
self.sql_text += " SELECT " + group_by_cols + ", " + meas + " AS result \n"
self.sql_text += " FROM " + self.fact_table + " GROUP BY " + group_by_cols + "\n"
self.sql_text += ");\n" | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/dataTools/cls_sql_code_generator.py | cls_sql_code_generator.py |
AIML Bot API
============
This is a very basic `GraphQL <http://graphql.org/>`__ API for `AIML
Bot <https://github.com/hosford42/aiml_bot>`__.
**IMPORTANT:** No security measures are implemented. Use this module as
a public-facing API at your own risk. Anyone who has access to the API
has access to the entire data set.
Endpoints
---------
The following endpoints are provided:
``/``
~~~~~
The GraphQL endpoint is the preferred method for interacting with the
system.
``/users``
~~~~~~~~~~
A JSON endpoint for listing registered users or adding a new user.
``/users/<user_id>``
~~~~~~~~~~~~~~~~~~~~
A JSON endpoint for retrieving information about a specific user.
``/users/<user_id>/messages``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A JSON endpoint for listing the messages to/from a user or sending a new
message to the bot.
``/users/<user_id>/messages/<message_id>``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A JSON endpoint for retrieving information about a specific message.
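
Example
-------

A minimal sketch of driving the JSON endpoints from Python with
``requests`` (the host, port and JSON field names here are assumptions
for illustration, not a documented contract)::

    import requests

    BASE = 'http://localhost:5000'  # assumed local dev server

    # register a user, send a message, then list the conversation
    requests.post(BASE + '/users', json={'id': 'alice', 'name': 'Alice'})
    requests.post(BASE + '/users/alice/messages', json={'content': 'Hello'})
    print(requests.get(BASE + '/users/alice/messages').json())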
| AIML-Bot-API | /AIML_Bot_API-0.0-py3-none-any.whl/AIML_Bot_API-0.0.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
import datetime
import hashlib
import os
import shelve
import threading
from collections import deque
import aiml_bot
class ItemLock:
"""A lock for a single item in a lock set."""
def __init__(self, lock_set: 'LockSet', item):
self.lock_set = lock_set
self.item = item
def acquire(self):
"""Acquire the lock."""
with self.lock_set.per_item_lock:
while self.item in self.lock_set.locked_items:
self.lock_set.item_unlocked.wait()
self.lock_set.locked_items.add(self.item)
def release(self):
"""Release the lock."""
with self.lock_set.per_item_lock:
self.lock_set.locked_items.remove(self.item)
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
class LockSet:
"""A set of named resource locks."""
def __init__(self):
self.list_lock = threading.Lock() # For updating the list itself
self.per_item_lock = threading.Lock() # For updating the list of locked items
self.item_unlocked = threading.Condition(self.per_item_lock)
self.locked_items = set() # The list of currently locked items
def acquire(self):
"""Acquire the entire set of locks."""
self.list_lock.acquire()
while self.locked_items:
self.item_unlocked.wait()
def release(self):
"""Release the entire set of locks."""
self.list_lock.release()
def __getitem__(self, item):
return ItemLock(self, item)
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
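# Minimal usage sketch (illustrative): a LockSet serialises work per key
# while letting different keys proceed concurrently.
#
#     locks = LockSet()
#     with locks['user-42']:
#         ...          # critical section for this user only
#     with locks:
#         ...          # exclusive access across every key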
class DataManager:
"""The DataManager handles the storage of conversational data and
triggering of the bot on behalf of the endpoints. It is designed to be
thread-safe."""
def __init__(self, bot: aiml_bot.Bot = None, data_folder: str = None):
if data_folder is None:
data_folder = os.path.expanduser('~/aiml_bot_api')
if not os.path.isdir(data_folder):
os.makedirs(data_folder)
if not os.path.isdir(os.path.join(data_folder, 'messages')):
os.makedirs(os.path.join(data_folder, 'messages'))
self.data_folder = data_folder
self.users = shelve.open(os.path.join(data_folder, 'users.db'))
self.user_sessions = shelve.open(os.path.join(data_folder, 'user_sessions.db'))
self.user_message_cache = {}
self.user_message_lru = deque()
self.max_cached_users = 1000
self.user_locks = LockSet()
self.message_locks = LockSet()
self.sessions_lock = threading.Lock()
self.bot_lock = threading.Lock()
if bot is None:
bot = aiml_bot.Bot(commands="load std aiml")
self.bot = bot
def __del__(self) -> None:
self.close()
def close(self) -> None:
"""Close all resources held by the data manager in a clean and safe
manner. Once this has been called, the data manager will no longer be
in a usable state."""
self.user_locks.acquire()
self.message_locks.acquire()
self.sessions_lock.acquire()
self.bot_lock.acquire()
self.users.close()
self.user_sessions.close()
for messages_db in self.user_message_cache.values():
messages_db.close()
def get_user_ids(self) -> list:
"""Return a list of user IDs."""
with self.user_locks:
return list(self.users)
def add_user(self, user_id: str, user_name: str) -> None:
"""Add a new user. The user id must be new. Otherwise a KeyError is
raised."""
with self.user_locks, self.user_locks[user_id]:
if user_id in self.users:
raise KeyError(user_id)
self.users[user_id] = {
'id': user_id,
'name': user_name,
}
def set_user_name(self, user_id: str, user_name: str) -> None:
"""Set the user's name to a new value. The user ID must already exist.
If it does not, a KeyError is raised."""
with self.user_locks[user_id]:
# This has to be extracted, modified, and inserted as a unit; if
# you operate directly on the user data without reassigning, e.g.
# with `self.users[user_id]['name'] = user_name`, the changes
# will not be written to disk and will be lost.
user_data = self.users[user_id]
user_data['name'] = user_name
self.users[user_id] = user_data
def get_user_data(self, user_id: str) -> dict:
"""Return the data associated with a given user ID. If no such user ID
exists, raise a KeyError."""
with self.user_locks[user_id]:
return self.users[user_id]
def _get_messages(self, user_id: str) -> dict:
if user_id in self.user_message_cache:
messages_db = self.user_message_cache[user_id]
self.user_message_lru.remove(user_id)
else:
if len(self.user_message_cache) >= self.max_cached_users:
lru = self.user_message_lru.popleft()
self.user_message_cache.pop(lru).close()
with self.bot_lock:
session_data = self.bot.get_session_data(lru)
with self.sessions_lock:
self.user_sessions[lru] = session_data
with self.bot_lock:
self.bot.delete_session(lru)
messages_db = shelve.open(os.path.join(self.data_folder, 'messages', user_id + '.db'))
with self.sessions_lock:
session_data = self.user_sessions.get(user_id, {})
with self.bot_lock:
self.bot.set_session_data(session_data, user_id)
self.user_message_lru.append(user_id)
return messages_db
def get_message_ids(self, user_id: str) -> list:
"""Return the list of message IDs for the given user."""
with self.user_locks[user_id]:
if user_id not in self.users:
raise KeyError(user_id)
with self.message_locks[user_id]:
return list(self._get_messages(user_id))
def add_message(self, user_id: str, content: str) -> (str, str):
"""Add a new incoming message from the user. The bot is given the
immediate opportunity to respond, in which case the bot's response
is also added. If the bot generates a response, a tuple (id1, id2)
is returned, where id1 is the message ID of the user's message, and
id2 is the message ID of the bot's reply. Otherwise, None is returned
for the value of id2. If the user does not exist, a KeyError is raised.
"""
timestamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S.%f')
message_id = 'c' + hashlib.sha256(timestamp.encode()).hexdigest()
with self.user_locks[user_id]:
if user_id not in self.users:
raise KeyError(user_id)
with self.message_locks[user_id]:
messages_db = self._get_messages(user_id)
messages_db[message_id] = {
'id': message_id,
'origin': 'client',
'content': content,
'time': timestamp,
}
with self.bot_lock:
response = self.bot.respond(content, user_id)
session_data = self.bot.get_session_data(user_id)
with self.sessions_lock:
self.user_sessions[user_id] = session_data
print("Response:", repr(response))
if response:
timestamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S.%f')
response_id = 's' + hashlib.sha256(timestamp.encode()).hexdigest()
response_data = {
'id': response_id,
'origin': 'server',
'content': response,
'time': timestamp,
}
with self.message_locks[user_id]:
messages_db[response_id] = response_data
else:
response_id = None
return message_id, response_id
def get_message_data(self, user_id: str, message_id: str) -> dict:
"""Return the data associated with a given message. If the user or
message does not exist, a KeyError is raised."""
with self.user_locks[user_id]:
if user_id not in self.users:
raise KeyError(user_id)
with self.message_locks[user_id]:
messages_db = self._get_messages(user_id)
return messages_db[message_id] | AIML-Bot-API | /AIML_Bot_API-0.0-py3-none-any.whl/aiml_bot_api/data.py | data.py |
# Mutations:
# https://dev-blog.apollodata.com/designing-graphql-mutations-e09de826ed97
# http://docs.graphene-python.org/en/latest/types/mutations/
import re
import flask_graphql
import graphene
from graphene import resolve_only_args
from .endpoints import app, data_manager
class User(graphene.ObjectType):
"""Model for the users. Each user has a name, a unique ID, and a list of
messages sent to/from the user."""
id = graphene.String() # The unique ID of the user.
name = graphene.String() # The name of the user.
messages = graphene.List( # The messages to/from this user.
lambda: Message,
id=graphene.String(),
origin=graphene.String(),
content=graphene.String(),
time=graphene.String(),
after=graphene.String(),
before=graphene.String(),
pattern=graphene.String()
)
# noinspection PyShadowingBuiltins
def __init__(self, id: str):
self.id = id
self.data = data_manager.get_user_data(id)
super().__init__()
@resolve_only_args
def resolve_id(self):
"""Resolve the id field of the user."""
return self.id
@resolve_only_args
def resolve_name(self):
"""Resolve the name field of the user."""
return self.data['name']
# noinspection PyShadowingBuiltins
@resolve_only_args
def resolve_messages(self, id=None, origin=None, content=None, time=None, after=None, before=None, pattern=None):
"""Resolve the list of messages nested under the user."""
if id is None:
message_data = [data_manager.get_message_data(self.id, id) for id in data_manager.get_message_ids(self.id)]
else:
try:
message_data = [data_manager.get_message_data(self.id, id)]
except KeyError:
message_data = []
if origin is not None:
message_data = [data for data in message_data if data['origin'] == origin]
if content is not None:
message_data = [data for data in message_data if data['content'] == content]
if time is not None:
message_data = [data for data in message_data if data['time'] == time]
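        # The 'after'/'before' filters compare timestamps numerically; the
        # YYYYMMDDHHMMSS.FFFFFF format sorts correctly when cast to float.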
if after is not None:
after = float(after)
message_data = [data for data in message_data if float(data['time']) >= after]
if before is not None:
before = float(before)
message_data = [data for data in message_data if float(data['time']) <= before]
if pattern is not None:
pattern = re.compile(pattern)
message_data = [data for data in message_data if pattern.match(data['content'])]
return [Message(self.id, data['id']) for data in message_data]
class UserInput(graphene.InputObjectType):
id = graphene.String()
name = graphene.String()
class AddUser(graphene.Mutation):
class Input:
input = graphene.Argument(UserInput)
user = graphene.Field(User)
error = graphene.String()
@staticmethod
def mutate(root, args, context, info) -> 'AddUser':
data = args.get('input')
id = data.get('id')
name = data.get('name')
try:
data_manager.add_user(id, name)
except KeyError:
user = None
error = 'User already exists.'
else:
user = User(id)
error = None
return AddUser(user=user, error=error)
class SetUserName(graphene.Mutation):
class Input:
input = graphene.Argument(UserInput)
user = graphene.Field(User)
error = graphene.String()
@staticmethod
def mutate(root, args, context, info) -> 'SetUserName':
data = args.get('input')
id = data.get('id')
name = data.get('name')
try:
data_manager.set_user_name(id, name)
except KeyError:
user = None
error = 'User not found.'
else:
user = User(id)
error = None
return SetUserName(user=user, error=error)
class Message(graphene.ObjectType):
"""The model for messages. Each message has an associated ID, an origin, a
time, a user, and the message content. Messages are always associated with
exactly one user. A message will either originate from the user and be
directed to the bot, or originate from the bot and be directed to the user;
user-to-user messages are not supported. The value of the origin will be
either "client" or "server" depending on whether it was sent by the user or
the bot, respectively. The value of the time is a string in the format
"YYYYMMDDHHMMSS.FFFFFF". Message IDs are unique among all messages
belonging to the same user, but not necessarily among messages belonging to
any user."""
id = graphene.String() # The unique (per user) ID of this message.
origin = graphene.String() # The origin of this message. (Either "server" or "client".)
content = graphene.String() # The content of this message.
time = graphene.String() # The date/time of this message, in the format YYYYMMDDHHMMSS.FFFFFF
user = graphene.Field(User) # The user who received or sent this message.
# noinspection PyShadowingBuiltins
def __init__(self, user_id, id):
self.user_id = user_id
self.id = id
super().__init__()
@resolve_only_args
def resolve_id(self):
"""Resolve the id field of the message."""
return self.id
@resolve_only_args
def resolve_origin(self):
"""Resolve the origin field of the message."""
data = data_manager.get_message_data(self.user_id, self.id)
return data['origin']
@resolve_only_args
def resolve_content(self):
"""Resolve the content field of the message."""
data = data_manager.get_message_data(self.user_id, self.id)
return data['content']
@resolve_only_args
def resolve_time(self):
"""Resolve the time field of the message."""
data = data_manager.get_message_data(self.user_id, self.id)
return data['time']
@resolve_only_args
def resolve_user(self):
"""Resolve the user field of the message."""
return User(self.user_id)
class SendMessageInput(graphene.InputObjectType):
user = graphene.InputField(UserInput)
content = graphene.String()
class SendMessage(graphene.Mutation):
class Input:
input = graphene.Argument(SendMessageInput)
user = graphene.Field(User)
message = graphene.Field(Message)
response = graphene.Field(Message)
error = graphene.String()
@staticmethod
def mutate(root, args, context, info) -> 'SendMessage':
data = args.get('input')
if data is None:
return SendMessage(user=None, message=None, response=None, error='No input specified.')
user = data.get('user') # type: UserInput
content = data.get('content') # type: str
if user is None:
return SendMessage(user=None, message=None, response=None, error='No user specified.')
if not content:
return SendMessage(user=None, message=None, response=None, error='No content specified.')
user_id = user.get('id')
if user_id is None:
return SendMessage(user=None, message=None, response=None, error='No user ID specified.')
try:
message_id, response_id = data_manager.add_message(user_id, content)
except KeyError:
user = None
message = None
response = None
error = 'User not found.'
else:
user = User(user_id)
message = Message(user_id, message_id)
if response_id is None:
response = None
else:
response = Message(user_id, response_id)
error = None
return SendMessage(user=user, message=message, response=response, error=error)
class Query(graphene.ObjectType):
"""This is the schema entry point. Queries always start from this class
and work their way through the other classes via the properties of each
class. For example, to access Query.users[user_id].messages[message_id],
the GraphQL query would be:
{
users(id: user_id) {
messages(id: message_id) {
id,
origin,
content,
time
}
}
}
It is also possible to use other selection criteria besides the id, as
determined by the corresponding resolve_*() method.
"""
users = graphene.List(
User,
id=graphene.String(),
name=graphene.String()
)
# noinspection PyShadowingBuiltins
@resolve_only_args
def resolve_users(self, id=None, name=None):
"""Resolve the selected users at the top level of the query."""
if id is None:
if name is None:
return [User(id) for id in data_manager.get_user_ids()]
else:
return [User(id) for id in data_manager.get_user_ids()
if data_manager.get_user_data(id)['name'] == name]
else:
try:
data = data_manager.get_user_data(id)
except KeyError:
return []
if name is None or data['name'] == name:
return [User(id)]
else:
return []
class Mutation(graphene.ObjectType):
add_user = AddUser.Field()
set_user_name = SetUserName.Field()
send_message = SendMessage.Field()
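# Example mutation against the schema below (illustrative IDs; graphene
# exposes the snake_case mutation fields above as camelCase):
#
#     mutation {
#         addUser(input: {id: "alice", name: "Alice"}) {
#             user { id name }
#             error
#         }
#     }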
# Register the schema and map it into an endpoint.
schema = graphene.Schema(query=Query, mutation=Mutation)
app.add_url_rule('/', view_func=flask_graphql.GraphQLView.as_view('graphql', schema=schema, graphiql=True)) | AIML-Bot-API | /AIML_Bot_API-0.0-py3-none-any.whl/aiml_bot_api/graphql.py | graphql.py |
import json
import logging
from functools import wraps
from flask import Flask, request, Response
from .data import DataManager
log = logging.getLogger(__name__)
app = Flask(__name__)
# TODO: Initialize this from a configuration file.
data_manager = DataManager()
def json_only(func):
"""Decorator for JSON-only API endpoints."""
@wraps(func)
def wrapped(*args, **kwargs):
"""The decorated function."""
        content_type = request.headers.get('Content-Type', '')
        if request.method in ('POST', 'PUT') and not content_type.startswith('application/json'):
            return Response('Unsupported Media Type: %s' % content_type, status=415)
        else:
            raw_result = func(*args, **kwargs)  # type: str
            # The endpoints return JSON strings. Decode the result to
            # extract the optional 'status' entry, which becomes the HTTP
            # status code and is stripped from the payload.
            payload = json.loads(raw_result)
            if isinstance(payload.get('status'), int):
                status = payload.pop('status')
                raw_result = json.dumps(payload)
            else:
                status = None
            return Response(raw_result, status=status, content_type='application/json; charset=utf-8')
return wrapped
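# Example client calls (illustrative; assumes the app is served at
# http://localhost:5000 and that the third-party `requests` package is
# available):
#
#     import requests
#     requests.post('http://localhost:5000/users/',
#                   json={'id': 'alice', 'name': 'Alice'})
#     requests.post('http://localhost:5000/users/alice/messages/',
#                   json={'content': 'Hello!'})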
@app.route('/users/', methods=['GET', 'POST'])
@json_only
def all_users():
"""The list of all users in the system.
The client can get the list of users, or post a new user to the list."""
if request.method == 'GET':
# noinspection PyBroadException
try:
user_ids = data_manager.get_user_ids()
except Exception:
log.exception("Error in all_users() (GET):")
return json.dumps({'type': 'error', 'value': 'Server-side error.', 'status': 500})
else:
return json.dumps({'type': 'user_list', 'value': user_ids})
else:
assert request.method == 'POST'
user_data = request.get_json()
if not isinstance(user_data, dict) or 'id' not in user_data or 'name' not in user_data or len(user_data) > 2:
return json.dumps({'type': 'error', 'value': 'Malformed request.', 'status': 400})
user_id = user_data['id'] # type: str
if not isinstance(user_id, str) or not user_id.isidentifier():
return json.dumps({'type': 'error', 'value': 'Invalid user ID.', 'status': 400})
user_name = user_data['name'] # type: str
if not isinstance(user_name, str) or not user_name:
return json.dumps({'type': 'error', 'value': 'Invalid user name.', 'status': 400})
# noinspection PyBroadException
try:
data_manager.add_user(user_id, user_name)
except KeyError:
            return json.dumps({'type': 'error', 'value': 'User already exists.', 'status': 409})
except Exception:
log.exception("Error in all_users() (%s):" % request.method)
return json.dumps({'type': 'error', 'value': 'Server-side error.', 'status': 500})
else:
return json.dumps({'type': 'user_created', 'id': user_id})
@app.route('/users/<user_id>/', methods=['GET', 'PUT'])
@json_only
def one_user(user_id):
"""A specific user. The client can get or set the associated properties for
that user."""
if request.method == 'GET':
# noinspection PyBroadException
try:
user_data = data_manager.get_user_data(user_id)
except KeyError:
return json.dumps({'type': 'error', 'value': 'User not found.', 'status': 404})
except Exception:
log.exception("Error in one_user() (GET):")
return json.dumps({'type': 'error', 'value': 'Server-side error.', 'status': 500})
else:
return json.dumps({'type': 'user', 'value': user_data})
else:
assert request.method == 'PUT'
user_data = request.get_json()
if (not isinstance(user_data, dict) or not user_data.keys() <= {'id', 'name'} or
user_data.get('id', user_id) != user_id):
return json.dumps({'type': 'error', 'value': 'Malformed request.', 'status': 400})
if 'name' in user_data:
user_name = user_data['name'] # type: str
if not isinstance(user_name, str) or not user_name:
return json.dumps({'type': 'error', 'value': 'Invalid user name.', 'status': 400})
# noinspection PyBroadException
try:
data_manager.set_user_name(user_id, user_name)
except KeyError:
            return json.dumps({'type': 'error', 'value': 'User not found.', 'status': 404})
except Exception:
log.exception("Error in all_users() (%s):" % request.method)
return json.dumps({'type': 'error', 'value': 'Server-side error.', 'status': 500})
return json.dumps({'type': 'user_updated', 'id': user_id})
@app.route('/users/<user_id>/messages/', methods=['GET', 'POST'])
@json_only
def all_messages(user_id):
"""The list of all messages associated with a given user.
The client can get the list of messages, or post a new message to the list."""
if request.method == 'GET':
# noinspection PyBroadException
try:
message_ids = data_manager.get_message_ids(user_id)
except KeyError:
return json.dumps({'type': 'error', 'value': 'User not found.', 'status': 404})
except Exception:
log.exception("Error in all_messages(%r) (GET):" % user_id)
return json.dumps({'type': 'error', 'value': 'Server-side error.', 'status': 500})
else:
return json.dumps({'type': 'message_list', 'value': message_ids})
else:
assert request.method == 'POST'
message_data = request.get_json()
if not (isinstance(message_data, dict) and message_data.get('origin', 'client') == 'client' and
'content' in message_data and not message_data.keys() - {'origin', 'content'}):
return json.dumps({'type': 'error', 'value': 'Malformed request.', 'status': 400})
content = message_data['content']
if not isinstance(content, str):
return json.dumps({'type': 'error', 'value': 'Malformed request.', 'status': 400})
content = content.strip()
if not content:
return json.dumps({'type': 'error', 'value': 'Empty message content.', 'status': 400})
# noinspection PyBroadException
try:
message_id, response_id = data_manager.add_message(user_id, content)
except KeyError:
return json.dumps({'type': 'error', 'value': 'User not found.', 'status': 404})
except Exception:
log.exception("Error in all_messages(%r) (%s):" % (user_id, request.method))
return json.dumps({'type': 'error', 'value': 'Server-side error.', 'status': 500})
return json.dumps({'type': 'message_received', 'id': message_id, 'response_id': response_id})
@app.route('/users/<user_id>/messages/<message_id>/')
@json_only
def one_message(user_id, message_id):
"""A specific message for a specific user.
The client can get the associated properties for that message."""
# noinspection PyBroadException
try:
message_data = data_manager.get_message_data(user_id, message_id)
except KeyError:
return json.dumps({'type': 'error', 'value': 'Message not found.', 'status': 404})
except Exception:
log.exception("Error in one_message(%r, %r) (GET):" % (user_id, message_id))
return json.dumps({'type': 'error', 'value': 'Server-side error.', 'status': 500})
else:
return json.dumps({'type': 'message', 'value': message_data}) | AIML-Bot-API | /AIML_Bot_API-0.0-py3-none-any.whl/aiml_bot_api/endpoints.py | endpoints.py |
# Supported Tags
This document describes the current state of AIML Bot's compliance
with the AIML 1.0.1 standard. The full AIML reference manual can be
found online [here](http://alicebot.org/TR/2001/WD-aiml).
The following tags are currently supported:
<bot name="name"> (see notes)
<condition>
<date>
<formal>
<gender>
<get>
<id>
<input>
<learn>
<li>
<lowercase>
<person>
<person2>
<random>
<sentence>
<set>
<size>
<sr>
<srai>
<star>
<system>
<that>
<thatstar>
<think>
<topic>
<topicstar>
<uppercase>
<version>
Support for the following tags should be implemented in the next version:
None
The following tags are not supported:
<gossip> (see notes)
<if> / <else> (see notes)
<javascript> (see notes)
<secure> (see notes)
------------------------------------------------------------------
## Notes
### `<bot name="name">`
To set the bot's name, set the `Bot.name` property. Note that the
name *MUST* be a single word!
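
For example (a minimal sketch, assuming a `Bot` instance named `bot`):

    bot.name = "Alice"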
### `<gossip>`

The AIML 1.0.1 specification lets engine authors implement the behavior
of the `<gossip>` tag however they wish. I haven't yet decided what I'd like
to do with it, so right now it doesn't do anything at all.
### `<if>` / `<else>`
These elements appear to have been dropped between AIML 1.0 and AIML 1.0.1.
They may someday be added as a part of an AIML 1.0 backwards-compatibility
mode, but in the meantime, use `<condition>` instead.
### `<javascript>`
Support for the JavaScript tag is not anticipated; one of the design
goals of AIML Bot is to remain 100% pure standard Python. So until
somebody writes a JavaScript interpreter in Python, AIML Bot won't
support the `<javascript>` tag. On the bright side, it is possible
to simulate the effects of the `<javascript>` tag (i.e.
dynamically-generated tag contents) using the `<system mode="sync">`
tag. This solution has the added advantage of allowing *any* programming
language to be used, not just JavaScript.
UPDATE: The python-spidermonkey project provides a bridge between Python
and the open-source SpiderMonkey JavaScript library. I am currently
investigating the possibility of adding support for the `<javascript>`
tag ON A PURELY OPTIONAL BASIS.
### `<secure>`
Some AIML implementations support a non-standard `<secure>` tag, intended
to wrap parts of a template which should only be processed if the user is
"secure", or trusted. After implementing support for this tag, I realized
that it wasn't doing anything that you can't do with the `<condition>` tag.
Therefore, I've decided to drop support for the `<secure>` tag. You can
easily duplicate its effects; simply replace this:
<secure error="you're not allowed">you are allowed</secure>
with this:
<condition name="secure">
<li value="yes">you are allowed</li>
<li>you are not allowed</li>
</condition>
Then, use the `Bot.set_predicate()` call to set the "secure" predicate to
"yes" for any session that you wish to be secure.
| AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/AIML_Bot-0.0.3.data/data/doc/aiml_bot/SUPPORTED_TAGS.md | SUPPORTED_TAGS.md |
Laundry list of future tasks, in no particular order:
- AIML 1.0.1 compliance (highest priority):
- Unknown yet well-defined elements (e.g. HTML) inside templates
(see sections 3.2, 3.6).
  - `AimlHandler._validateElemStart()` needs to test the well-formedness of
attribute values (for example, making sure that the "index" attribute
has an integer value, and not a string). UPDATE: this works for
`<star>`, `<thatstar>` and `<topicstar>`. Still needs to be written
for `<input>` and `<that>`, which take either an integer or an integer
pair.
- Support the Program D startup file syntax, or something similar? It
seems to be a good way to initialize bot settings and substitutions.
- Documentation/tutorials.
| AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/AIML_Bot-0.0.3.data/data/doc/aiml_bot/TODO.md | TODO.md |
# AIML Bot
* Original Author: Cort Stratton
* Maintainer: Aaron Hosford
* Project Home: https://github.com/hosford42/aiml_bot
AIML Bot is a fork of Cort Stratton's PyAIML, a pure-Python interpreter for
AIML (Artificial Intelligence Markup Language), refactored for PEP 8
conformance and ease of use. It strives for simple, austere, 100% compliance
with the AIML 1.0.1 standard. You can find Cort's original implementation at
https://github.com/cdwfs/pyaiml. Many thanks go to him for laying the
groundwork for this project.
For information on what's new in this version, see the CHANGES.md file.
For information on the state of development, including the current level of
AIML 1.0.1 compliance, see the SUPPORTED_TAGS.md file.
Quick & dirty example (assuming you've installed the aiml_sets package):
import aiml_bot
# The Bot class is the public interface to the AIML interpreter.
    bot = aiml_bot.Bot(commands='load std aiml')
# Loop forever, reading user input from the command line and printing
# responses.
while True:
# Use the 'respond' method to compute the response to a user's input
# string. respond() returns the interpreter's response.
print(bot.respond(input("> ")))
| AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/AIML_Bot-0.0.3.data/data/doc/aiml_bot/README.md | README.md |
AIML Bot
========
- Original Author: Cort Stratton
- Maintainer: Aaron Hosford
- Project Home: https://github.com/hosford42/aiml\_bot
AIML Bot is a fork of Cort Stratton's PyAIML, a pure-Python interpreter
for AIML (Artificial Intelligence Markup Language), refactored for PEP 8
conformance and ease of use. It strives for simple, austere, 100%
compliance with the AIML 1.0.1 standard. You can find Cort's original
implementation at https://github.com/cdwfs/pyaiml. Many thanks go to him
for laying the groundwork for this project.
For information on what's new in this version, see the CHANGES.md file.
For information on the state of development, including the current level
of AIML 1.0.1 compliance, see the SUPPORTED\_TAGS.md file.
Quick & dirty example (assuming you've installed the aiml\_sets
package):
::
import aiml_bot
# The Bot class is the public interface to the AIML interpreter.
    bot = aiml_bot.Bot(commands='load std aiml')
# Loop forever, reading user input from the command line and printing
# responses.
while True:
# Use the 'respond' method to compute the response to a user's input
# string. respond() returns the interpreter's response.
print(bot.respond(input("> ")))
| AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/AIML_Bot-0.0.3.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
default_gender = {
# masculine -> feminine
"he": "she",
"him": "her",
"his": "her",
"himself": "herself",
# feminine -> masculine
"she": "he",
"her": "him",
"hers": "his",
"herself": "himself",
}
default_person = {
# 1st->3rd (masculine)
"I": "he",
"me": "him",
"my": "his",
"mine": "his",
"myself": "himself",
# 3rd->1st (masculine)
"he": "I",
"him": "me",
"his": "my",
"himself": "myself",
# 3rd->1st (feminine)
"she": "I",
"her": "me",
"hers": "mine",
"herself": "myself",
}
default_person2 = {
# 1st -> 2nd
"I": "you",
"me": "you",
"my": "your",
"mine": "yours",
"myself": "yourself",
# 2nd -> 1st
"you": "me",
"your": "my",
"yours": "mine",
"yourself": "myself",
}
# TODO: this list is far from complete
default_normal = {
"wanna": "want to",
"gonna": "going to",
"I'm": "I am",
"I'd": "I would",
"I'll": "I will",
"I've": "I have",
"you'd": "you would",
"you're": "you are",
"you've": "you have",
"you'll": "you will",
"he's": "he is",
"he'd": "he would",
"he'll": "he will",
"she's": "she is",
"she'd": "she would",
"she'll": "she will",
"we're": "we are",
"we'd": "we would",
"we'll": "we will",
"we've": "we have",
"they're": "they are",
"they'd": "they would",
"they'll": "they will",
"they've": "they have",
"y'all": "you all",
"can't": "can not",
"cannot": "can not",
"couldn't": "could not",
"wouldn't": "would not",
"shouldn't": "should not",
"isn't": "is not",
"ain't": "is not",
"don't": "do not",
"aren't": "are not",
"won't": "will not",
"weren't": "were not",
"wasn't": "was not",
"didn't": "did not",
"hasn't": "has not",
"hadn't": "had not",
"haven't": "have not",
"where's": "where is",
"where'd": "where did",
"where'll": "where will",
"who's": "who is",
"who'd": "who did",
"who'll": "who will",
"what's": "what is",
"what'd": "what did",
"what'll": "what will",
"when's": "when is",
"when'd": "when did",
"when'll": "when will",
"why's": "why is",
"why'd": "why did",
"why'll": "why will",
"it's": "it is",
"it'd": "it would",
"it'll": "it will",
} | AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/aiml_bot/default_substitutions.py | default_substitutions.py |
import marshal
import pprint
import re
PUNCTUATION = "\"`~!@#$%^&*()-_=+[{]}\|;:',<.>/?"
class PatternManager:
"""
This class implements the AIML pattern-matching algorithm described
by Dr. Richard Wallace at the following site:
http://www.alicebot.org/documentation/matching.html
"""
# special dictionary keys
_UNDERSCORE = 0
_STAR = 1
_TEMPLATE = 2
_THAT = 3
_TOPIC = 4
_BOT_NAME = 5
def __init__(self):
self._root = {}
self._template_count = 0
self._bot_name = "Nameless"
self._punctuation_re = re.compile("[" + re.escape(PUNCTUATION) + "]")
        self._whitespace_re = re.compile(r"\s+")
@property
def template_count(self) -> int:
"""Return the number of templates currently stored."""
return self._template_count
@property
def bot_name(self) -> str:
return self._bot_name
@bot_name.setter
def bot_name(self, value: str) -> None:
"""Set the name of the bot, used to match <bot name="name"> tags in
patterns. The name must be a single word!"""
# Collapse a multi-word name into a single word
self._bot_name = ''.join(value.split())
def dump(self) -> None:
"""Print all learned patterns, for debugging purposes."""
pprint.pprint(self._root)
def save(self, filename: str) -> None:
"""Dump the current patterns to the file specified by filename. To
restore later, use restore()."""
try:
with open(filename, "wb") as file:
marshal.dump(self._template_count, file)
marshal.dump(self._bot_name, file)
marshal.dump(self._root, file)
except:
print("Error saving PatternManager to file %s:" % filename)
raise
def restore(self, filename: str) -> None:
"""Restore a previously saved collection of patterns."""
try:
with open(filename, "rb") as file:
self._template_count = marshal.load(file)
self._bot_name = marshal.load(file)
self._root = marshal.load(file)
except:
print("Error restoring PatternManager from file %s:" % filename)
raise
def add(self, pattern: str, that: str, topic: str, template: list) -> None:
"""
Add a [pattern/that/topic] tuple and its corresponding template
to the node tree.
"""
# TODO: make sure words contains only legal characters
# (alphanumerics,*,_)
# Navigate through the node tree to the template's location, adding
# nodes if necessary.
node = self._root
for word in pattern.split():
key = word
if key == "_":
key = self._UNDERSCORE
elif key == "*":
key = self._STAR
elif key == "BOT_NAME":
key = self._BOT_NAME
if key not in node:
node[key] = {}
node = node[key]
# navigate further down, if a non-empty "that" pattern was included
if len(that) > 0:
if self._THAT not in node:
node[self._THAT] = {}
node = node[self._THAT]
for word in that.split():
key = word
if key == "_":
key = self._UNDERSCORE
elif key == "*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
# navigate yet further down, if a non-empty "topic" string was included
if len(topic) > 0:
if self._TOPIC not in node:
node[self._TOPIC] = {}
node = node[self._TOPIC]
for word in topic.split():
key = word
if key == "_":
key = self._UNDERSCORE
elif key == "*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
# add the template.
if self._TEMPLATE not in node:
self._template_count += 1
node[self._TEMPLATE] = template
def match(self, pattern, that, topic):
"""Return the template which is the closest match to pattern. The
'that' parameter contains the bot's previous response. The 'topic'
parameter contains the current topic of conversation.
Returns None if no template is found.
"""
if not pattern:
return None
# Mutilate the input. Remove all punctuation and convert the text to all caps.
text_input = pattern.upper()
text_input = re.sub(self._punctuation_re, " ", text_input)
if that.strip() == "":
that = "ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._punctuation_re, " ", thatInput)
thatInput = re.sub(self._whitespace_re, " ", thatInput)
if topic.strip() == "":
topic = "ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._punctuation_re, " ", topicInput)
# Pass the input off to the recursive call
patMatch, template = self._match(text_input.split(), thatInput.split(), topicInput.split(), self._root)
return template
def star(self, starType, pattern, that, topic, index):
"""Returns a string, the portion of pattern that was matched by a *.
The 'starType' parameter specifies which type of star to find.
Legal values are:
- 'star': matches a star in the main pattern.
- 'thatstar': matches a star in the that pattern.
- 'topicstar': matches a star in the topic pattern.
"""
# Mutilate the input. Remove all punctuation and convert the
# text to all caps.
text_input = pattern.upper()
text_input = re.sub(self._punctuation_re, " ", text_input)
text_input = re.sub(self._whitespace_re, " ", text_input)
if that.strip() == "":
that = "ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._punctuation_re, " ", thatInput)
thatInput = re.sub(self._whitespace_re, " ", thatInput)
if topic.strip() == "":
topic = "ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._punctuation_re, " ", topicInput)
topicInput = re.sub(self._whitespace_re, " ", topicInput)
# Pass the input off to the recursive pattern-matcher
patMatch, template = self._match(text_input.split(), thatInput.split(), topicInput.split(), self._root)
if template is None:
return ""
# Extract the appropriate portion of the pattern, based on the
# starType argument.
if starType == 'star':
patMatch = patMatch[:patMatch.index(self._THAT)]
words = text_input.split()
elif starType == 'thatstar':
patMatch = patMatch[patMatch.index(self._THAT)+1:patMatch.index(self._TOPIC)]
words = thatInput.split()
elif starType == 'topicstar':
patMatch = patMatch[patMatch.index(self._TOPIC)+1:]
words = topicInput.split()
else:
# unknown value
raise ValueError("starType must be in ['star', 'thatstar', 'topicstar']")
# compare the input string to the matched pattern, word by word.
# At the end of this loop, if foundTheRightStar is true, start and
# end will contain the start and end indices (in "words") of
# the substring that the desired star matched.
foundTheRightStar = False
start = end = j = numStars = k = 0
for i in range(len(words)):
# This condition is true after processing a star
# that ISN'T the one we're looking for.
if i < k:
continue
# If we're reached the end of the pattern, we're done.
if j == len(patMatch):
break
if not foundTheRightStar:
if patMatch[j] in [self._STAR, self._UNDERSCORE]: # we got a star
numStars += 1
if numStars == index:
# This is the star we care about.
foundTheRightStar = True
start = i
# Iterate through the rest of the string.
for k in range(i, len(words)):
# If the star is at the end of the pattern,
# we know exactly where it ends.
if j + 1 == len(patMatch):
end = len(words)
break
# If the words have started matching the
# pattern again, the star has ended.
if patMatch[j+1] == words[k]:
end = k - 1
# i = k
break
# If we just finished processing the star we cared
# about, we exit the loop early.
if foundTheRightStar:
break
# Move to the next element of the pattern.
j += 1
# extract the star words from the original, unmutilated input.
if foundTheRightStar:
if starType == 'star':
return ' '.join(pattern.split()[start:end+1])
elif starType == 'thatstar':
return ' '.join(that.split()[start:end+1])
elif starType == 'topicstar':
return ' '.join(topic.split()[start:end+1])
else:
return ""
def _match(self, words, thatWords, topicWords, root):
"""Return a tuple (pat, tem) where pat is a list of nodes, starting
at the root and leading to the matching pattern, and tem is the
matched template.
"""
# base-case: if the word list is empty, return the current node's
# template.
if len(words) == 0:
# we're out of words.
pattern = []
template = None
if len(thatWords) > 0:
# If thatWords isn't empty, recursively
# pattern-match on the _THAT node with thatWords as words.
try:
pattern, template = self._match(thatWords, [], topicWords, root[self._THAT])
if pattern is not None:
pattern = [self._THAT] + pattern
except KeyError:
pattern = []
template = None
elif len(topicWords) > 0:
# If thatWords is empty and topicWords isn't, recursively pattern
# on the _TOPIC node with topicWords as words.
try:
pattern, template = self._match(topicWords, [], [], root[self._TOPIC])
if pattern is not None:
pattern = [self._TOPIC] + pattern
except KeyError:
pattern = []
template = None
if template is None:
# we're totally out of input. Grab the template at this node.
pattern = []
try:
template = root[self._TEMPLATE]
except KeyError:
template = None
return pattern, template
first = words[0]
suffix = words[1:]
# Check underscore.
# Note: this is causing problems in the standard AIML set, and is
# currently disabled.
if self._UNDERSCORE in root:
# Must include the case where suf is [] in order to handle the case
# where a * or _ is at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._UNDERSCORE])
if template is not None:
newPattern = [self._UNDERSCORE] + pattern
return newPattern, template
# Check first
if first in root:
pattern, template = self._match(suffix, thatWords, topicWords, root[first])
if template is not None:
newPattern = [first] + pattern
return newPattern, template
# check bot name
if self._BOT_NAME in root and first == self._bot_name:
pattern, template = self._match(suffix, thatWords, topicWords, root[self._BOT_NAME])
if template is not None:
newPattern = [first] + pattern
return newPattern, template
# check star
if self._STAR in root:
# Must include the case where suf is [] in order to handle the case
# where a * or _ is at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._STAR])
if template is not None:
newPattern = [self._STAR] + pattern
return newPattern, template
# No matches were found.
return None, None | AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/aiml_bot/pattern_manager.py | pattern_manager.py |
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
import sys
import xml.sax
import xml.sax.handler
class AimlParserError(Exception):
"""AIML syntax error."""
class AimlHandler(ContentHandler):
"""Content handler for xml.sax-based AIML parser."""
# The legal states of the AIML parser
_STATE_OutsideAiml = 0
_STATE_InsideAiml = 1
_STATE_InsideCategory = 2
_STATE_InsidePattern = 3
_STATE_AfterPattern = 4
_STATE_InsideThat = 5
_STATE_AfterThat = 6
_STATE_InsideTemplate = 7
_STATE_AfterTemplate = 8
def __init__(self, encoding="utf-8"):
super().__init__()
self.categories = {}
self._encoding = encoding
self._state = self._STATE_OutsideAiml
self._version = ""
self._namespace = ""
self._forwardCompatibleMode = False
self._currentPattern = ""
self._currentThat = ""
self._currentTopic = ""
self._insideTopic = False
self._currentUnknown = "" # the name of the current unknown element
# This is set to true when a parse error occurs in a category.
self._skipCurrentCategory = False
# Counts the number of parse errors in a particular AIML document.
# query with getNumErrors(). If 0, the document is AIML-compliant.
self._numParseErrors = 0
# TODO: select the proper validInfo table based on the version number.
self._validInfo = self._validationInfo101
# This stack of Booleans is used when parsing <li> elements inside
# <condition> elements, to keep track of whether or not an
# attribute-less "default" <li> element has been found yet. Only
# one default <li> is allowed in each <condition> element. We need
# a stack in order to correctly handle nested <condition> tags.
self._foundDefaultLiStack = []
# This stack of strings indicates what the current whitespace-handling
# behavior should be. Each string in the stack is either "default" or
# "preserve". When a new AIML element is encountered, a new string is
# pushed onto the stack, based on the value of the element's "xml:space"
# attribute (if absent, the top of the stack is pushed again). When
# ending an element, pop an object off the stack.
self._whitespaceBehaviorStack = ["default"]
self._elemStack = []
self._locator = Locator()
self.setDocumentLocator(self._locator)
def getNumErrors(self):
"""Return the number of errors found while parsing the current document."""
return self._numParseErrors
def setEncoding(self, encoding):
"""Set the text encoding to use when encoding strings read from XML.
Defaults to 'utf-8'.
"""
self._encoding = encoding
def _location(self):
"""Return a string describing the current location in the source file."""
line = self._locator.getLineNumber()
column = self._locator.getColumnNumber()
return "(line %d, column %d)" % (line, column)
def _pushWhitespaceBehavior(self, attr):
"""Push a new string onto the whitespaceBehaviorStack.
The string's value is taken from the "xml:space" attribute, if it exists
and has a legal value ("default" or "preserve"). Otherwise, the previous
stack element is duplicated.
"""
assert len(self._whitespaceBehaviorStack) > 0, "Whitespace behavior stack should never be empty!"
try:
if attr["xml:space"] == "default" or attr["xml:space"] == "preserve":
self._whitespaceBehaviorStack.append(attr["xml:space"])
else:
raise AimlParserError("Invalid value for xml:space attribute " + self._location())
except KeyError:
self._whitespaceBehaviorStack.append(self._whitespaceBehaviorStack[-1])
def startElementNS(self, name, qname, attr):
"""Handle the start of a namespace."""
# print("QNAME:", qname)
# print("NAME:", name)
uri, elem = name
if elem == "bot":
print("name:", attr.getValueByQName("name"), "a'ite?")
self.startElement(elem, attr)
pass
def startElement(self, name, attr):
"""Handle the start of an element."""
# Wrapper around _startElement, which catches errors in _startElement()
# and keeps going.
# If we're inside an unknown element, ignore everything until we're
# out again.
if self._currentUnknown != "":
return
# If we're skipping the current category, ignore everything until
# it's finished.
if self._skipCurrentCategory:
return
# process this start-element.
try:
self._startElement(name, attr)
except AimlParserError as msg:
# Print the error message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _startElement(self, name, attr):
if name == "aiml":
# <aiml> tags are only legal in the OutsideAiml state
if self._state != self._STATE_OutsideAiml:
raise AimlParserError("Unexpected <aiml> tag " + self._location())
self._state = self._STATE_InsideAiml
self._insideTopic = False
self._currentTopic = ""
try:
self._version = attr["version"]
except KeyError:
# This SHOULD be a syntax error, but so many AIML sets out there are missing
# "version" attributes that it just seems nicer to let it slide.
#raise AimlParserError, "Missing 'version' attribute in <aiml> tag "+self._location()
#print "WARNING: Missing 'version' attribute in <aiml> tag "+self._location()
#print " Defaulting to version 1.0"
self._version = "1.0"
self._forwardCompatibleMode = (self._version != "1.0.1")
self._pushWhitespaceBehavior(attr)
# Not sure about this namespace business yet...
#try:
# self._namespace = attr["xmlns"]
# if self._version == "1.0.1" and self._namespace != "http://alicebot.org/2001/AIML-1.0.1":
# raise AimlParserError, "Incorrect namespace for AIML v1.0.1 "+self._location()
#except KeyError:
# if self._version != "1.0":
# raise AimlParserError, "Missing 'version' attribute(s) in <aiml> tag "+self._location()
elif self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, we ignore all tags.
return
elif name == "topic":
# <topic> tags are only legal in the InsideAiml state, and only
# if we're not already inside a topic.
if (self._state != self._STATE_InsideAiml) or self._insideTopic:
raise AimlParserError("Unexpected <topic> tag", self._location())
try:
self._currentTopic = str(attr['name'])
except KeyError:
raise AimlParserError("Required \"name\" attribute missing in <topic> element " + self._location())
self._insideTopic = True
elif name == "category":
# <category> tags are only legal in the InsideAiml state
if self._state != self._STATE_InsideAiml:
raise AimlParserError("Unexpected <category> tag "+self._location())
self._state = self._STATE_InsideCategory
self._currentPattern = ""
self._currentThat = ""
# If we're not inside a topic, the topic is implicitly set to *
if not self._insideTopic:
self._currentTopic = "*"
self._elemStack = []
self._pushWhitespaceBehavior(attr)
elif name == "pattern":
# <pattern> tags are only legal in the InsideCategory state
if self._state != self._STATE_InsideCategory:
raise AimlParserError("Unexpected <pattern> tag "+self._location())
self._state = self._STATE_InsidePattern
elif name == "that" and self._state == self._STATE_AfterPattern:
# <that> are legal either inside a <template> element, or
# inside a <category> element, between the <pattern> and the
# <template> elements. This clause handles the latter case.
self._state = self._STATE_InsideThat
elif name == "template":
# <template> tags are only legal in the AfterPattern and AfterThat
# states
if self._state not in [self._STATE_AfterPattern, self._STATE_AfterThat]:
raise AimlParserError("Unexpected <template> tag "+self._location())
# if no <that> element was specified, it is implicitly set to *
if self._state == self._STATE_AfterPattern:
self._currentThat = "*"
self._state = self._STATE_InsideTemplate
self._elemStack.append(['template', {}])
self._pushWhitespaceBehavior(attr)
elif self._state == self._STATE_InsidePattern:
# Certain tags are allowed inside <pattern> elements.
if name == "bot" and 'name' in attr and attr["name"] == "name":
# Insert a special character string that the PatternManager will
# replace with the bot's name.
self._currentPattern += " BOT_NAME "
else:
raise AimlParserError(("Unexpected <%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideThat:
# Certain tags are allowed inside <that> elements.
if name == "bot" and 'name' in attr and attr["name"] == "name":
# Insert a special character string that the PatternManager will
# replace with the bot's name.
self._currentThat += " BOT_NAME "
else:
raise AimlParserError(("Unexpected <%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideTemplate and name in self._validInfo:
# Starting a new element inside the current pattern. First
# we need to convert 'attr' into a native Python dictionary,
# so it can later be marshaled.
attrDict = {}
for k, v in attr.items():
#attrDict[k[1]] = v
attrDict[k] = str(v)
self._validateElemStart(name, attrDict, self._version)
# Push the current element onto the element stack.
self._elemStack.append([name, attrDict])
self._pushWhitespaceBehavior(attr)
# If this is a condition element, push a new entry onto the
# foundDefaultLiStack
if name == "condition":
self._foundDefaultLiStack.append(False)
else:
# we're now inside an unknown element.
if self._forwardCompatibleMode:
# In Forward Compatibility Mode, we ignore the element and its
# contents.
self._currentUnknown = name
else:
# Otherwise, unknown elements are grounds for error!
raise AimlParserError(("Unexpected <%s> tag " % name)+self._location())
def characters(self, ch):
"""Handle a sequence of characters between tags."""
# Wrapper around _characters which catches errors in _characters()
# and keeps going.
if self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, we ignore all text
return
if self._currentUnknown != "":
# If we're inside an unknown element, ignore all text
return
if self._skipCurrentCategory:
# If we're skipping the current category, ignore all text.
return
try:
self._characters(ch)
except AimlParserError as msg:
# Print the message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _characters(self, ch):
text = str(ch)
if self._state == self._STATE_InsidePattern:
self._currentPattern += text
elif self._state == self._STATE_InsideThat:
self._currentThat += text
elif self._state == self._STATE_InsideTemplate:
# First, see whether the element at the top of the element stack
# is permitted to contain text.
try:
parent = self._elemStack[-1][0]
parentAttr = self._elemStack[-1][1]
required, optional, canBeParent = self._validInfo[parent]
nonBlockStyleCondition = (parent == "condition" and
not ('name' in parentAttr and "value" in parentAttr))
if not canBeParent:
raise AimlParserError(("Unexpected text inside <%s> element " % parent) + self._location())
elif parent == "random" or nonBlockStyleCondition:
# <random> elements can only contain <li> subelements. However,
# there's invariably some whitespace around the <li> that we need
# to ignore. Same for non-block-style <condition> elements (i.e.
# those which don't have both a "name" and a "value" attribute).
if len(text.strip()) == 0:
# ignore whitespace inside these elements.
return
else:
# non-whitespace text inside these elements is a syntax error.
raise AimlParserError(("Unexpected text inside <%s> element " % parent) + self._location())
except IndexError:
# the element stack is empty. This should never happen.
raise AimlParserError("Element stack is empty while validating text "+self._location())
# Add a new text element to the element at the top of the element
# stack. If there's already a text element there, simply append the
# new characters to its contents.
try:
textElemOnStack = (self._elemStack[-1][-1][0] == "text")
except IndexError:
textElemOnStack = False
except KeyError:
textElemOnStack = False
if textElemOnStack:
self._elemStack[-1][-1][2] += text
else:
self._elemStack[-1].append(["text", {"xml:space": self._whitespaceBehaviorStack[-1]}, text])
else:
# all other text is ignored
pass
def endElementNS(self, name, qname):
"""Handle the end of a namespace."""
uri, elem = name
self.endElement(elem)
def endElement(self, name):
"""Handle the end of an element."""
# Wrapper around _endElement which catches errors in _characters() and keeps going.
if self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, ignore all tags
return
if self._currentUnknown != "":
# see if we're at the end of an unknown element. If so, we can
# stop ignoring everything.
if name == self._currentUnknown:
self._currentUnknown = ""
return
if self._skipCurrentCategory:
# If we're skipping the current category, see if it's ending. We
# stop on ANY </category> tag, since we're not keeping track of
# state in ignore-mode.
if name == "category":
self._skipCurrentCategory = False
self._state = self._STATE_InsideAiml
return
try:
self._endElement(name)
except AimlParserError as msg:
# Print the message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _endElement(self, name):
"""Verify that an AIML end element is valid in the current
context.
Raises an AimlParserError if an illegal end element is encountered.
"""
if name == "aiml":
# </aiml> tags are only legal in the InsideAiml state
if self._state != self._STATE_InsideAiml:
raise AimlParserError("Unexpected </aiml> tag "+self._location())
self._state = self._STATE_OutsideAiml
self._whitespaceBehaviorStack.pop()
elif name == "topic":
# </topic> tags are only legal in the InsideAiml state, and
# only if _insideTopic is true.
if self._state != self._STATE_InsideAiml or not self._insideTopic:
raise AimlParserError("Unexpected </topic> tag "+self._location())
self._insideTopic = False
self._currentTopic = ""
elif name == "category":
# </category> tags are only legal in the AfterTemplate state
if self._state != self._STATE_AfterTemplate:
raise AimlParserError("Unexpected </category> tag "+self._location())
self._state = self._STATE_InsideAiml
# End the current category. Store the current pattern/that/topic and
# element in the categories dictionary.
key = (self._currentPattern.strip(), self._currentThat.strip(), self._currentTopic.strip())
self.categories[key] = self._elemStack[-1]
self._whitespaceBehaviorStack.pop()
elif name == "pattern":
# </pattern> tags are only legal in the InsidePattern state
if self._state != self._STATE_InsidePattern:
raise AimlParserError("Unexpected </pattern> tag "+self._location())
self._state = self._STATE_AfterPattern
elif name == "that" and self._state == self._STATE_InsideThat:
# </that> tags are only allowed inside <template> elements or in
# the InsideThat state. This clause handles the latter case.
self._state = self._STATE_AfterThat
elif name == "template":
# </template> tags are only allowed in the InsideTemplate state.
if self._state != self._STATE_InsideTemplate:
raise AimlParserError("Unexpected </template> tag "+self._location())
self._state = self._STATE_AfterTemplate
self._whitespaceBehaviorStack.pop()
elif self._state == self._STATE_InsidePattern:
# Certain tags are allowed inside <pattern> elements.
if name not in ["bot"]:
raise AimlParserError(("Unexpected </%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideThat:
# Certain tags are allowed inside <that> elements.
if name not in ["bot"]:
raise AimlParserError(("Unexpected </%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideTemplate:
# End of an element inside the current template. Append the
# element at the top of the stack onto the one beneath it.
elem = self._elemStack.pop()
self._elemStack[-1].append(elem)
self._whitespaceBehaviorStack.pop()
# If the element was a condition, pop an item off the
# foundDefaultLiStack as well.
if elem[0] == "condition":
self._foundDefaultLiStack.pop()
else:
# Unexpected closing tag
raise AimlParserError(("Unexpected </%s> tag " % name)+self._location())
# A dictionary containing a validation information for each AIML
# element. The keys are the names of the elements. The values are a
# tuple of three items. The first is a list containing the names of
# REQUIRED attributes, the second is a list of OPTIONAL attributes,
# and the third is a boolean value indicating whether or not the
# element can contain other elements and/or text (if False, the
# element can only appear in an atomic context, such as <date/>).
_validationInfo101 = {
"bot": (["name"], [], False),
"condition": ([], ["name", "value"], True), # can only contain <li> elements
"date": ([], [], False),
"formal": ([], [], True),
"gender": ([], [], True),
"get": (["name"], [], False),
"gossip": ([], [], True),
"id": ([], [], False),
"input": ([], ["index"], False),
"javascript": ([], [], True),
"learn": ([], [], True),
"li": ([], ["name", "value"], True),
"lowercase": ([], [], True),
"person": ([], [], True),
"person2": ([], [], True),
"random": ([], [], True), # can only contain <li> elements
"sentence": ([], [], True),
"set": (["name"], [], True),
"size": ([], [], False),
"sr": ([], [], False),
"srai": ([], [], True),
"star": ([], ["index"], False),
"system": ([], [], True),
"template": ([], [], True), # needs to be in the list because it can be a parent.
"that": ([], ["index"], False),
"thatstar": ([], ["index"], False),
"think": ([], [], True),
"topicstar": ([], ["index"], False),
"uppercase": ([], [], True),
"version": ([], [], False),
}
# noinspection PyUnusedLocal
def _validateElemStart(self, name, attr, version):
"""Test the validity of an element starting inside a <template>
element.
This function raises an AimlParserError exception if it the tag is
invalid. Otherwise, no news is good news.
"""
# Check the element's attributes. Make sure that all required
# attributes are present, and that any remaining attributes are
# valid options.
required, optional, canBeParent = self._validInfo[name]
for a in required:
if a not in attr and not self._forwardCompatibleMode:
raise AimlParserError(("Required \"%s\" attribute missing in <%s> element " % (a, name)) +
self._location())
for a in attr:
if a in required:
continue
if a[0:4] == "xml:":
continue # attributes in the "xml" namespace can appear anywhere
if a not in optional and not self._forwardCompatibleMode:
raise AimlParserError(("Unexpected \"%s\" attribute in <%s> element " % (a, name)) + self._location())
# special-case: several tags contain an optional "index" attribute.
# This attribute's value must be a positive integer.
if name in ["star", "thatstar", "topicstar"]:
for k, v in attr.items():
if k == "index":
try:
temp = int(v)
except (ValueError, TypeError):
raise AimlParserError(
("Bad type for \"%s\" attribute (expected integer, found \"%s\") " % (k, v)) +
self._location()
)
if temp < 1:
raise AimlParserError(("\"%s\" attribute must have non-negative value " % k) + self._location())
# See whether the containing element is permitted to contain
# subelements. If not, this element is invalid no matter what it is.
try:
parent = self._elemStack[-1][0]
parentAttr = self._elemStack[-1][1]
except IndexError:
# If the stack is empty, no parent is present. This should never
# happen.
raise AimlParserError(("Element stack is empty while validating <%s> " % name)+self._location())
required, optional, canBeParent = self._validInfo[parent]
nonBlockStyleCondition = (parent == "condition" and not ('name' in parentAttr and 'value' in parentAttr))
if not canBeParent:
raise AimlParserError(("<%s> elements cannot have any contents " % parent) + self._location())
# Special-case test if the parent element is <condition> (the
# non-block-style variant) or <random>: these elements can only
# contain <li> subelements.
elif (parent == "random" or nonBlockStyleCondition) and name != "li":
raise AimlParserError(("<%s> elements can only contain <li> subelements " % parent) + self._location())
# Special-case test for <li> elements, which can only be contained
# by non-block-style <condition> and <random> elements, and whose
# required attributes are dependent upon which attributes are
# present in the <condition> parent.
elif name == "li":
if not (parent == "random" or nonBlockStyleCondition):
raise AimlParserError(("Unexpected <li> element contained by <%s> element " % parent) +
self._location())
if nonBlockStyleCondition:
if 'name' in parentAttr:
# Single-predicate condition. Each <li> element except the
# last must have a "value" attribute.
if len(attr) == 0:
# This could be the default <li> element for this <condition>,
# unless we've already found one.
if self._foundDefaultLiStack[-1]:
raise AimlParserError("Unexpected default <li> element inside <condition> " +
self._location())
else:
self._foundDefaultLiStack[-1] = True
elif len(attr) == 1 and 'value' in attr:
pass # this is the valid case
else:
raise AimlParserError("Invalid <li> inside single-predicate <condition> " + self._location())
elif len(parentAttr) == 0:
# Multi-predicate condition. Each <li> element except the
# last must have a "name" and a "value" attribute.
if len(attr) == 0:
# This could be the default <li> element for this <condition>,
# unless we've already found one.
if self._foundDefaultLiStack[-1]:
raise AimlParserError("Unexpected default <li> element inside <condition> " +
self._location())
else:
self._foundDefaultLiStack[-1] = True
elif len(attr) == 2 and 'value' in attr and 'name' in attr:
pass # this is the valid case
else:
raise AimlParserError("Invalid <li> inside multi-predicate <condition> "+self._location())
# All is well!
return True
def create_parser():
"""Create and return an AIML parser object."""
parser = xml.sax.make_parser()
handler = AimlHandler()
parser.setContentHandler(handler)
#parser.setFeature(xml.sax.handler.feature_namespaces, True)
return parser | AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/aiml_bot/aiml_parser.py | aiml_parser.py |
import os
import sys
import traceback
# The Bot class is the only class most implementations should need.
from .bot import Bot
__all__ = [
'Bot',
'main',
'USAGE'
]
__author__ = 'Cort Stratton'
__maintainer__ = 'Aaron Hosford'
__license__ = 'https://opensource.org/licenses/BSD-2-Clause'
__version__ = '0.0.3'
USAGE = """
Usage:
python -m aiml [BRAIN_PATH] [OPTIONS]
BRAIN_PATH
The path to the .brn "brain file" where the compiled AIML is stored.
-r
--reset
Reset the "brain file".
-n
--no-std
Do not automatically load the standard AIML rules.
""".strip()
def main():
"""
This script demonstrates how to create a bare-bones, fully functional
chatbot using AIML Bot.
"""
# When loading an AIML set, you have two options: load the original
# AIML files, or load a precompiled "brain" that was created from a
# previous run. If no brain file is available, we force a reload of
# the AIML files.
brain_path = None
reset = False
no_std = False
for arg in sys.argv[1:]:
if arg in ('-r', '--reset'):
reset = True
elif arg in ('-n', '--no-std'):
no_std = True
elif brain_path is None:
brain_path = arg
if not brain_path.endswith('.brn'):
brain_path += '.brn'
else:
print("Unexpected argument: %s" % arg)
print(USAGE)
return 1
if brain_path is None:
brain_path = os.path.expanduser('~/.aiml/default.brn')
if not os.path.isfile(brain_path):
reset = True
robot = None
if not reset:
# Attempt to load the brain file. If it fails, fall back on the
# Reload method.
# noinspection PyBroadException
try:
# The optional brain_file argument specifies a brain file to load.
robot = Bot(brain_file=brain_path)
except Exception:
print("Error loading saved brain file:")
traceback.print_exc()
reset = True
if reset:
print("Resetting.")
# Use the Bot's bootstrap() method to initialize the Bot. The
# optional learnFiles argument is a file (or list of files) to load.
# The optional commands argument is a command (or list of commands)
# to run after the files are loaded.
if no_std:
robot = Bot()
else:
robot = Bot(commands="load std aiml")
# Now that we've loaded the brain, save it to speed things up for
# next time.
robot.save_brain(brain_path)
assert robot is not None, "Bot initialization failed!"
# Enter the main input/output loop.
print("\nINTERACTIVE MODE (ctrl-c to exit)")
while True:
try:
print(robot.respond(input("> ")))
except KeyboardInterrupt:
break
robot.save_brain(brain_path)
return 0 | AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/aiml_bot/__init__.py | __init__.py |
import copy
import glob
import os
import random
import re
import string
import sys
import threading
import time
import xml.sax
from configparser import ConfigParser
from .aiml_parser import create_parser
from .default_substitutions import default_gender, default_person, default_person2, default_normal
from .pattern_manager import PatternManager
from .utilities import split_sentences
from .word_substitutions import WordSub
AIML_INSTALL_PATH = os.path.expanduser('~/.aiml')
try:
import aiml_sets
except ImportError:
aiml_sets = None
else:
for set_name in aiml_sets.list_aiml_sets():
if not aiml_sets.is_installed(set_name, destination_path=AIML_INSTALL_PATH):
aiml_sets.install(set_name, destination_path=AIML_INSTALL_PATH)
__version__ = '0.0'
DEFAULT_ENCODING = 'utf-8'
DEFAULT_SESSION_ID = "anonymous"
# special predicate keys
INPUT_HISTORY = "<INPUT HISTORY>" # keys to a queue (list) of recent user input
OUTPUT_HISTORY = "<OUTPUT HISTORY>" # keys to a queue (list) of recent responses.
INPUT_STACK = "<INPUT STACK>" # Should always be empty in between calls to respond()
BOOTSTRAP_AIML_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bootstrap.aiml'))
class Bot:
"""
The AIML bot.
"""
_max_history_size = 10 # maximum length of the _inputs and _responses lists
_max_recursion_depth = 100 # maximum number of recursive <srai>/<sr> tags before the response is aborted.
def __init__(self, brain_file: str = None, learn=None, commands=None, verbose: bool = True) -> None:
self._verbose_mode = verbose
self._brain = PatternManager()
self._respond_lock = threading.RLock()
self._text_encoding = DEFAULT_ENCODING
# set up the sessions
self._sessions = {}
self.add_session(DEFAULT_SESSION_ID)
# Set up the bot predicates
self._bot_predicates = {}
self.set_bot_predicate("name", "Nameless")
# set up the word substitutors (subbers):
self._subbers = {
'gender': WordSub(default_gender),
'person': WordSub(default_person),
'person2': WordSub(default_person2),
'normal': WordSub(default_normal)
}
self.bootstrap(brain_file, learn, commands)
def bootstrap(self, brain_file: str = None, learn=None, commands=None) -> None:
"""Prepare a Bot object for use.
If a brain_file argument is provided, the Bot attempts to
load the brain at the specified filename.
If learn is provided, the Bot attempts to load the
specified AIML file(s).
Finally, each of the input strings in the commands list is
passed to respond().
"""
loaded_brain = False
start = time.perf_counter()  # time.clock() was removed in Python 3.8
if brain_file and os.path.isfile(brain_file):
self.load_brain(brain_file)
loaded_brain = True
if learn is None:
if loaded_brain:
learn = []
else:
learn = [BOOTSTRAP_AIML_PATH]
elif isinstance(learn, str):
# learn might be a string, in which case it should be
# turned into a single-element list.
learn = [learn]
for file in learn:
file = os.path.abspath(os.path.expanduser(file))
self.learn(file)
if commands is None:
commands = []
elif isinstance(commands, str):
# ditto for commands
commands = [commands]
for cmd in commands:
print(self._respond(cmd, DEFAULT_SESSION_ID))
if self._verbose_mode:
print("Bot bootstrap completed in %.2f seconds" % (time.clock() - start))
@property
def name(self) -> str:
"""The name of the bot."""
return self._brain.bot_name
@name.setter
def name(self, value: str) -> None:
"""The name of the bot."""
self._bot_predicates['name'] = value
self._brain.bot_name = value
@property
def verbose(self) -> bool:
"""Verbose output mode."""
return self._verbose_mode
@verbose.setter
def verbose(self, value: bool) -> None:
"""Verbose output mode."""
self._verbose_mode = value
@property
def version(self) -> str:
"""Return the Bot's version string."""
return 'AIML Bot ' + __version__
@property
def text_encoding(self) -> str:
"""Set the text encoding used when loading AIML files (Latin-1, UTF-8, etc.)."""
return self._text_encoding
@text_encoding.setter
def text_encoding(self, value: str) -> None:
"""Set the text encoding used when loading AIML files (Latin-1, UTF-8, etc.)."""
self._text_encoding = value
@property
def category_count(self) -> int:
"""Return the number of categories the Bot has learned."""
# there's a one-to-one mapping between templates and categories
return self._brain.template_count
def reset_brain(self) -> None:
"""Reset the brain to its initial state."""
self._brain = PatternManager()
def load_brain(self, filename: str) -> None:
"""Attempt to load a previously-saved 'brain' from the
specified filename.
NOTE: the current contents of the 'brain' will be discarded!
"""
if self._verbose_mode:
print("Loading brain from %s..." % filename,)
start = time.perf_counter()
self._brain.restore(filename)
if self._verbose_mode:
end = time.perf_counter() - start
print("done (%d categories in %.2f seconds)" % (self._brain.template_count, end))
def save_brain(self, filename: str) -> None:
"""Dump the contents of the bot's brain to a file on disk."""
if self._verbose_mode:
print("Saving brain to %s..." % filename,)
start = time.perf_counter()
self._brain.save(filename)
if self._verbose_mode:
print("done (%.2f seconds)" % (time.clock() - start))
def get_predicate(self, name: str, session_id: str = None) -> str:
"""Retrieve the current value of the predicate 'name' from the
specified session.
If name is not a valid predicate in the session, the empty
string is returned.
"""
assert name not in (INPUT_STACK, INPUT_HISTORY, OUTPUT_HISTORY)
if session_id is None:
session_id = DEFAULT_SESSION_ID
return self._sessions[session_id].get(name, '')
def set_predicate(self, name: str, value: object, session_id: str = None) -> None:
"""Set the value of the predicate 'name' in the specified
session.
If sessionID is not a valid session, it will be created. If
name is not a valid predicate in the session, it will be
created.
"""
assert name not in (INPUT_STACK, INPUT_HISTORY, OUTPUT_HISTORY)
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id) # add the session, if it doesn't already exist.
self._sessions[session_id][name] = value
def get_input_history(self, session_id: str = None) -> list:
"""Get the input history for the given session."""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id)
return self._sessions[session_id][INPUT_HISTORY]
def set_input_history(self, history: list, session_id: str = None) -> None:
"""Set the input history for the given session."""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id)
self._sessions[session_id][INPUT_HISTORY] = history
def get_output_history(self, session_id: str = None) -> list:
"""Get the output history for the given session."""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id)
return self._sessions[session_id][OUTPUT_HISTORY]
def set_output_history(self, history: list, session_id: str = None) -> None:
"""Set the output history for the given session."""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id)
self._sessions[session_id][OUTPUT_HISTORY] = history
def get_input_stack(self, session_id: str = None) -> list:
"""Get the input stack for the given session."""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id)
return self._sessions[session_id][INPUT_STACK]
def set_input_stack(self, stack: list, session_id: str = None) -> None:
"""Set the input stack for the given session."""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id)
self._sessions[session_id][INPUT_STACK] = stack
def get_bot_predicate(self, name: str) -> str:
"""Retrieve the value of the specified bot predicate."""
return self._bot_predicates.get(name, '')
def set_bot_predicate(self, name: str, value: str) -> None:
"""Set the value of the specified bot predicate.
If name is not a valid bot predicate, it will be created.
"""
self._bot_predicates[name] = value
# Clumsy hack: if updating the bot name, we must update the
# name in the brain as well
if name == "name":
self._brain.bot_name = value
def load_substitutions(self, filename: str) -> None:
"""
Load a substitutions file.
The file must be in the Windows-style INI format (see the
standard ConfigParser module docs for information on this
format). Each section of the file is loaded into its own
substitutor.
"""
parser = ConfigParser()
parser.read(filename)
for s in parser.sections():
# Add a new WordSub instance for this section. If one already
# exists, delete it.
if s in self._subbers:
del(self._subbers[s])
self._subbers[s] = WordSub()
# iterate over the key,value pairs and add them to the subber
for key, v in parser.items(s):
self._subbers[s][key] = v
def add_session(self, session_id: str) -> None:
"""Create a new session with the specified ID string."""
# Create the session.
if session_id in self._sessions:
session_data = self._sessions[session_id]
else:
session_data = self._sessions[session_id] = {}
# Initialize the special reserved predicates
for key in INPUT_HISTORY, OUTPUT_HISTORY, INPUT_STACK:
if key not in session_data:
session_data[key] = []
def delete_session(self, session_id: str):
"""Delete the specified session."""
if session_id in self._sessions:
self._sessions.pop(session_id)
def get_session_data(self, session_id: str = None) -> dict:
"""Return a copy of the session data dictionary for the
specified session.
"""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self.add_session(session_id)
return copy.deepcopy(self._sessions[session_id])
def set_session_data(self, data: dict, session_id: str = None) -> None:
"""
Set the session data dictionary for the specified session.
"""
if session_id is None:
session_id = DEFAULT_SESSION_ID
self._sessions[session_id] = data
def learn(self, filename: str) -> None:
"""Load and learn the contents of the specified AIML file.
If filename includes wildcard characters, all matching files
will be loaded and learned.
"""
filenames = [filename]
if filename != os.path.join(AIML_INSTALL_PATH, filename):
filenames.append(os.path.join(AIML_INSTALL_PATH, filename))
filenames += [filename.lower() for filename in filenames]
for filename in filenames:
for f in glob.glob(filename):
if not os.path.isfile(f):
continue # Skip folders.
if self._verbose_mode:
print("Loading %s..." % f,)
start = time.perf_counter()
# Load and parse the AIML file.
parser = create_parser()
handler = parser.getContentHandler()
handler.setEncoding(self._text_encoding)
try:
parser.parse(f)
except xml.sax.SAXParseException as msg:
err = "\nFATAL PARSE ERROR in file %s:\n%s\n" % (f, msg)
sys.stderr.write(err)
continue
# store the pattern/template pairs in the PatternManager.
for key, tem in handler.categories.items():
self._brain.add(*key, tem)
# Parsing was successful.
if self._verbose_mode:
print("done (%.2f seconds)" % (time.clock() - start))
def respond(self, text: str, session_id: str = None) -> str:
"""Return the Bot's response to the input string."""
if not text:
return ""
if session_id is None:
session_id = DEFAULT_SESSION_ID
# prevent other threads from stomping all over us.
self._respond_lock.acquire()
# Add the session, if it doesn't already exist
self.add_session(session_id)
# split the input into discrete sentences
sentences = split_sentences(text)
final_response = ""
for s in sentences:
# Add the input to the history list before fetching the
# response, so that <input/> tags work properly.
input_history = self.get_input_history(session_id)
if not isinstance(input_history, list):
input_history = []
input_history.append(s)
while len(input_history) > self._max_history_size:
input_history.pop(0)
self.set_input_history(input_history, session_id)
# Fetch the response
response = self._respond(s, session_id)
# add the data from this exchange to the history lists
output_history = self.get_output_history(session_id)
if not isinstance(output_history, list):
output_history = []
output_history.append(response)
while len(output_history) > self._max_history_size:
output_history.pop(0)
self.set_output_history(output_history, session_id)
# append this response to the final response.
final_response += (response + " ")
final_response = final_response.strip()
assert not self.get_input_stack(session_id)
# release the lock and return
self._respond_lock.release()
return final_response
# This version of _respond() just fetches the response for some input.
# It does not mess with the input and output histories. Recursive calls
# to respond() spawned from tags like <srai> should call this function
# instead of respond().
def _respond(self, text: str, session_id: str) -> str:
"""Private version of respond(), does the real work."""
if not text:
return ""
# guard against infinite recursion
input_stack = self.get_input_stack(session_id)
if len(input_stack) > self._max_recursion_depth:
if self._verbose_mode:
err = "WARNING: maximum recursion depth exceeded (input='%s')" % text
sys.stderr.write(err)
return ""
# push the input onto the input stack
input_stack.append(text)
self.set_input_stack(input_stack, session_id)
# run the input through the 'normal' subber
subbed_input = self._subbers['normal'].sub(text)
# fetch the bot's previous response, to pass to the match()
# function as 'that'.
output_history = self.get_output_history(session_id)
if output_history:
that = output_history[-1]
else:
that = ""
subbed_that = self._subbers['normal'].sub(that)
# fetch the current topic
topic = self.get_predicate("topic", session_id)
subbed_topic = self._subbers['normal'].sub(topic)
# Determine the final response.
response = ""
elem = self._brain.match(subbed_input, subbed_that, subbed_topic)
if elem is None:
if self._verbose_mode:
err = "WARNING: No match found for input: %s\n" % text
sys.stderr.write(err)
else:
# Process the element into a response string.
response += self._process_element(elem, session_id).strip()
response += " "
response = response.strip()
# pop the top entry off the input stack.
input_stack = self.get_input_stack(session_id)
input_stack.pop()
self.set_input_stack(input_stack, session_id)
return response
def _process_element(self, element: list, session_id: str) -> str:
"""Process an AIML element.
The first item of the elem list is the name of the element's
XML tag. The second item is a dictionary containing any
attributes passed to that tag, and their values. Any further
items in the list are the elements enclosed by the current
element's begin and end tags; they are handled by each
element's handler function.
"""
element_name = element[0]
handler_name = '_process_' + element_name
handler = getattr(self, handler_name, None)
if handler is None:
# Oops -- there's no handler function for this element type!
if self._verbose_mode:
err = "WARNING: No handler found for <%s> element\n" % element[0]
sys.stderr.write(err)
return ""
return handler(element, session_id)
##################################################
# Individual element-processing functions follow #
##################################################
# <bot>
# noinspection PyUnusedLocal
def _process_bot(self, element: list, session_id: str) -> str:
"""Process a <bot> AIML element.
Required element attributes:
name: The name of the bot predicate to retrieve.
<bot> elements are used to fetch the value of global,
read-only "bot predicates." These predicates cannot be set
from within AIML; you must use the setBotPredicate() function.
"""
return self.get_bot_predicate(element[1]['name'])
# <condition>
def _process_condition(self, element: list, session_id: str) -> str:
"""Process a <condition> AIML element.
Optional element attributes:
name: The name of a predicate to test.
value: The value to test the predicate for.
<condition> elements come in three flavors. Each has different
attributes, and each handles their contents differently.
The simplest case is when the <condition> tag has both a 'name'
and a 'value' attribute. In this case, if the predicate
'name' has the value 'value', then the contents of the element
are processed and returned.
If the <condition> element has only a 'name' attribute, then
its contents are a series of <li> elements, each of which has
a 'value' attribute. The list is scanned from top to bottom
until a match is found. Optionally, the last <li> element can
have no 'value' attribute, in which case it is processed and
returned if no other match is found.
If the <condition> element has neither a 'name' nor a 'value'
attribute, then it behaves almost exactly like the previous
case, except that each <li> subelement (except the optional
last entry) must now include both 'name' and 'value'
attributes.
"""
response = ""
attributes = element[1]
name = attributes.get('name', None)
value = attributes.get('value', None)
# Case #1: test the value of a specific predicate for a
# specific value.
if name is not None and value is not None:
if self.get_predicate(name, session_id) == value:
for e in element[2:]:
response += self._process_element(e, session_id)
return response
else:
# Case #2 and #3: Cycle through <li> contents, testing a
# name and value pair for each one.
try:
# Get the list of <li> elements
list_items = []
for e in element[2:]:
if e[0] == 'li':
list_items.append(e)
# if list_items is empty, return the empty string
if not list_items:
return ""
# iterate through the list looking for a condition that
# matches.
found_match = False
for index, li in enumerate(list_items):
try:
li_attributes = li[1]
# if this is the last list item, it's allowed
# to have no attributes. We just skip it for now.
if not li_attributes and index + 1 == len(list_items):
break
# get the name of the predicate to test
li_name = name
if li_name is None:
li_name = li_attributes['name']
# get the value to check against
li_value = li_attributes['value']
# do the test
if self.get_predicate(li_name, session_id) == li_value:
found_match = True
response += self._process_element(li, session_id)
break
except Exception:
# No attributes, no name/value attributes, no
# such predicate/session, or processing error.
if self._verbose_mode:
print("Something amiss -- skipping list item", li)
raise
if not found_match:
# Check the last element of list_items. If it has
# no 'name' or 'value' attribute, process it.
try:
li = list_items[-1]
li_attributes = li[1]
if not ('name' in li_attributes or 'value' in li_attributes):
response += self._process_element(li, session_id)
except Exception:
# list_items was empty, no attributes, missing
# name/value attributes, or processing error.
if self._verbose_mode:
print("error in default list item")
raise
except Exception:
# Some other catastrophic cataclysm
if self._verbose_mode:
print("catastrophic condition failure")
raise
return response
# <date>
# noinspection PyUnusedLocal
@staticmethod
def _process_date(element: list, session_id: str) -> str:
"""Process a <date> AIML element.
<date> elements resolve to the current date and time. The
AIML specification doesn't require any particular format for
this information, so I go with whatever's simplest.
"""
return time.asctime()
# <formal>
def _process_formal(self, element: list, session_id: str) -> str:
"""Process a <formal> AIML element.
<formal> elements process their contents recursively, and then
capitalize the first letter of each word of the result.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
return string.capwords(response)
# <gender>
def _process_gender(self, element: list, session_id: str) -> str:
"""Process a <gender> AIML element.
<gender> elements process their contents, and then swap the
gender of any third-person singular pronouns in the result.
This substitution is handled by the aiml.WordSub module.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
return self._subbers['gender'].sub(response)
# <get>
def _process_get(self, element: list, session_id: str) -> str:
"""Process a <get> AIML element.
Required element attributes:
name: The name of the predicate whose value should be
retrieved from the specified session and returned. If the
predicate doesn't exist, the empty string is returned.
<get> elements return the value of a predicate from the
specified session.
"""
return self.get_predicate(element[1]['name'], session_id)
# <gossip>
def _process_gossip(self, element: list, session_id: str) -> str:
"""Process a <gossip> AIML element.
<gossip> elements are used to capture and store user input in
an implementation-defined manner, theoretically allowing the
bot to learn from the people it chats with. I haven't
decided how to define my implementation, so right now
<gossip> behaves identically to <think>.
"""
return self._process_think(element, session_id)
# <id>
# noinspection PyUnusedLocal
@staticmethod
def _process_id(element: list, session_id: str) -> str:
""" Process an <id> AIML element.
<id> elements return a unique "user id" for a specific
conversation. In AIML Bot, the user id is the name of the
current session.
"""
return session_id
# <input>
def _process_input(self, element: list, session_id: str) -> str:
"""Process an <input> AIML element.
Optional element attributes:
index: The index of the element from the history list to
return. 1 means the most recent item, 2 means the one
before that, and so on.
<input> elements return an entry from the input history for
the current session.
"""
index = int(element[1].get('index', 1))
input_history = self.get_input_history(session_id)
if len(input_history) >= index:
return input_history[-index]
else:
if self._verbose_mode:
err = "No such index %d while processing <input> element.\n" % index
sys.stderr.write(err)
return ""
# <javascript>
def _process_javascript(self, element: list, session_id: str) -> str:
"""Process a <javascript> AIML element.
<javascript> elements process their contents recursively, and
then run the results through a server-side Javascript
interpreter to compute the final response. Implementations
are not required to provide an actual Javascript interpreter,
and right now AIML Bot doesn't; <javascript> elements behave
exactly like <think> elements.
"""
return self._process_think(element, session_id)
# <learn>
def _process_learn(self, element: list, session_id: str) -> str:
"""Process a <learn> AIML element.
<learn> elements process their contents recursively, and then
treat the result as an AIML file to open and learn.
"""
filename = ""
for e in element[2:]:
filename += self._process_element(e, session_id)
self.learn(filename)
return ""
# <li>
def _process_li(self, element: list, session_id: str) -> str:
"""Process an <li> AIML element.
Optional element attributes:
name: the name of a predicate to query.
value: the value to check that predicate for.
<li> elements process their contents recursively and return
the results. They can only appear inside <condition> and
<random> elements. See _processCondition() and
_processRandom() for details of their usage.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
return response
# <lowercase>
def _process_lowercase(self, element: list, session_id: str) -> str:
"""Process a <lowercase> AIML element.
<lowercase> elements process their contents recursively, and
then convert the results to all-lowercase.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
return response.lower()
# <person>
def _process_person(self, element: list, session_id: str) -> str:
"""Process a <person> AIML element.
<person> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 2nd
person, and vice versa. This substitution is handled by the
aiml.WordSub module.
If the <person> tag is used atomically (e.g. <person/>), it is
a shortcut for <person><star/></person>.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
if len(element) <= 2: # atomic <person/> = <person><star/></person>
response = self._process_element(['star', {}], session_id)
return self._subbers['person'].sub(response)
# <person2>
def _process_person2(self, element: list, session_id: str) -> str:
"""Process a <person2> AIML element.
<person2> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 3rd
person, and vice versa. This substitution is handled by the
aiml.WordSub module.
If the <person2> tag is used atomically (e.g. <person2/>), it is
a shortcut for <person2><star/></person2>.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
if len(element) <= 2: # atomic <person2/> = <person2><star/></person2>
response = self._process_element(['star', {}], session_id)
return self._subbers['person2'].sub(response)
# <random>
def _process_random(self, element: list, session_id: str) -> str:
"""Process a <random> AIML element.
<random> elements contain zero or more <li> elements. If
none, the empty string is returned. If one or more <li>
elements are present, one of them is selected randomly to be
processed recursively and have its results returned. Only the
chosen <li> element's contents are processed. Any non-<li> contents are
ignored.
"""
list_items = []
for e in element[2:]:
if e[0] == 'li':
list_items.append(e)
if not list_items:
return ""
# select and process a random list item.
item = random.choice(list_items)
return self._process_element(item, session_id)
# <sentence>
def _process_sentence(self, element: list, session_id: str) -> str:
"""Process a <sentence> AIML element.
<sentence> elements process their contents recursively, and
then capitalize the first letter of the results.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
response = response.strip()
return response[:1].upper() + response[1:]
# <set>
def _process_set(self, element: list, session_id: str) -> str:
"""Process a <set> AIML element.
Required element attributes:
name: The name of the predicate to set.
<set> elements process their contents recursively, and assign the results to a predicate
(given by their 'name' attribute) in the current session. The contents of the element
are also returned.
"""
value = ""
for e in element[2:]:
value += self._process_element(e, session_id)
self.set_predicate(element[1]['name'], value, session_id)
return value
# <size>
# noinspection PyUnusedLocal
def _process_size(self, element: list, session_id: str) -> str:
"""Process a <size> AIML element.
<size> elements return the number of AIML categories currently
in the bot's brain.
"""
return str(self.category_count)
# <sr>
# noinspection PyUnusedLocal
def _process_sr(self, element: list, session_id: str) -> str:
"""Process an <sr> AIML element.
<sr> elements are shortcuts for <srai><star/></srai>.
"""
star = self._process_element(['star', {}], session_id)
return self._respond(star, session_id)
# <srai>
def _process_srai(self, element: list, session_id: str) -> str:
"""Process a <srai> AIML element.
<srai> elements recursively process their contents, and then
pass the results right back into the AIML interpreter as a new
piece of input. The results of this new input string are
returned.
"""
new_input = ""
for e in element[2:]:
new_input += self._process_element(e, session_id)
return self._respond(new_input, session_id)
# <star>
def _process_star(self, element: list, session_id: str) -> str:
"""Process a <star> AIML element.
Optional element attributes:
index: Which "*" character in the current pattern should
be matched?
<star> elements return the text fragment matched by the "*"
character in the current input pattern. For example, if the
input "Hello Tom Smith, how are you?" matched the pattern
"HELLO * HOW ARE YOU", then a <star> element in the template
would evaluate to "Tom Smith".
"""
index = int(element[1].get('index', 1))
# fetch the user's last input
input_stack = self.get_input_stack(session_id)
text_input = self._subbers['normal'].sub(input_stack[-1])
# fetch the Bot's last response (for 'that' context)
output_history = self.get_output_history(session_id)
if output_history:
that = self._subbers['normal'].sub(output_history[-1])
else:
that = '' # there might not be any output yet
topic = self.get_predicate("topic", session_id)
return self._brain.star("star", text_input, that, topic, index)
# <system>
def _process_system(self, element: list, session_id: str) -> str:
"""Process a <system> AIML element.
<system> elements process their contents recursively, and then
attempt to execute the results as a shell command on the
server. The AIML interpreter blocks until the command is
complete, and then returns the command's output.
For cross-platform compatibility, any file paths inside
<system> tags should use Unix-style forward slashes ("/") as a
directory separator.
"""
# build up the command string
command = ""
for e in element[2:]:
command += self._process_element(e, session_id)
# normalize the path to the command. Under Windows, this
# switches forward-slashes to back-slashes; all system
# elements should use unix-style paths for cross-platform
# compatibility.
#executable,args = command.split(" ", 1)
#executable = os.path.normpath(executable)
#command = executable + " " + args
command = os.path.normpath(command)
# execute the command.
response = ""
try:
out = os.popen(command)
except RuntimeError as msg:
if self._verbose_mode:
err = "WARNING: RuntimeError while processing \"system\" element:\n%s\n" % str(msg)
sys.stderr.write(err)
return "There was an error while computing my response. Please inform my botmaster."
time.sleep(0.01) # I'm told this works around a potential IOError exception.
for line in out:
response += line + "\n"
response = ' '.join(response.splitlines()).strip()
return response
# <template>
def _process_template(self, element: list, session_id: str) -> str:
"""Process a <template> AIML element.
<template> elements recursively process their contents, and
return the results. <template> is the root node of any AIML
response tree.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
return response
# text
# noinspection PyUnusedLocal
@staticmethod
def _process_text(element: list, session_id: str) -> str:
"""Process a raw text element.
Raw text elements aren't really AIML tags. Text elements cannot contain
other elements; instead, the third item of the 'elem' list is a text
string, which is immediately returned. They have a single attribute,
automatically inserted by the parser, which indicates whether whitespace
in the text should be preserved or not.
"""
if not isinstance(element[2], str):
raise TypeError("Text element contents are not text")
# If the whitespace behavior for this element is "default",
# we reduce all stretches of >1 whitespace characters to a single
# space. To improve performance, we do this only once for each
# text element encountered, and save the results for the future.
if element[1]["xml:space"] == "default":
# We can't just split and join because we need to preserve the
# leading and trailing spaces.
element[2] = re.sub(r'\s+', ' ', element[2])
element[1]["xml:space"] = "preserve"
return element[2]
# <that>
def _process_that(self, element: list, session_id: str) -> str:
"""Process a <that> AIML element.
Optional element attributes:
index: Specifies which element from the output history to
return. 1 is the most recent response, 2 is the next most
recent, and so on.
<that> elements (when they appear inside <template> elements)
are the output equivalent of <input> elements; they return one
of the Bot's previous responses.
"""
output_history = self.get_output_history(session_id)
index = element[1].get('index', '1')
if ',' in index:
index, sentence_index = index.split(',')
sentence_index = int(sentence_index)
else:
sentence_index = None
index = int(index)
if len(output_history) >= index:
previous_output = output_history[-index]
else:
if self._verbose_mode:
err = "No such history index %d while processing <that> element.\n" % index
sys.stderr.write(err)
return ''
if sentence_index is None:
return previous_output
sentences = split_sentences(previous_output)
if 0 < sentence_index <= len(sentences):
return sentences[sentence_index - 1]
else:
if self._verbose_mode:
err = "No such sentence index %d while processing <that> element.\n" % sentence_index
sys.stderr.write(err)
return ''
# <thatstar>
def _process_thatstar(self, element: list, session_id: str) -> str:
"""Process a <thatstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <that> pattern to match.
<thatstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <thatstar/> returns
the portion of the previous input string that was matched by a
"*" in the current category's <that> pattern.
"""
index = int(element[1].get('index', 1))
# fetch the user's last input
input_stack = self.get_input_stack(session_id)
text_input = self._subbers['normal'].sub(input_stack[-1])
# fetch the Bot's last response (for 'that' context)
output_history = self.get_output_history(session_id)
if output_history:
that = self._subbers['normal'].sub(output_history[-1])
else:
that = '' # there might not be any output yet
topic = self.get_predicate("topic", session_id)
return self._brain.star("thatstar", text_input, that, topic, index)
# <think>
def _process_think(self, element: list, session_id: str) -> str:
"""Process a <think> AIML element.
<think> elements process their contents recursively, and then
discard the results and return the empty string. They're
useful for setting predicates and learning AIML files without
generating any output.
"""
for e in element[2:]:
self._process_element(e, session_id)
return ""
# <topicstar>
def _process_topicstar(self, element: list, session_id: str) -> str:
"""Process a <topicstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <topic> pattern to match.
<topicstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <topicstar/>
returns the portion of current topic string that was matched
by a "*" in the current category's <topic> pattern.
"""
index = int(element[1].get('index', 1))
# fetch the user's last input
input_stack = self.get_input_stack(session_id)
text_input = self._subbers['normal'].sub(input_stack[-1])
# fetch the Bot's last response (for 'that' context)
output_history = self.get_output_history(session_id)
if output_history:
that = self._subbers['normal'].sub(output_history[-1])
else:
that = '' # there might not be any output yet
topic = self.get_predicate("topic", session_id)
return self._brain.star("topicstar", text_input, that, topic, index)
# <uppercase>
def _process_uppercase(self, element: list, session_id: str) -> str:
"""Process an <uppercase> AIML element.
<uppercase> elements process their contents recursively, and
return the results with all lower-case characters converted to
upper-case.
"""
response = ""
for e in element[2:]:
response += self._process_element(e, session_id)
return response.upper()
# <version>
# noinspection PyUnusedLocal
def _process_version(self, element: list, session_id: str) -> str:
"""Process a <version> AIML element.
<version> elements return the version number of the AIML
interpreter.
"""
return self.version | AIML-Bot | /AIML_Bot-0.0.3-py3-none-any.whl/aiml_bot/bot.py | bot.py |
import glob
import os
import shutil
__author__ = 'Aaron Hosford'
__version__ = '1.0.1'
_ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
_DEFAULT_INSTALL_PATH = os.path.abspath(os.path.expanduser('~/.aiml'))
def list_aiml_sets() -> list:
return [set_name for set_name in os.listdir(_ROOT_PATH) if has_aiml_set(set_name)]
def has_aiml_set(set_name: str):
return set_name[:1].isalpha() and os.path.isdir(os.path.join(_ROOT_PATH, set_name))
def get_aiml_set_path(set_name: str) -> str:
if not has_aiml_set(set_name):
raise KeyError(set_name)
return os.path.join(_ROOT_PATH, set_name)
def list_aiml_files(set_name: str = None, pattern: str = None) -> list:
if pattern is None:
pattern = '*.aiml'
elif not pattern.endswith('.aiml'):
pattern += '.aiml'
if set_name is None:
return glob.glob(os.path.join(_ROOT_PATH, '*', pattern))
else:
set_path = get_aiml_set_path(set_name)
return glob.glob(os.path.join(set_path, pattern))
def install(set_name: str = None, pattern: str = None, destination_path: str = _DEFAULT_INSTALL_PATH) -> str:
if set_name is None:
set_names = list_aiml_sets()
else:
set_names = [set_name]
if not has_aiml_set(set_name):
raise KeyError(set_name)
for set_name in set_names:
for aiml_file in list_aiml_files(set_name, pattern):
path_tail = aiml_file[len(_ROOT_PATH):].lstrip('/').lstrip('\\')
copy_path = os.path.join(destination_path, path_tail)
if not os.path.isdir(os.path.dirname(copy_path)):
os.makedirs(os.path.dirname(copy_path))
shutil.copy2(aiml_file, copy_path)
return destination_path
def is_installed(set_name: str = None, pattern: str = None, destination_path: str = _DEFAULT_INSTALL_PATH) -> bool:
if set_name is None:
set_names = list_aiml_sets()
else:
set_names = [set_name]
if not has_aiml_set(set_name):
raise KeyError(set_name)
for set_name in set_names:
for aiml_file in list_aiml_files(set_name, pattern):
path_tail = aiml_file[len(_ROOT_PATH):].lstrip('/').lstrip('\\')
if not os.path.isfile(os.path.join(destination_path, path_tail)):
return False
return True | AIML-Sets | /AIML_Sets-1.0.1-py3-none-any.whl/aiml_sets/__init__.py | __init__.py |
Free AIML Rule Sets
===================
This is a collection of open-sourced AIML (Artificial Intelligence
Markup Language) rule sets, for use with any compatible AIML
conversational engine. The files have been repackaged into a single repo
for easy download and installation. I plan to eventually distribute this
on the Python Package Index to make it installable with Python's package
installer, pip, making it easily available for use with `AIML
Bot <https://github.com/hosford42/aiml_bot>`__.
GNU General Public License
--------------------------
All files are released under the GNU General Public License. The
included AIML files are (c) ALICE A.I. Foundation, Inc. I have taken
care to exclude any files that did not specifically contain a copyright
& license header provided by the original author. Any additional files
that are not marked with a copyright header of their own are (c) Aaron
Hosford.
Included AIML Sets
------------------
- `Free A.L.I.C.E. AIML
Set <https://code.google.com/archive/p/aiml-en-us-foundation-alice/downloads>`__
(ALICE)
- `Square Bear's AIML files <http://www.square-bear.co.uk/aiml/>`__
(Mitsuku)
- `Standard AIML
Set <https://github.com/cdwfs/pyaiml/tree/master/standard>`__
(PyAIML)
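
Example usage (a minimal sketch; the set name shown is
illustrative)::

    import aiml_sets

    print(aiml_sets.list_aiml_sets())
    aiml_sets.install('alice')  # copies the set to ~/.aiml by default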
| AIML-Sets | /AIML_Sets-1.0.1-py3-none-any.whl/AIML_Sets-1.0.1.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
AIMM 5G system simulator
------------------------
The AIMM simulator emulates a cellular radio system roughly following 5G concepts and channel models. The intention is to have an easy-to-use and fast system written in pure Python with minimal dependencies. It is especially designed to be suitable for interfacing to AI engines such as ``tensorflow`` or ``pytorch``, and it is not a principal aim for it to be extremely accurate at the level of the radio channel. The simulator was developed for the AIMM project (<https://aimm.celticnext.eu>) by Keith Briggs (<https://keithbriggs.info>).
The full documentation is at <https://aimm-simulator.readthedocs.io/en/latest/>.
Software dependencies
---------------------
1. Python 3.8 or higher <https://python.org>.
2. NumPy <https://numpy.org/>.
3. Simpy <https://pypi.org/project/simpy/>.
4. If real-time plotting is needed, matplotlib <https://matplotlib.org>.
Installation
------------
Three ways are possible:
* The simplest way, direct from PyPI: ``pip install AIMM-simulator``. This will not always get the latest version.
* Download the wheel, typically ``dist/aimm_simulator-2.x.y-py3-none-any.whl`` from github, and run ``pip install <wheel>``.
* Alternatively, the package can be installed by downloading the complete repository (using the green ``<> Code ⌄`` button) as a zip, unpacking it, and then doing ``make install_local`` from inside the unpacked zip.
After installation, run a test with ``python3 examples/basic_test.py``.
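
The following sketch (based on the class and method names in the documentation, and not a verbatim copy of ``examples/basic_test.py``) shows the shape of a minimal simulation script:

```python
from AIMM_simulator import Sim

sim = Sim()                              # create the simulator
sim.make_cell()                          # add one cell
sim.make_UE().attach_to_nearest_cell()   # add one UE and attach it
sim.run(until=100)                       # run for 100 time units
```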
Simulator block diagram
-----------------------
The diagram below (not visible on pypi.org) shows the main classes in the code and the relationships between them.

| AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/README.md | README.md |
/**
* select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*/
jQuery.urldecode = function(x) {
return decodeURIComponent(x).replace(/\+/g, ' ');
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s === 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node, addItems) {
if (node.nodeType === 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 &&
!jQuery(node.parentNode).hasClass(className) &&
!jQuery(node.parentNode).hasClass("nohighlight")) {
var span;
var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.className = className;
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
var bbox = span.getBBox();
var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute('class', className);
var parentOfText = node.parentNode.parentNode;
addItems.push({
"parent": node.parentNode,
"target": rect});
}
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this, addItems);
});
}
}
var addItems = [];
var result = this.each(function() {
highlight(this, addItems);
});
for (var i = 0; i < addItems.length; ++i) {
jQuery(addItems[i].parent).before(addItems[i].target);
}
return result;
};
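/**
 * Example (illustrative; the selector and search term are assumptions):
 * wrap each match found under the page body in a highlighting span, or
 * in a <tspan> plus a backing <rect> when the match sits inside inline SVG.
 *
 *   $('div.body').highlightText('pathloss', 'highlighted');
 */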
/*
* backward compatibility for jQuery.browser
* This will be supported until firefox bug is fixed.
*/
if (!jQuery.browser) {
jQuery.uaMatch = function(ua) {
ua = ua.toLowerCase();
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
/(msie) ([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
return {
browser: match[ 1 ] || "",
version: match[ 2 ] || "0"
};
};
jQuery.browser = {};
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
this.initOnKeyListeners();
}
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
// can safely be bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated === 'undefined')
return string;
return (typeof translated === 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated === 'undefined')
return (n == 1) ? singular : plural;
return translated[Documentation.PLURAL_EXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* work around a Firefox quirk
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
if (!body.length) {
body = $('body');
}
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<p class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
.appendTo($('#searchbox'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) === 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('#searchbox .highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this === '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
},
initOnKeyListeners: function() {
$(document).keyup(function(event) {
var activeElementType = document.activeElement.tagName;
// don't navigate when in search box or textarea
if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
switch (event.keyCode) {
case 37: // left
var prevHref = $('link[rel="prev"]').prop('href');
if (prevHref) {
window.location.href = prevHref;
return false;
}
break;
case 39: // right
var nextHref = $('link[rel="next"]').prop('href');
if (nextHref) {
window.location.href = nextHref;
return false;
}
break;
}
}
});
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
}); | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/doc/sphinx_build/_static/doctools.js | doctools.js |
var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
/* Non-minified version JS is _stemmer.js if file is provided */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
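/**
 * Example (illustrative): the stemmer reduces inflected forms to a common
 * stem, which is why the search index above stores terms such as 'estim'
 * rather than 'estimate' or 'estimation'.
 *
 *   var stemmer = new Stemmer();
 *   stemmer.stemWord('relational'); // => 'relat'
 *   stemmer.stemWord('caresses'); // => 'caress'
 */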
var splitChars = (function() {
var result = {};
var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
var i, j, start, end;
for (i = 0; i < singles.length; i++) {
result[singles[i]] = true;
}
var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
[722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
[1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
[1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
[1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
[2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
[2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
[2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
[2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
[2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
[2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
[2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
[3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
[3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
[3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
[3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
[3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
[3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
[4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
[4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
[4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
[4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
[5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
[6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
[6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
[6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
[6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
[7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
[7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
[8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
[8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
[8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
[10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
[11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
[12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
[12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
[12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
[19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
[42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
[42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
[43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
[43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
[43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
[43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
[44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
[57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
[64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
[65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
[65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
for (i = 0; i < ranges.length; i++) {
start = ranges[i][0];
end = ranges[i][1];
for (j = start; j <= end; j++) {
result[j] = true;
}
}
return result;
})();
function splitQuery(query) {
var result = [];
var start = -1;
for (var i = 0; i < query.length; i++) {
if (splitChars[query.charCodeAt(i)]) {
if (start !== -1) {
result.push(query.slice(start, i));
start = -1;
}
} else if (start === -1) {
start = i;
}
}
if (start !== -1) {
result.push(query.slice(start));
}
return result;
} | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/doc/sphinx_build/_static/language_data.js | language_data.js |
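/**
 * Example (illustrative): every code point flagged in splitChars acts as
 * a separator, so hyphens and other punctuation split the query.
 *
 *   splitQuery('UMa-pathloss model'); // => ['UMa', 'pathloss', 'model']
 */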
(function() {
// Baseline setup
// --------------
// Establish the root object, `window` (`self`) in the browser, `global`
// on the server, or `this` in some virtual machines. We use `self`
// instead of `window` for `WebWorker` support.
var root = typeof self == 'object' && self.self === self && self ||
typeof global == 'object' && global.global === global && global ||
this ||
{};
// Save the previous value of the `_` variable.
var previousUnderscore = root._;
// Save bytes in the minified (but not gzipped) version:
var ArrayProto = Array.prototype, ObjProto = Object.prototype;
var SymbolProto = typeof Symbol !== 'undefined' ? Symbol.prototype : null;
// Create quick reference variables for speed access to core prototypes.
var push = ArrayProto.push,
slice = ArrayProto.slice,
toString = ObjProto.toString,
hasOwnProperty = ObjProto.hasOwnProperty;
// All **ECMAScript 5** native function implementations that we hope to use
// are declared here.
var nativeIsArray = Array.isArray,
nativeKeys = Object.keys,
nativeCreate = Object.create;
// Naked function reference for surrogate-prototype-swapping.
var Ctor = function(){};
// Create a safe reference to the Underscore object for use below.
var _ = function(obj) {
if (obj instanceof _) return obj;
if (!(this instanceof _)) return new _(obj);
this._wrapped = obj;
};
// Export the Underscore object for **Node.js**, with
// backwards-compatibility for their old module API. If we're in
// the browser, add `_` as a global object.
// (`nodeType` is checked to ensure that `module`
// and `exports` are not HTML elements.)
if (typeof exports != 'undefined' && !exports.nodeType) {
if (typeof module != 'undefined' && !module.nodeType && module.exports) {
exports = module.exports = _;
}
exports._ = _;
} else {
root._ = _;
}
// Current version.
_.VERSION = '1.9.1';
// Internal function that returns an efficient (for current engines) version
// of the passed-in callback, to be repeatedly applied in other Underscore
// functions.
var optimizeCb = function(func, context, argCount) {
if (context === void 0) return func;
switch (argCount == null ? 3 : argCount) {
case 1: return function(value) {
return func.call(context, value);
};
// The 2-argument case is omitted because we’re not using it.
case 3: return function(value, index, collection) {
return func.call(context, value, index, collection);
};
case 4: return function(accumulator, value, index, collection) {
return func.call(context, accumulator, value, index, collection);
};
}
return function() {
return func.apply(context, arguments);
};
};
var builtinIteratee;
// An internal function to generate callbacks that can be applied to each
// element in a collection, returning the desired result — either `identity`,
// an arbitrary callback, a property matcher, or a property accessor.
var cb = function(value, context, argCount) {
if (_.iteratee !== builtinIteratee) return _.iteratee(value, context);
if (value == null) return _.identity;
if (_.isFunction(value)) return optimizeCb(value, context, argCount);
if (_.isObject(value) && !_.isArray(value)) return _.matcher(value);
return _.property(value);
};
// External wrapper for our callback generator. Users may customize
// `_.iteratee` if they want additional predicate/iteratee shorthand styles.
// This abstraction hides the internal-only argCount argument.
_.iteratee = builtinIteratee = function(value, context) {
return cb(value, context, Infinity);
};
// Some functions take a variable number of arguments, or a few expected
// arguments at the beginning and then a variable number of values to operate
// on. This helper accumulates all remaining arguments past the function’s
// argument length (or an explicit `startIndex`), into an array that becomes
// the last argument. Similar to ES6’s "rest parameter".
var restArguments = function(func, startIndex) {
startIndex = startIndex == null ? func.length - 1 : +startIndex;
return function() {
var length = Math.max(arguments.length - startIndex, 0),
rest = Array(length),
index = 0;
for (; index < length; index++) {
rest[index] = arguments[index + startIndex];
}
switch (startIndex) {
case 0: return func.call(this, rest);
case 1: return func.call(this, arguments[0], rest);
case 2: return func.call(this, arguments[0], arguments[1], rest);
}
var args = Array(startIndex + 1);
for (index = 0; index < startIndex; index++) {
args[index] = arguments[index];
}
args[startIndex] = rest;
return func.apply(this, args);
};
};
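// Example (illustrative): a function with declared arity 2 gathers every
// argument past the first into the trailing array.
//
//   var pair = restArguments(function(head, tail) { return [head, tail]; });
//   pair(1, 2, 3, 4); // => [1, [2, 3, 4]]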
// An internal function for creating a new object that inherits from another.
var baseCreate = function(prototype) {
if (!_.isObject(prototype)) return {};
if (nativeCreate) return nativeCreate(prototype);
Ctor.prototype = prototype;
var result = new Ctor;
Ctor.prototype = null;
return result;
};
var shallowProperty = function(key) {
return function(obj) {
return obj == null ? void 0 : obj[key];
};
};
var has = function(obj, path) {
return obj != null && hasOwnProperty.call(obj, path);
};
var deepGet = function(obj, path) {
var length = path.length;
for (var i = 0; i < length; i++) {
if (obj == null) return void 0;
obj = obj[path[i]];
}
return length ? obj : void 0;
};
// Helper for collection methods to determine whether a collection
// should be iterated as an array or as an object.
// Related: http://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength
// Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094
var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1;
var getLength = shallowProperty('length');
var isArrayLike = function(collection) {
var length = getLength(collection);
return typeof length == 'number' && length >= 0 && length <= MAX_ARRAY_INDEX;
};
// Collection Functions
// --------------------
// The cornerstone, an `each` implementation, aka `forEach`.
// Handles raw objects in addition to array-likes. Treats all
// sparse array-likes as if they were dense.
_.each = _.forEach = function(obj, iteratee, context) {
iteratee = optimizeCb(iteratee, context);
var i, length;
if (isArrayLike(obj)) {
for (i = 0, length = obj.length; i < length; i++) {
iteratee(obj[i], i, obj);
}
} else {
var keys = _.keys(obj);
for (i = 0, length = keys.length; i < length; i++) {
iteratee(obj[keys[i]], keys[i], obj);
}
}
return obj;
};
// Return the results of applying the iteratee to each element.
_.map = _.collect = function(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length,
results = Array(length);
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
results[index] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
};
// Create a reducing function iterating left or right.
var createReduce = function(dir) {
// Wrap code that reassigns argument variables in a separate function than
// the one that accesses `arguments.length` to avoid a perf hit. (#1991)
var reducer = function(obj, iteratee, memo, initial) {
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length,
index = dir > 0 ? 0 : length - 1;
if (!initial) {
memo = obj[keys ? keys[index] : index];
index += dir;
}
for (; index >= 0 && index < length; index += dir) {
var currentKey = keys ? keys[index] : index;
memo = iteratee(memo, obj[currentKey], currentKey, obj);
}
return memo;
};
return function(obj, iteratee, memo, context) {
var initial = arguments.length >= 3;
return reducer(obj, optimizeCb(iteratee, context, 4), memo, initial);
};
};
// **Reduce** builds up a single result from a list of values, aka `inject`,
// or `foldl`.
_.reduce = _.foldl = _.inject = createReduce(1);
// The right-associative version of reduce, also known as `foldr`.
_.reduceRight = _.foldr = createReduce(-1);
// Return the first value which passes a truth test. Aliased as `detect`.
_.find = _.detect = function(obj, predicate, context) {
var keyFinder = isArrayLike(obj) ? _.findIndex : _.findKey;
var key = keyFinder(obj, predicate, context);
if (key !== void 0 && key !== -1) return obj[key];
};
// Return all the elements that pass a truth test.
// Aliased as `select`.
_.filter = _.select = function(obj, predicate, context) {
var results = [];
predicate = cb(predicate, context);
_.each(obj, function(value, index, list) {
if (predicate(value, index, list)) results.push(value);
});
return results;
};
// Return all the elements for which a truth test fails.
_.reject = function(obj, predicate, context) {
return _.filter(obj, _.negate(cb(predicate)), context);
};
// Determine whether all of the elements match a truth test.
// Aliased as `all`.
_.every = _.all = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
if (!predicate(obj[currentKey], currentKey, obj)) return false;
}
return true;
};
// Determine if at least one element in the object matches a truth test.
// Aliased as `any`.
_.some = _.any = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
if (predicate(obj[currentKey], currentKey, obj)) return true;
}
return false;
};
// Determine if the array or object contains a given item (using `===`).
// Aliased as `includes` and `include`.
_.contains = _.includes = _.include = function(obj, item, fromIndex, guard) {
if (!isArrayLike(obj)) obj = _.values(obj);
if (typeof fromIndex != 'number' || guard) fromIndex = 0;
return _.indexOf(obj, item, fromIndex) >= 0;
};
// Invoke a method (with arguments) on every item in a collection.
_.invoke = restArguments(function(obj, path, args) {
var contextPath, func;
if (_.isFunction(path)) {
func = path;
} else if (_.isArray(path)) {
contextPath = path.slice(0, -1);
path = path[path.length - 1];
}
return _.map(obj, function(context) {
var method = func;
if (!method) {
if (contextPath && contextPath.length) {
context = deepGet(context, contextPath);
}
if (context == null) return void 0;
method = context[path];
}
return method == null ? method : method.apply(context, args);
});
});
// Convenience version of a common use case of `map`: fetching a property.
_.pluck = function(obj, key) {
return _.map(obj, _.property(key));
};
// Convenience version of a common use case of `filter`: selecting only objects
// containing specific `key:value` pairs.
_.where = function(obj, attrs) {
return _.filter(obj, _.matcher(attrs));
};
// Convenience version of a common use case of `find`: getting the first object
// containing specific `key:value` pairs.
_.findWhere = function(obj, attrs) {
return _.find(obj, _.matcher(attrs));
};
// Return the maximum element (or element-based computation).
_.max = function(obj, iteratee, context) {
var result = -Infinity, lastComputed = -Infinity,
value, computed;
if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) {
obj = isArrayLike(obj) ? obj : _.values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value != null && value > result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
_.each(obj, function(v, index, list) {
computed = iteratee(v, index, list);
if (computed > lastComputed || computed === -Infinity && result === -Infinity) {
result = v;
lastComputed = computed;
}
});
}
return result;
};
// Return the minimum element (or element-based computation).
_.min = function(obj, iteratee, context) {
var result = Infinity, lastComputed = Infinity,
value, computed;
if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) {
obj = isArrayLike(obj) ? obj : _.values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value != null && value < result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
_.each(obj, function(v, index, list) {
computed = iteratee(v, index, list);
if (computed < lastComputed || computed === Infinity && result === Infinity) {
result = v;
lastComputed = computed;
}
});
}
return result;
};
// Shuffle a collection.
_.shuffle = function(obj) {
return _.sample(obj, Infinity);
};
// Sample **n** random values from a collection using the modern version of the
// [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher–Yates_shuffle).
// If **n** is not specified, returns a single random element.
// The internal `guard` argument allows it to work with `map`.
_.sample = function(obj, n, guard) {
if (n == null || guard) {
if (!isArrayLike(obj)) obj = _.values(obj);
return obj[_.random(obj.length - 1)];
}
var sample = isArrayLike(obj) ? _.clone(obj) : _.values(obj);
var length = getLength(sample);
n = Math.max(Math.min(n, length), 0);
var last = length - 1;
for (var index = 0; index < n; index++) {
var rand = _.random(index, last);
var temp = sample[index];
sample[index] = sample[rand];
sample[rand] = temp;
}
return sample.slice(0, n);
};
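// Examples (illustrative; the values shown are one possible outcome):
//
//   _.sample([1, 2, 3, 4, 5]); // => e.g. 4
//   _.sample([1, 2, 3, 4, 5], 3); // => e.g. [2, 5, 1]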
// Sort the object's values by a criterion produced by an iteratee.
_.sortBy = function(obj, iteratee, context) {
var index = 0;
iteratee = cb(iteratee, context);
return _.pluck(_.map(obj, function(value, key, list) {
return {
value: value,
index: index++,
criteria: iteratee(value, key, list)
};
}).sort(function(left, right) {
var a = left.criteria;
var b = right.criteria;
if (a !== b) {
if (a > b || a === void 0) return 1;
if (a < b || b === void 0) return -1;
}
return left.index - right.index;
}), 'value');
};
// An internal function used for aggregate "group by" operations.
var group = function(behavior, partition) {
return function(obj, iteratee, context) {
var result = partition ? [[], []] : {};
iteratee = cb(iteratee, context);
_.each(obj, function(value, index) {
var key = iteratee(value, index, obj);
behavior(result, value, key);
});
return result;
};
};
// Groups the object's values by a criterion. Pass either a string attribute
// to group by, or a function that returns the criterion.
_.groupBy = group(function(result, value, key) {
if (has(result, key)) result[key].push(value); else result[key] = [value];
});
// Indexes the object's values by a criterion, similar to `groupBy`, but for
// when you know that your index values will be unique.
_.indexBy = group(function(result, value, key) {
result[key] = value;
});
// Counts instances of an object that group by a certain criterion. Pass
// either a string attribute to count by, or a function that returns the
// criterion.
_.countBy = group(function(result, value, key) {
if (has(result, key)) result[key]++; else result[key] = 1;
});
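// Examples (illustrative) of the three grouping flavors:
//
//   _.groupBy([1.3, 2.1, 2.4], Math.floor); // => {1: [1.3], 2: [2.1, 2.4]}
//   _.indexBy([{id: 'a'}, {id: 'b'}], 'id'); // => {a: {id: 'a'}, b: {id: 'b'}}
//   _.countBy(['one', 'two', 'three'], 'length'); // => {3: 2, 5: 1}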
var reStrSymbol = /[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g;
// Safely create a real, live array from anything iterable.
_.toArray = function(obj) {
if (!obj) return [];
if (_.isArray(obj)) return slice.call(obj);
if (_.isString(obj)) {
// Keep surrogate pair characters together
return obj.match(reStrSymbol);
}
if (isArrayLike(obj)) return _.map(obj, _.identity);
return _.values(obj);
};
// Return the number of elements in an object.
_.size = function(obj) {
if (obj == null) return 0;
return isArrayLike(obj) ? obj.length : _.keys(obj).length;
};
// Split a collection into two arrays: one whose elements all satisfy the given
// predicate, and one whose elements all do not satisfy the predicate.
_.partition = group(function(result, value, pass) {
result[pass ? 0 : 1].push(value);
}, true);
// Array Functions
// ---------------
// Get the first element of an array. Passing **n** will return the first N
// values in the array. Aliased as `head` and `take`. The **guard** check
// allows it to work with `_.map`.
_.first = _.head = _.take = function(array, n, guard) {
if (array == null || array.length < 1) return n == null ? void 0 : [];
if (n == null || guard) return array[0];
return _.initial(array, array.length - n);
};
// Returns everything but the last entry of the array. Especially useful on
// the arguments object. Passing **n** will return all the values in
// the array, excluding the last N.
_.initial = function(array, n, guard) {
return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n)));
};
// Get the last element of an array. Passing **n** will return the last N
// values in the array.
_.last = function(array, n, guard) {
if (array == null || array.length < 1) return n == null ? void 0 : [];
if (n == null || guard) return array[array.length - 1];
return _.rest(array, Math.max(0, array.length - n));
};
// Returns everything but the first entry of the array. Aliased as `tail` and `drop`.
// Especially useful on the arguments object. Passing an **n** will return
// the rest N values in the array.
_.rest = _.tail = _.drop = function(array, n, guard) {
return slice.call(array, n == null || guard ? 1 : n);
};
// Trim out all falsy values from an array.
_.compact = function(array) {
return _.filter(array, Boolean);
};
// Internal implementation of a recursive `flatten` function.
var flatten = function(input, shallow, strict, output) {
output = output || [];
var idx = output.length;
for (var i = 0, length = getLength(input); i < length; i++) {
var value = input[i];
if (isArrayLike(value) && (_.isArray(value) || _.isArguments(value))) {
// Flatten current level of array or arguments object.
if (shallow) {
var j = 0, len = value.length;
while (j < len) output[idx++] = value[j++];
} else {
flatten(value, shallow, strict, output);
idx = output.length;
}
} else if (!strict) {
output[idx++] = value;
}
}
return output;
};
// Flatten out an array, either recursively (by default), or just one level.
_.flatten = function(array, shallow) {
return flatten(array, shallow, false);
};
// Return a version of the array that does not contain the specified value(s).
_.without = restArguments(function(array, otherArrays) {
return _.difference(array, otherArrays);
});
// Produce a duplicate-free version of the array. If the array has already
// been sorted, you have the option of using a faster algorithm.
// The faster algorithm will not work with an iteratee if the iteratee
// is not a one-to-one function, so providing an iteratee will disable
// the faster algorithm.
// Aliased as `unique`.
_.uniq = _.unique = function(array, isSorted, iteratee, context) {
if (!_.isBoolean(isSorted)) {
context = iteratee;
iteratee = isSorted;
isSorted = false;
}
if (iteratee != null) iteratee = cb(iteratee, context);
var result = [];
var seen = [];
for (var i = 0, length = getLength(array); i < length; i++) {
var value = array[i],
computed = iteratee ? iteratee(value, i, array) : value;
if (isSorted && !iteratee) {
if (!i || seen !== computed) result.push(value);
seen = computed;
} else if (iteratee) {
if (!_.contains(seen, computed)) {
seen.push(computed);
result.push(value);
}
} else if (!_.contains(result, value)) {
result.push(value);
}
}
return result;
};
// Produce an array that contains the union: each distinct element from all of
// the passed-in arrays.
_.union = restArguments(function(arrays) {
return _.uniq(flatten(arrays, true, true));
});
// Produce an array that contains every item shared between all the
// passed-in arrays.
_.intersection = function(array) {
var result = [];
var argsLength = arguments.length;
for (var i = 0, length = getLength(array); i < length; i++) {
var item = array[i];
if (_.contains(result, item)) continue;
var j;
for (j = 1; j < argsLength; j++) {
if (!_.contains(arguments[j], item)) break;
}
if (j === argsLength) result.push(item);
}
return result;
};
// Take the difference between one array and a number of other arrays.
// Only the elements present in just the first array will remain.
_.difference = restArguments(function(array, rest) {
rest = flatten(rest, true, true);
return _.filter(array, function(value){
return !_.contains(rest, value);
});
});
// Complement of _.zip. Unzip accepts an array of arrays and groups
// each array's elements on shared indices.
_.unzip = function(array) {
var length = array && _.max(array, getLength).length || 0;
var result = Array(length);
for (var index = 0; index < length; index++) {
result[index] = _.pluck(array, index);
}
return result;
};
// Zip together multiple lists into a single array -- elements that share
// an index go together.
_.zip = restArguments(_.unzip);
// Converts lists into objects. Pass either a single array of `[key, value]`
// pairs, or two parallel arrays of the same length -- one of keys, and one of
// the corresponding values. Passing by pairs is the reverse of _.pairs.
_.object = function(list, values) {
var result = {};
for (var i = 0, length = getLength(list); i < length; i++) {
if (values) {
result[list[i]] = values[i];
} else {
result[list[i][0]] = list[i][1];
}
}
return result;
};
// Generator function to create the findIndex and findLastIndex functions.
var createPredicateIndexFinder = function(dir) {
return function(array, predicate, context) {
predicate = cb(predicate, context);
var length = getLength(array);
var index = dir > 0 ? 0 : length - 1;
for (; index >= 0 && index < length; index += dir) {
if (predicate(array[index], index, array)) return index;
}
return -1;
};
};
// Returns the first index on an array-like that passes a predicate test.
_.findIndex = createPredicateIndexFinder(1);
_.findLastIndex = createPredicateIndexFinder(-1);
// Use a comparator function to figure out the smallest index at which
// an object should be inserted so as to maintain order. Uses binary search.
_.sortedIndex = function(array, obj, iteratee, context) {
iteratee = cb(iteratee, context, 1);
var value = iteratee(obj);
var low = 0, high = getLength(array);
while (low < high) {
var mid = Math.floor((low + high) / 2);
if (iteratee(array[mid]) < value) low = mid + 1; else high = mid;
}
return low;
};
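// Examples (illustrative): the returned index keeps the array sorted if
// the value is inserted there; an iteratee (here a property name) makes
// it work on objects as well.
//
//   _.sortedIndex([10, 20, 30, 40], 35); // => 3
//   _.sortedIndex([{age: 20}, {age: 40}], {age: 30}, 'age'); // => 1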
// Generator function to create the indexOf and lastIndexOf functions.
var createIndexFinder = function(dir, predicateFind, sortedIndex) {
return function(array, item, idx) {
var i = 0, length = getLength(array);
if (typeof idx == 'number') {
if (dir > 0) {
i = idx >= 0 ? idx : Math.max(idx + length, i);
} else {
length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1;
}
} else if (sortedIndex && idx && length) {
idx = sortedIndex(array, item);
return array[idx] === item ? idx : -1;
}
if (item !== item) {
idx = predicateFind(slice.call(array, i, length), _.isNaN);
return idx >= 0 ? idx + i : -1;
}
for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) {
if (array[idx] === item) return idx;
}
return -1;
};
};
// Return the position of the first occurrence of an item in an array,
// or -1 if the item is not included in the array.
// If the array is large and already in sort order, pass `true`
// for **isSorted** to use binary search.
_.indexOf = createIndexFinder(1, _.findIndex, _.sortedIndex);
_.lastIndexOf = createIndexFinder(-1, _.findLastIndex);
// Generate an integer Array containing an arithmetic progression. A port of
// the native Python `range()` function. See
// [the Python documentation](http://docs.python.org/library/functions.html#range).
_.range = function(start, stop, step) {
if (stop == null) {
stop = start || 0;
start = 0;
}
if (!step) {
step = stop < start ? -1 : 1;
}
var length = Math.max(Math.ceil((stop - start) / step), 0);
var range = Array(length);
for (var idx = 0; idx < length; idx++, start += step) {
range[idx] = start;
}
return range;
};
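// Examples (illustrative), mirroring Python's range():
//
//   _.range(5); // => [0, 1, 2, 3, 4]
//   _.range(0, 30, 5); // => [0, 5, 10, 15, 20, 25]
//   _.range(0); // => []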
// Chunk a single array into multiple arrays, each containing `count` or fewer
// items.
_.chunk = function(array, count) {
if (count == null || count < 1) return [];
var result = [];
var i = 0, length = array.length;
while (i < length) {
result.push(slice.call(array, i, i += count));
}
return result;
};
// Function (ahem) Functions
// ------------------
// Determines whether to execute a function as a constructor
// or a normal function with the provided arguments.
var executeBound = function(sourceFunc, boundFunc, context, callingContext, args) {
if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args);
var self = baseCreate(sourceFunc.prototype);
var result = sourceFunc.apply(self, args);
if (_.isObject(result)) return result;
return self;
};
// Create a function bound to a given object (assigning `this`, and arguments,
// optionally). Delegates to **ECMAScript 5**'s native `Function.bind` if
// available.
_.bind = restArguments(function(func, context, args) {
if (!_.isFunction(func)) throw new TypeError('Bind must be called on a function');
var bound = restArguments(function(callArgs) {
return executeBound(func, bound, context, this, args.concat(callArgs));
});
return bound;
});
// Partially apply a function by creating a version that has had some of its
// arguments pre-filled, without changing its dynamic `this` context. _ acts
// as a placeholder by default, allowing any combination of arguments to be
// pre-filled. Set `_.partial.placeholder` for a custom placeholder argument.
_.partial = restArguments(function(func, boundArgs) {
var placeholder = _.partial.placeholder;
var bound = function() {
var position = 0, length = boundArgs.length;
var args = Array(length);
for (var i = 0; i < length; i++) {
args[i] = boundArgs[i] === placeholder ? arguments[position++] : boundArgs[i];
}
while (position < arguments.length) args.push(arguments[position++]);
return executeBound(func, bound, this, this, args);
};
return bound;
});
_.partial.placeholder = _;
// Bind a number of an object's methods to that object. Remaining arguments
// are the method names to be bound. Useful for ensuring that all callbacks
// defined on an object belong to it.
_.bindAll = restArguments(function(obj, keys) {
keys = flatten(keys, false, false);
var index = keys.length;
if (index < 1) throw new Error('bindAll must be passed function names');
while (index--) {
var key = keys[index];
obj[key] = _.bind(obj[key], obj);
}
});
// Memoize an expensive function by storing its results.
_.memoize = function(func, hasher) {
var memoize = function(key) {
var cache = memoize.cache;
var address = '' + (hasher ? hasher.apply(this, arguments) : key);
if (!has(cache, address)) cache[address] = func.apply(this, arguments);
return cache[address];
};
memoize.cache = {};
return memoize;
};
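// Example (illustrative): memoizing a naive recursive Fibonacci makes it
// linear in n, since each value is computed once and then read back from
// fib.cache.
//
//   var fib = _.memoize(function(n) {
//     return n < 2 ? n : fib(n - 1) + fib(n - 2);
//   });
//   fib(40); // => 102334155, without the exponential blow-up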
// Delays a function for the given number of milliseconds, and then calls
// it with the arguments supplied.
_.delay = restArguments(function(func, wait, args) {
return setTimeout(function() {
return func.apply(null, args);
}, wait);
});
// Defers a function, scheduling it to run after the current call stack has
// cleared.
_.defer = _.partial(_.delay, _, 1);
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time. Normally, the throttled function will run
// as much as it can, without ever going more than once per `wait` duration;
// but if you'd like to disable the execution on the leading edge, pass
// `{leading: false}`. To disable execution on the trailing edge, ditto.
_.throttle = function(func, wait, options) {
var timeout, context, args, result;
var previous = 0;
if (!options) options = {};
var later = function() {
previous = options.leading === false ? 0 : _.now();
timeout = null;
result = func.apply(context, args);
if (!timeout) context = args = null;
};
var throttled = function() {
var now = _.now();
if (!previous && options.leading === false) previous = now;
var remaining = wait - (now - previous);
context = this;
args = arguments;
if (remaining <= 0 || remaining > wait) {
if (timeout) {
clearTimeout(timeout);
timeout = null;
}
previous = now;
result = func.apply(context, args);
if (!timeout) context = args = null;
} else if (!timeout && options.trailing !== false) {
timeout = setTimeout(later, remaining);
}
return result;
};
throttled.cancel = function() {
clearTimeout(timeout);
previous = 0;
timeout = context = args = null;
};
return throttled;
};
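// Example (illustrative; updatePosition is an assumed handler): run a
// scroll handler at most once every 100ms, however fast events arrive.
//
//   $(window).scroll(_.throttle(updatePosition, 100));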
// Returns a function, that, as long as it continues to be invoked, will not
// be triggered. The function will be called after it stops being called for
// N milliseconds. If `immediate` is passed, trigger the function on the
// leading edge, instead of the trailing.
_.debounce = function(func, wait, immediate) {
var timeout, result;
var later = function(context, args) {
timeout = null;
if (args) result = func.apply(context, args);
};
var debounced = restArguments(function(args) {
if (timeout) clearTimeout(timeout);
if (immediate) {
var callNow = !timeout;
timeout = setTimeout(later, wait);
if (callNow) result = func.apply(this, args);
} else {
timeout = _.delay(later, wait, this, args);
}
return result;
});
debounced.cancel = function() {
clearTimeout(timeout);
timeout = null;
};
return debounced;
};
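// Example (illustrative; recalculateLayout is an assumed function):
// recompute a layout only once the user has stopped resizing for 300ms.
//
//   var lazyLayout = _.debounce(recalculateLayout, 300);
//   $(window).resize(lazyLayout);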
// Returns the first function passed as an argument to the second,
// allowing you to adjust arguments, run code before and after, and
// conditionally execute the original function.
_.wrap = function(func, wrapper) {
return _.partial(wrapper, func);
};
// Returns a negated version of the passed-in predicate.
_.negate = function(predicate) {
return function() {
return !predicate.apply(this, arguments);
};
};
// Returns a function that is the composition of a list of functions, each
// consuming the return value of the function that follows.
_.compose = function() {
var args = arguments;
var start = args.length - 1;
return function() {
var i = start;
var result = args[start].apply(this, arguments);
while (i--) result = args[i].call(this, result);
return result;
};
};
// Returns a function that will only be executed on and after the Nth call.
_.after = function(times, func) {
return function() {
if (--times < 1) {
return func.apply(this, arguments);
}
};
};
// Returns a function that will only be executed up to (but not including) the Nth call.
_.before = function(times, func) {
var memo;
return function() {
if (--times > 0) {
memo = func.apply(this, arguments);
}
if (times <= 1) func = null;
return memo;
};
};
// Returns a function that will be executed at most one time, no matter how
// often you call it. Useful for lazy initialization.
_.once = _.partial(_.before, 2);
_.restArguments = restArguments;
// Object Functions
// ----------------
// Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed.
var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString');
var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString',
'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString'];
var collectNonEnumProps = function(obj, keys) {
var nonEnumIdx = nonEnumerableProps.length;
var constructor = obj.constructor;
var proto = _.isFunction(constructor) && constructor.prototype || ObjProto;
// Constructor is a special case.
var prop = 'constructor';
if (has(obj, prop) && !_.contains(keys, prop)) keys.push(prop);
while (nonEnumIdx--) {
prop = nonEnumerableProps[nonEnumIdx];
if (prop in obj && obj[prop] !== proto[prop] && !_.contains(keys, prop)) {
keys.push(prop);
}
}
};
// Retrieve the names of an object's own properties.
// Delegates to **ECMAScript 5**'s native `Object.keys`.
_.keys = function(obj) {
if (!_.isObject(obj)) return [];
if (nativeKeys) return nativeKeys(obj);
var keys = [];
for (var key in obj) if (has(obj, key)) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
};
// Retrieve all the property names of an object.
_.allKeys = function(obj) {
if (!_.isObject(obj)) return [];
var keys = [];
for (var key in obj) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
};
// Retrieve the values of an object's properties.
_.values = function(obj) {
var keys = _.keys(obj);
var length = keys.length;
var values = Array(length);
for (var i = 0; i < length; i++) {
values[i] = obj[keys[i]];
}
return values;
};
// Returns the results of applying the iteratee to each element of the object.
// In contrast to _.map it returns an object.
_.mapObject = function(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var keys = _.keys(obj),
length = keys.length,
results = {};
for (var index = 0; index < length; index++) {
var currentKey = keys[index];
results[currentKey] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
};
// Convert an object into a list of `[key, value]` pairs.
// The opposite of _.object.
_.pairs = function(obj) {
var keys = _.keys(obj);
var length = keys.length;
var pairs = Array(length);
for (var i = 0; i < length; i++) {
pairs[i] = [keys[i], obj[keys[i]]];
}
return pairs;
};
// Invert the keys and values of an object. The values must be serializable.
_.invert = function(obj) {
var result = {};
var keys = _.keys(obj);
for (var i = 0, length = keys.length; i < length; i++) {
result[obj[keys[i]]] = keys[i];
}
return result;
};
// Return a sorted list of the function names available on the object.
// Aliased as `methods`.
_.functions = _.methods = function(obj) {
var names = [];
for (var key in obj) {
if (_.isFunction(obj[key])) names.push(key);
}
return names.sort();
};
// An internal function for creating assigner functions.
var createAssigner = function(keysFunc, defaults) {
return function(obj) {
var length = arguments.length;
if (defaults) obj = Object(obj);
if (length < 2 || obj == null) return obj;
for (var index = 1; index < length; index++) {
var source = arguments[index],
keys = keysFunc(source),
l = keys.length;
for (var i = 0; i < l; i++) {
var key = keys[i];
if (!defaults || obj[key] === void 0) obj[key] = source[key];
}
}
return obj;
};
};
// Extend a given object with all the properties in passed-in object(s).
_.extend = createAssigner(_.allKeys);
// Assigns a given object with all the own properties in the passed-in object(s).
// (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign)
_.extendOwn = _.assign = createAssigner(_.keys);
// Returns the first key on an object that passes a predicate test.
_.findKey = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = _.keys(obj), key;
for (var i = 0, length = keys.length; i < length; i++) {
key = keys[i];
if (predicate(obj[key], key, obj)) return key;
}
};
// Internal pick helper function to determine if `obj` has key `key`.
var keyInObj = function(value, key, obj) {
return key in obj;
};
// Return a copy of the object only containing the whitelisted properties.
_.pick = restArguments(function(obj, keys) {
var result = {}, iteratee = keys[0];
if (obj == null) return result;
if (_.isFunction(iteratee)) {
if (keys.length > 1) iteratee = optimizeCb(iteratee, keys[1]);
keys = _.allKeys(obj);
} else {
iteratee = keyInObj;
keys = flatten(keys, false, false);
obj = Object(obj);
}
for (var i = 0, length = keys.length; i < length; i++) {
var key = keys[i];
var value = obj[key];
if (iteratee(value, key, obj)) result[key] = value;
}
return result;
});
// Return a copy of the object without the blacklisted properties.
_.omit = restArguments(function(obj, keys) {
var iteratee = keys[0], context;
if (_.isFunction(iteratee)) {
iteratee = _.negate(iteratee);
if (keys.length > 1) context = keys[1];
} else {
keys = _.map(flatten(keys, false, false), String);
iteratee = function(value, key) {
return !_.contains(keys, key);
};
}
return _.pick(obj, iteratee, context);
});
// Fill in a given object with default properties.
_.defaults = createAssigner(_.allKeys, true);
// Creates an object that inherits from the given prototype object.
// If additional properties are provided then they will be added to the
// created object.
_.create = function(prototype, props) {
var result = baseCreate(prototype);
if (props) _.extendOwn(result, props);
return result;
};
// Create a (shallow-cloned) duplicate of an object.
_.clone = function(obj) {
if (!_.isObject(obj)) return obj;
return _.isArray(obj) ? obj.slice() : _.extend({}, obj);
};
// Invokes interceptor with the obj, and then returns obj.
// The primary purpose of this method is to "tap into" a method chain, in
// order to perform operations on intermediate results within the chain.
_.tap = function(obj, interceptor) {
interceptor(obj);
return obj;
};
// Returns whether an object has a given set of `key:value` pairs.
_.isMatch = function(object, attrs) {
var keys = _.keys(attrs), length = keys.length;
if (object == null) return !length;
var obj = Object(object);
for (var i = 0; i < length; i++) {
var key = keys[i];
if (attrs[key] !== obj[key] || !(key in obj)) return false;
}
return true;
};
// Internal recursive comparison function for `isEqual`.
var eq, deepEq;
eq = function(a, b, aStack, bStack) {
// Identical objects are equal. `0 === -0`, but they aren't identical.
// See the [Harmony `egal` proposal](http://wiki.ecmascript.org/doku.php?id=harmony:egal).
if (a === b) return a !== 0 || 1 / a === 1 / b;
// `null` or `undefined` only equal to itself (strict comparison).
if (a == null || b == null) return false;
// `NaN`s are equivalent, but non-reflexive.
if (a !== a) return b !== b;
// Exhaust primitive checks
var type = typeof a;
if (type !== 'function' && type !== 'object' && typeof b != 'object') return false;
return deepEq(a, b, aStack, bStack);
};
// Internal recursive comparison function for `isEqual`.
deepEq = function(a, b, aStack, bStack) {
// Unwrap any wrapped objects.
if (a instanceof _) a = a._wrapped;
if (b instanceof _) b = b._wrapped;
// Compare `[[Class]]` names.
var className = toString.call(a);
if (className !== toString.call(b)) return false;
switch (className) {
// Strings, numbers, regular expressions, dates, and booleans are compared by value.
case '[object RegExp]':
// RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i')
case '[object String]':
// Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
// equivalent to `new String("5")`.
return '' + a === '' + b;
case '[object Number]':
// `NaN`s are equivalent, but non-reflexive.
// Object(NaN) is equivalent to NaN.
if (+a !== +a) return +b !== +b;
// An `egal` comparison is performed for other numeric values.
return +a === 0 ? 1 / +a === 1 / b : +a === +b;
case '[object Date]':
case '[object Boolean]':
// Coerce dates and booleans to numeric primitive values. Dates are compared by their
// millisecond representations. Note that invalid dates with millisecond representations
// of `NaN` are not equivalent.
return +a === +b;
case '[object Symbol]':
return SymbolProto.valueOf.call(a) === SymbolProto.valueOf.call(b);
}
var areArrays = className === '[object Array]';
if (!areArrays) {
if (typeof a != 'object' || typeof b != 'object') return false;
// Objects with different constructors are not equivalent, but `Object`s or `Array`s
// from different frames are.
var aCtor = a.constructor, bCtor = b.constructor;
if (aCtor !== bCtor && !(_.isFunction(aCtor) && aCtor instanceof aCtor &&
_.isFunction(bCtor) && bCtor instanceof bCtor)
&& ('constructor' in a && 'constructor' in b)) {
return false;
}
}
// Assume equality for cyclic structures. The algorithm for detecting cyclic
// structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
// Initializing stack of traversed objects.
// It's done here since we only need them for objects and arrays comparison.
aStack = aStack || [];
bStack = bStack || [];
var length = aStack.length;
while (length--) {
// Linear search. Performance is inversely proportional to the number of
// unique nested structures.
if (aStack[length] === a) return bStack[length] === b;
}
// Add the first object to the stack of traversed objects.
aStack.push(a);
bStack.push(b);
// Recursively compare objects and arrays.
if (areArrays) {
// Compare array lengths to determine if a deep comparison is necessary.
length = a.length;
if (length !== b.length) return false;
// Deep compare the contents, ignoring non-numeric properties.
while (length--) {
if (!eq(a[length], b[length], aStack, bStack)) return false;
}
} else {
// Deep compare objects.
var keys = _.keys(a), key;
length = keys.length;
// Ensure that both objects contain the same number of properties before comparing deep equality.
if (_.keys(b).length !== length) return false;
while (length--) {
// Deep compare each member
key = keys[length];
if (!(has(b, key) && eq(a[key], b[key], aStack, bStack))) return false;
}
}
// Remove the first object from the stack of traversed objects.
aStack.pop();
bStack.pop();
return true;
};
// Perform a deep comparison to check if two objects are equal.
_.isEqual = function(a, b) {
return eq(a, b);
};
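// Examples (illustrative): structural equality where === would say no;
// note that NaN compares equal to NaN here.
//
//   _.isEqual({a: [1, 2], b: NaN}, {a: [1, 2], b: NaN}); // => true
//   ({a: 1}) === ({a: 1}); // => false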
// Is a given array, string, or object empty?
// An "empty" object has no enumerable own-properties.
_.isEmpty = function(obj) {
if (obj == null) return true;
if (isArrayLike(obj) && (_.isArray(obj) || _.isString(obj) || _.isArguments(obj))) return obj.length === 0;
return _.keys(obj).length === 0;
};
// Is a given value a DOM element?
_.isElement = function(obj) {
return !!(obj && obj.nodeType === 1);
};
// Is a given value an array?
// Delegates to ECMA5's native Array.isArray
_.isArray = nativeIsArray || function(obj) {
return toString.call(obj) === '[object Array]';
};
// Is a given variable an object?
_.isObject = function(obj) {
var type = typeof obj;
return type === 'function' || type === 'object' && !!obj;
};
// Add some isType methods: isArguments, isFunction, isString, isNumber, isDate, isRegExp, isError, isMap, isWeakMap, isSet, isWeakSet.
_.each(['Arguments', 'Function', 'String', 'Number', 'Date', 'RegExp', 'Error', 'Symbol', 'Map', 'WeakMap', 'Set', 'WeakSet'], function(name) {
_['is' + name] = function(obj) {
return toString.call(obj) === '[object ' + name + ']';
};
});
// Define a fallback version of the method in browsers (ahem, IE < 9), where
// there isn't any inspectable "Arguments" type.
if (!_.isArguments(arguments)) {
_.isArguments = function(obj) {
return has(obj, 'callee');
};
}
// Optimize `isFunction` if appropriate. Work around some typeof bugs in old v8,
// IE 11 (#1621), Safari 8 (#1929), and PhantomJS (#2236).
var nodelist = root.document && root.document.childNodes;
if (typeof /./ != 'function' && typeof Int8Array != 'object' && typeof nodelist != 'function') {
_.isFunction = function(obj) {
return typeof obj == 'function' || false;
};
}
// Is a given object a finite number?
_.isFinite = function(obj) {
return !_.isSymbol(obj) && isFinite(obj) && !isNaN(parseFloat(obj));
};
// Is the given value `NaN`?
_.isNaN = function(obj) {
return _.isNumber(obj) && isNaN(obj);
};
// Is a given value a boolean?
_.isBoolean = function(obj) {
return obj === true || obj === false || toString.call(obj) === '[object Boolean]';
};
// Is a given value equal to null?
_.isNull = function(obj) {
return obj === null;
};
// Is a given variable undefined?
_.isUndefined = function(obj) {
return obj === void 0;
};
// Shortcut function for checking if an object has a given property directly
// on itself (in other words, not on a prototype).
_.has = function(obj, path) {
if (!_.isArray(path)) {
return has(obj, path);
}
var length = path.length;
for (var i = 0; i < length; i++) {
var key = path[i];
if (obj == null || !hasOwnProperty.call(obj, key)) {
return false;
}
obj = obj[key];
}
return !!length;
};
// Utility Functions
// -----------------
// Run Underscore.js in *noConflict* mode, returning the `_` variable to its
// previous owner. Returns a reference to the Underscore object.
_.noConflict = function() {
root._ = previousUnderscore;
return this;
};
// Keep the identity function around for default iteratees.
_.identity = function(value) {
return value;
};
// Predicate-generating functions. Often useful outside of Underscore.
_.constant = function(value) {
return function() {
return value;
};
};
_.noop = function(){};
// Creates a function that, when passed an object, will traverse that object’s
// properties down the given `path`, specified as an array of keys or indexes.
_.property = function(path) {
if (!_.isArray(path)) {
return shallowProperty(path);
}
return function(obj) {
return deepGet(obj, path);
};
};
// Generates a function for a given object that returns a given property.
_.propertyOf = function(obj) {
if (obj == null) {
return function(){};
}
return function(path) {
return !_.isArray(path) ? obj[path] : deepGet(obj, path);
};
};
// Returns a predicate for checking whether an object has a given set of
// `key:value` pairs.
_.matcher = _.matches = function(attrs) {
attrs = _.extendOwn({}, attrs);
return function(obj) {
return _.isMatch(obj, attrs);
};
};
// Run a function **n** times.
_.times = function(n, iteratee, context) {
var accum = Array(Math.max(0, n));
iteratee = optimizeCb(iteratee, context, 1);
for (var i = 0; i < n; i++) accum[i] = iteratee(i);
return accum;
};
// Return a random integer between min and max (inclusive).
_.random = function(min, max) {
if (max == null) {
max = min;
min = 0;
}
return min + Math.floor(Math.random() * (max - min + 1));
};
// A (possibly faster) way to get the current timestamp as an integer.
_.now = Date.now || function() {
return new Date().getTime();
};
// List of HTML entities for escaping.
var escapeMap = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#x27;',
    '`': '&#x60;'
};
var unescapeMap = _.invert(escapeMap);
// Functions for escaping and unescaping strings to/from HTML interpolation.
var createEscaper = function(map) {
var escaper = function(match) {
return map[match];
};
// Regexes for identifying a key that needs to be escaped.
var source = '(?:' + _.keys(map).join('|') + ')';
var testRegexp = RegExp(source);
var replaceRegexp = RegExp(source, 'g');
return function(string) {
string = string == null ? '' : '' + string;
return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string;
};
};
_.escape = createEscaper(escapeMap);
_.unescape = createEscaper(unescapeMap);
// Traverses the children of `obj` along `path`. If a child is a function, it
// is invoked with its parent as context. Returns the value of the final
// child, or `fallback` if any child is undefined.
_.result = function(obj, path, fallback) {
if (!_.isArray(path)) path = [path];
var length = path.length;
if (!length) {
return _.isFunction(fallback) ? fallback.call(obj) : fallback;
}
for (var i = 0; i < length; i++) {
var prop = obj == null ? void 0 : obj[path[i]];
if (prop === void 0) {
prop = fallback;
i = length; // Ensure we don't continue iterating.
}
obj = _.isFunction(prop) ? prop.call(obj) : prop;
}
return obj;
};
// Generate a unique integer id (unique within the entire client session).
// Useful for temporary DOM ids.
var idCounter = 0;
_.uniqueId = function(prefix) {
var id = ++idCounter + '';
return prefix ? prefix + id : id;
};
// By default, Underscore uses ERB-style template delimiters, change the
// following template settings to use alternative delimiters.
_.templateSettings = {
evaluate: /<%([\s\S]+?)%>/g,
interpolate: /<%=([\s\S]+?)%>/g,
escape: /<%-([\s\S]+?)%>/g
};
// When customizing `templateSettings`, if you don't want to define an
// interpolation, evaluation or escaping regex, we need one that is
// guaranteed not to match.
var noMatch = /(.)^/;
// Certain characters need to be escaped so that they can be put into a
// string literal.
var escapes = {
"'": "'",
'\\': '\\',
'\r': 'r',
'\n': 'n',
'\u2028': 'u2028',
'\u2029': 'u2029'
};
var escapeRegExp = /\\|'|\r|\n|\u2028|\u2029/g;
var escapeChar = function(match) {
return '\\' + escapes[match];
};
// In order to prevent third-party code injection through
// `_.templateSettings.variable`, we test it against the following regular
// expression. It is intentionally a bit more liberal than just matching valid
// identifiers, but still prevents possible loopholes through defaults or
// destructuring assignment.
var bareIdentifier = /^\s*(\w|\$)+\s*$/;
// JavaScript micro-templating, similar to John Resig's implementation.
// Underscore templating handles arbitrary delimiters, preserves whitespace,
// and correctly escapes quotes within interpolated code.
// NB: `oldSettings` only exists for backwards compatibility.
_.template = function(text, settings, oldSettings) {
if (!settings && oldSettings) settings = oldSettings;
settings = _.defaults({}, settings, _.templateSettings);
// Combine delimiters into one regular expression via alternation.
var matcher = RegExp([
(settings.escape || noMatch).source,
(settings.interpolate || noMatch).source,
(settings.evaluate || noMatch).source
].join('|') + '|$', 'g');
// Compile the template source, escaping string literals appropriately.
var index = 0;
var source = "__p+='";
text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
source += text.slice(index, offset).replace(escapeRegExp, escapeChar);
index = offset + match.length;
if (escape) {
source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
} else if (interpolate) {
source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
} else if (evaluate) {
source += "';\n" + evaluate + "\n__p+='";
}
// Adobe VMs need the match returned to produce the correct offset.
return match;
});
source += "';\n";
var argument = settings.variable;
if (argument) {
// Insure against third-party code injection.
if (!bareIdentifier.test(argument)) throw new Error(
'variable is not a bare identifier: ' + argument
);
} else {
// If a variable is not specified, place data values in local scope.
source = 'with(obj||{}){\n' + source + '}\n';
argument = 'obj';
}
source = "var __t,__p='',__j=Array.prototype.join," +
"print=function(){__p+=__j.call(arguments,'');};\n" +
source + 'return __p;\n';
var render;
try {
render = new Function(argument, '_', source);
} catch (e) {
e.source = source;
throw e;
}
var template = function(data) {
return render.call(this, data, _);
};
// Provide the compiled source as a convenience for precompilation.
template.source = 'function(' + argument + '){\n' + source + '}';
return template;
};
// Add a "chain" function. Start chaining a wrapped Underscore object.
_.chain = function(obj) {
var instance = _(obj);
instance._chain = true;
return instance;
};
// OOP
// ---------------
// If Underscore is called as a function, it returns a wrapped object that
// can be used OO-style. This wrapper holds altered versions of all the
// underscore functions. Wrapped objects may be chained.
// Helper function to continue chaining intermediate results.
var chainResult = function(instance, obj) {
return instance._chain ? _(obj).chain() : obj;
};
// Add your own custom functions to the Underscore object.
_.mixin = function(obj) {
_.each(_.functions(obj), function(name) {
var func = _[name] = obj[name];
_.prototype[name] = function() {
var args = [this._wrapped];
push.apply(args, arguments);
return chainResult(this, func.apply(_, args));
};
});
return _;
};
// Add all of the Underscore functions to the wrapper object.
_.mixin(_);
// Add all mutator Array functions to the wrapper.
_.each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
var obj = this._wrapped;
method.apply(obj, arguments);
if ((name === 'shift' || name === 'splice') && obj.length === 0) delete obj[0];
return chainResult(this, obj);
};
});
// Add all accessor Array functions to the wrapper.
_.each(['concat', 'join', 'slice'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
return chainResult(this, method.apply(this._wrapped, arguments));
};
});
// Extracts the result from a wrapped and chained object.
_.prototype.value = function() {
return this._wrapped;
};
// Provide unwrapping proxy for some methods used in engine operations
// such as arithmetic and JSON stringification.
_.prototype.valueOf = _.prototype.toJSON = _.prototype.value;
_.prototype.toString = function() {
return String(this._wrapped);
};
// AMD registration happens at the end for compatibility with AMD loaders
// that may not enforce next-turn semantics on modules. Even though general
// practice for AMD registration is to be anonymous, underscore registers
// as a named module because, like jQuery, it is a base library that is
// popular enough to be bundled in a third party lib, but not be part of
// an AMD load request. Those cases could generate an error when an
// anonymous define() is called outside of a loader request.
if (typeof define == 'function' && define.amd) {
define('underscore', [], function() {
return _;
});
}
}()); | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/doc/sphinx_build/_static/underscore.js | underscore.js |
if (!Scorer) {
/**
* Simple result scoring code.
*/
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [filename, title, anchor, descr, score]
// and returns the new score.
/*
score: function(result) {
return result[4];
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5}, // used to be unimportantResults
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
// query found in terms
term: 5
};
}
if (!splitQuery) {
function splitQuery(query) {
return query.split(/\s+/);
}
}
/**
* Search Module
*/
var Search = {
_index : null,
_queued_query : null,
_pulse_status : -1,
init : function() {
var params = $.getQueryParameters();
if (params.q) {
var query = params.q[0];
$('input[name="q"]')[0].value = query;
this.performSearch(query);
}
},
loadIndex : function(url) {
$.ajax({type: "GET", url: url, data: null,
dataType: "script", cache: true,
complete: function(jqxhr, textstatus) {
if (textstatus != "success") {
document.getElementById("searchindexloader").src = url;
}
}});
},
setIndex : function(index) {
var q;
this._index = index;
if ((q = this._queued_query) !== null) {
this._queued_query = null;
Search.query(q);
}
},
hasIndex : function() {
return this._index !== null;
},
deferQuery : function(query) {
this._queued_query = query;
},
stopPulse : function() {
this._pulse_status = 0;
},
startPulse : function() {
if (this._pulse_status >= 0)
return;
function pulse() {
var i;
Search._pulse_status = (Search._pulse_status + 1) % 4;
var dotString = '';
for (i = 0; i < Search._pulse_status; i++)
dotString += '.';
Search.dots.text(dotString);
if (Search._pulse_status > -1)
window.setTimeout(pulse, 500);
}
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch : function(query) {
// create the required interface elements
this.out = $('#search-results');
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
this.dots = $('<span></span>').appendTo(this.title);
this.status = $('<p style="display: none"></p>').appendTo(this.out);
this.output = $('<ul class="search"/>').appendTo(this.out);
$('#search-progress').text(_('Preparing search...'));
this.startPulse();
// index already loaded, the browser was quick!
if (this.hasIndex())
this.query(query);
else
this.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query : function(query) {
var i;
// stem the searchterms and add them to the correct list
var stemmer = new Stemmer();
var searchterms = [];
var excluded = [];
var hlterms = [];
var tmp = splitQuery(query);
var objectterms = [];
for (i = 0; i < tmp.length; i++) {
if (tmp[i] !== "") {
objectterms.push(tmp[i].toLowerCase());
}
if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i].match(/^\d+$/) ||
tmp[i] === "") {
// skip this "word"
continue;
}
// stem the word
var word = stemmer.stemWord(tmp[i].toLowerCase());
// prevent stemmer from cutting word smaller than two chars
if(word.length < 3 && tmp[i].length >= 3) {
word = tmp[i];
}
var toAppend;
// select the correct list
if (word[0] == '-') {
toAppend = excluded;
word = word.substr(1);
}
else {
toAppend = searchterms;
hlterms.push(tmp[i].toLowerCase());
}
// only add if not already in the list
if (!$u.contains(toAppend, word))
toAppend.push(word);
}
var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
// console.debug('SEARCH: searching for:');
// console.info('required: ', searchterms);
// console.info('excluded: ', excluded);
// prepare search
var terms = this._index.terms;
var titleterms = this._index.titleterms;
// array of [filename, title, anchor, descr, score]
var results = [];
$('#search-progress').empty();
// lookup as object
for (i = 0; i < objectterms.length; i++) {
var others = [].concat(objectterms.slice(0, i),
objectterms.slice(i+1, objectterms.length));
results = results.concat(this.performObjectSearch(objectterms[i], others));
}
// lookup as search terms in fulltext
results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) {
for (i = 0; i < results.length; i++)
results[i][4] = Scorer.score(results[i]);
}
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort(function(a, b) {
var left = a[4];
var right = b[4];
if (left > right) {
return 1;
} else if (left < right) {
return -1;
} else {
// same score: sort alphabetically
left = a[1].toLowerCase();
right = b[1].toLowerCase();
return (left > right) ? -1 : ((left < right) ? 1 : 0);
}
});
// for debugging
//Search.lastresults = results.slice(); // a copy
//console.info('search results:', Search.lastresults);
// print the results
var resultCount = results.length;
function displayNextItem() {
// results left, load the summary and display it
if (results.length) {
var item = results.pop();
var listItem = $('<li style="display:none"></li>');
if (DOCUMENTATION_OPTIONS.FILE_SUFFIX === '') {
// dirhtml builder
var dirname = item[0] + '/';
if (dirname.match(/\/index\/$/)) {
dirname = dirname.substring(0, dirname.length-6);
} else if (dirname == 'index/') {
dirname = '';
}
listItem.append($('<a/>').attr('href',
DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
highlightstring + item[2]).html(item[1]));
} else {
// normal html builders
listItem.append($('<a/>').attr('href',
item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
highlightstring + item[2]).html(item[1]));
}
if (item[3]) {
listItem.append($('<span> (' + item[3] + ')</span>'));
Search.output.append(listItem);
listItem.slideDown(5, function() {
displayNextItem();
});
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
var suffix = DOCUMENTATION_OPTIONS.SOURCELINK_SUFFIX;
if (suffix === undefined) {
suffix = '.txt';
}
$.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[5] + (item[5].slice(-suffix.length) === suffix ? '' : suffix),
dataType: "text",
complete: function(jqxhr, textstatus) {
var data = jqxhr.responseText;
if (data !== '' && data !== undefined) {
listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
}
Search.output.append(listItem);
listItem.slideDown(5, function() {
displayNextItem();
});
}});
} else {
// no source available, just display title
Search.output.append(listItem);
listItem.slideDown(5, function() {
displayNextItem();
});
}
}
// search finished, update title and status message
else {
Search.stopPulse();
Search.title.text(_('Search Results'));
if (!resultCount)
Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
else
Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
Search.status.fadeIn(500);
}
}
displayNextItem();
},
/**
* search for object names
*/
performObjectSearch : function(object, otherterms) {
var filenames = this._index.filenames;
var docnames = this._index.docnames;
var objects = this._index.objects;
var objnames = this._index.objnames;
var titles = this._index.titles;
var i;
var results = [];
for (var prefix in objects) {
for (var name in objects[prefix]) {
var fullname = (prefix ? prefix + '.' : '') + name;
if (fullname.toLowerCase().indexOf(object) > -1) {
var score = 0;
var parts = fullname.split('.');
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullname == object || parts[parts.length - 1] == object) {
score += Scorer.objNameMatch;
// matches in last name
} else if (parts[parts.length - 1].indexOf(object) > -1) {
score += Scorer.objPartialMatch;
}
var match = objects[prefix][name];
var objname = objnames[match[1]][2];
var title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
if (otherterms.length > 0) {
var haystack = (prefix + ' ' + name + ' ' +
objname + ' ' + title).toLowerCase();
var allfound = true;
for (i = 0; i < otherterms.length; i++) {
if (haystack.indexOf(otherterms[i]) == -1) {
allfound = false;
break;
}
}
if (!allfound) {
continue;
}
}
var descr = objname + _(', in ') + title;
var anchor = match[3];
if (anchor === '')
anchor = fullname;
else if (anchor == '-')
anchor = objnames[match[1]][1] + '-' + fullname;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2])) {
score += Scorer.objPrio[match[2]];
} else {
score += Scorer.objPrioDefault;
}
results.push([docnames[match[0]], fullname, '#'+anchor, descr, score, filenames[match[0]]]);
}
}
}
return results;
},
/**
* search for full-text terms in the index
*/
performTermsSearch : function(searchterms, excluded, terms, titleterms) {
var docnames = this._index.docnames;
var filenames = this._index.filenames;
var titles = this._index.titles;
var i, j, file;
var fileMap = {};
var scoreMap = {};
var results = [];
// perform the search on the required terms
for (i = 0; i < searchterms.length; i++) {
var word = searchterms[i];
var files = [];
var _o = [
{files: terms[word], score: Scorer.term},
{files: titleterms[word], score: Scorer.title}
];
// no match but word was a required one
if ($u.every(_o, function(o){return o.files === undefined;})) {
break;
}
// found search word in contents
$u.each(_o, function(o) {
var _files = o.files;
if (_files === undefined)
return
if (_files.length === undefined)
_files = [_files];
files = files.concat(_files);
// set score for the word in each file to Scorer.term
for (j = 0; j < _files.length; j++) {
file = _files[j];
if (!(file in scoreMap))
scoreMap[file] = {}
scoreMap[file][word] = o.score;
}
});
// create the mapping
for (j = 0; j < files.length; j++) {
file = files[j];
if (file in fileMap)
fileMap[file].push(word);
else
fileMap[file] = [word];
}
}
// now check if the files don't contain excluded terms
for (file in fileMap) {
var valid = true;
// check if all requirements are matched
if (fileMap[file].length != searchterms.length)
continue;
// ensure that none of the excluded terms is in the search result
for (i = 0; i < excluded.length; i++) {
if (terms[excluded[i]] == file ||
titleterms[excluded[i]] == file ||
$u.contains(terms[excluded[i]] || [], file) ||
$u.contains(titleterms[excluded[i]] || [], file)) {
valid = false;
break;
}
}
// if we have still a valid result we can add it to the result list
if (valid) {
// select one (max) score for the file.
// for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
results.push([docnames[file], titles[file], '', null, score, filenames[file]]);
}
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words, hlwords is the list of normal, unstemmed
* words. the first one is used to find the occurrence, the
* latter for highlighting it.
*/
makeSearchSummary : function(text, keywords, hlwords) {
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {
var i = textLower.indexOf(this.toLowerCase());
if (i > -1)
start = i;
});
start = Math.max(start - 120, 0);
var excerpt = ((start > 0) ? '...' : '') +
$.trim(text.substr(start, 240)) +
((start + 240 - text.length) ? '...' : '');
var rv = $('<div class="context"></div>').text(excerpt);
$.each(hlwords, function() {
rv = rv.highlightText(this, 'highlighted');
});
return rv;
}
};
$(document).ready(function() {
Search.init();
}); | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/doc/sphinx_build/_static/searchtools.js | searchtools.js |
.. AIMM_simulator documentation master file, created by
sphinx-quickstart on Mon Mar 15 16:10:21 2021.
Indices and tables
------------------
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. toctree::
:maxdepth: 2
:caption: Contents:
AIMM simulator documentation
============================
Last modified: |today|
Purpose
-------
The AIMM simulator emulates a cellular radio system roughly following 5G concepts and channel models. The intention is to have an easy-to-use and fast system written in pure Python with minimal dependencies. It is especially designed to be suitable for interfacing to AI engines such as ``tensorflow`` or ``pytorch``, and it is not a principal aim for it to be extremely accurate at the level of the radio channel. The simulator was developed for the `AIMM project <https://aimm.celticnext.eu>`_.
The github sources are at <https://github.com/keithbriggs/AIMM-simulator>.
Software dependencies
---------------------
1. `Python 3.8 <https://python.org>`_ or higher.
2. `NumPy <https://numpy.org/>`_.
3. `Simpy <https://pypi.org/project/simpy/>`_.
4. If real-time plotting is needed, `matplotlib <https://matplotlib.org>`_, with an appropriate backend such as PyQt5 (``pip install PyQt5``).
Installation
------------
Three ways are possible:
* The simplest way, direct from PyPI: ``pip install AIMM-simulator``. This will not always get the latest version.
* Download the wheel, typically ``dist/aimm_simulator-2.x.y-py3-none-any.whl`` from github, and run ``pip install <wheel>``.
* Alternatively, the package can be installed by downloading the complete repository (using the green ``<> Code ⌄`` button) as a zip, unpacking it, and then doing ``make install_local`` from inside the unpacked zip.
After installation, run a test with ``python3 examples/basic_test.py``.
Note that a plotting utility ``src/realtime_plotter.py`` is included but not installed. If needed, this script should be placed somewhere in your python path.
A folder ``img`` is used by the examples to save plots in png and pdf format, so this folder must be created before running the examples with plotting.
The documentation can be built from the sources with ``make doc``.
Quick start
-----------
The following example will test the installation and introduce the basic concepts. This creates a simulator instance ``sim``, creates one cell, creates one UE and immediately attaches it to the cell, and runs for 100 seconds of simulated time (typically about 0.03 seconds of run time). There is no logger defined, which means there will be no output apart from a few set-up messages (which are sent to stderr). The code is in ``AIMM_simulator_example_n0.py``.
.. code-block:: python
   from AIMM_simulator import Sim
   sim=Sim()
   sim.make_cell()
   sim.make_UE().attach_to_nearest_cell()
   sim.run(until=100)
The basic steps to build and run a simulation are as follows (a skeleton combining them is sketched after this list):
1. Create a ``Sim`` instance.
2. Create one or more cells with ``make_cell()``. Cells are given a unique index, starting from 0.
3. Create one or more UEs with ``make_UE()``. UEs are given a unique index, starting from 0.
4. Attach UEs with the method ``attach_to_nearest_cell()``.
5. Create a ``Scenario``, which typically moves the UEs according to some mobility model, but in general can include any events which affect the network.
6. Create one or more instances of ``Logger``.
7. Optionally create a ``RIC``, possibly linking to an AI engine.
8. Start the simulation with ``sim.run()``.
9. Plot or analyse the results in the logfiles.
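Putting these steps together, a skeleton simulation has the following shape. This is a sketch only: the attachment methods (``add_scenario``, ``add_logger``, ``add_MME``, ``add_ric``) are used here as in the tutorial examples below, and ``MyScenario`` and ``MyRIC`` stand for user-defined subclasses.

.. code-block:: python

   from AIMM_simulator import Sim, Logger, MME
   sim=Sim()                                                   # step 1
   for i in range(9): sim.make_cell()                          # step 2
   for i in range(50): sim.make_UE().attach_to_nearest_cell()  # steps 3 and 4
   #sim.add_scenario(MyScenario(sim))                          # step 5
   sim.add_logger(Logger(sim))                                 # step 6
   #sim.add_ric(MyRIC(sim))                                    # step 7
   sim.add_MME(MME(sim))                                       # optional: handovers
   sim.run(until=100)                                          # step 8
   # step 9: analyse the logfiles offline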
The AIMM simulator uses a discrete event simulation framework. Internally, a queue of pending events is maintained, but this is invisible to the programmer.
All functions and classes have default arguments appropriate to the simulation of a 5G macrocell deployment at 3.5GHz. This means that setting up a simple simulation is almost trivial, but also means that care is needed to set parameters correctly for other scenarios. Subbanding is implemented on all ``Cell`` objects, but the number of subbands may be set to 1, effectively switching off this feature.
The AIMM simulator normally operates without a graphical user interface, and just writes logfiles for subsequent analysis. The default logfile format is tab-separated columns, with purely numerical data. These files can then be easily processed with shell utilities such as ``cut``, ``head``, ``tail``, etc., or read into python or R scripts, or, if all else fails, even imported into spreadsheet programs. However, a custom logger can create a logfile in any desired format.
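For example, loading and summarizing a logfile in python can be as simple as this sketch (the logfile name and the meaning of the third column are hypothetical):

.. code-block:: python

   import numpy as np
   log=np.loadtxt('my_logfile.tsv')  # hypothetical logfile name
   print(log[:,2].mean())            # e.g. the average of the third column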
AIMM simulator blocks
---------------------
.. figure:: AIMM_Simulator_block_diagram.png
:scale: 50 %
:alt: AIMM simulator block diagram
AIMM simulator block structure.
Tutorial examples
-----------------
Example 1
~~~~~~~~~
This example (the code is in ``AIMM_simulator_example_n1.py``) creates a simulator instance, creates one cell, creates four UEs and immediately attaches them to the cell, adds a default logger, and runs for 100 seconds of simulated time. UEs by default are placed randomly in a 1km square.
.. literalinclude:: ../../examples/AIMM_simulator_example_n1.py
:language: python
:linenos:
:lines: 4,6-10
Typical output follows. The locations are 3-dimensional, with the z component being the antenna height. The default logger prints 3 columns to stdout, with a row for each UE report: cell index, UE index, CQI value. We will see later how to create a custom logger.
.. code-block:: text
   Cell[0] is at [434.44 591.64 20.  ]
   UE[0] is at [602.14 403.7 2.  ]
   UE[1] is at [263.87 301.28 2.  ]
   UE[2] is at [319.12 506.63 2.  ]
   UE[3] is at [370.7 394.92 2.  ]
   Sim: starting main loop for simulation time 100 seconds...
   0 0 15
   0 1 15
   0 2 15
   0 3 15
   ...
   Sim: finished main loop in 0.04 seconds.
Example 2 - adding a scenario
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A scenario is created by subclassing the ``Scenario`` class. The code is in ``AIMM_simulator_example_n2.py``. The subclass must implement the ``loop`` method as in the example: it must have an infinite loop, yielding an ``s.sim.wait()`` object, which determines the time to the next event; in this case the next change to the UE positions. In this example, an MME is also added. This handles UE handovers, and ensures that UEs are always attached to the nearest cell.
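Schematically, a scenario subclass has the following shape (a minimal sketch only; the attribute ``s.sim.UEs`` holding the UE list is an assumption here, and the full working example follows):

.. code-block:: python

   class MyScenario(Scenario):
     def loop(s):
       while True:
         for ue in s.sim.UEs:   # assumed to hold the list of UE objects
           pass                 # update the UE position here
         yield s.sim.wait(1.0)  # time to the next position update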
.. literalinclude:: ../../examples/AIMM_simulator_example_n2.py
:language: python
:linenos:
:lines: 5-
Example 3 - adding a custom logger
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are two ways to create a custom logger. The simplest way is to specify a function when creating the Logger instance. This function must accept two arguments, the Sim instance and the file object to write to. Example code for this method is in ``AIMM_simulator_example_n3a.py``. The custom logger must format a line of output, and write it to the file object ``f``. Access to all simulation variables is possible; see the API documentation below. A convenience function ``np_array_to_str`` is available, which removes square brackets and commas from normal numpy array formatting.
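A minimal sketch of such a function follows (the attributes ``sim.cells``, ``cell.i`` and ``cell.attached`` are assumptions here; the example file below shows working code):

.. code-block:: python

   def my_logger(sim,f):
     # write one line per attached UE: cell index, then UE index
     for cell in sim.cells:        # assumed list of all Cell objects
       for ue_i in cell.attached:  # assumed indices of attached UEs
         f.write(f'{cell.i}\t{ue_i}\n')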
.. literalinclude:: ../../examples/AIMM_simulator_example_n3a.py
:language: python
:linenos:
:lines: 4-
More generally, a custom logger can be created by subclassing the ``Logger`` class. The subclass must implement the ``loop`` method as in the example: it must have an infinite loop, yielding an ``s.sim.wait()`` object, which determines the time to the next write to the logfile (which defaults to stdout). The custom logger must format a line of output, and write it to the file object ``self.f``. Example code for this method is in ``AIMM_simulator_example_n3.py``.
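The corresponding skeleton is (a sketch only; ``s.sim.env.now`` for the current simulated time is an assumption here):

.. code-block:: python

   class MyLogger(Logger):
     def loop(s):
       while True:
         s.f.write(f'{s.sim.env.now:.1f}\n')  # format and write one line
         yield s.sim.wait(1.0)                # time to the next logfile write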
.. literalinclude:: ../../examples/AIMM_simulator_example_n3.py
:language: python
:linenos:
:lines: 4-
Typical output is:
.. code-block:: text
   #time cell UE x y throughput Mb/s
   0.0 0 5 618 694 6.63
   0.0 0 7 435 549 1.17
   0.0 1 1 709 593 13.26
   0.0 2 0 395 405 0.98
   0.0 2 2 567 266 2.65
   0.0 2 3 718 496 0.61
   0.0 2 4 484 346 2.65
   0.0 2 6 310 377 0.61
   1.0 0 5 616 694 6.63
   1.0 0 7 437 548 1.17
   1.0 1 1 710 592 13.26
   1.0 2 0 395 406 0.98
   1.0 2 2 566 264 2.05
   1.0 2 3 719 497 0.61
   1.0 2 4 484 347 2.65
   1.0 2 6 312 377 0.61
Example 4 - adding a RIC
~~~~~~~~~~~~~~~~~~~~~~~~
A RIC (radio intelligent controller) is an agent able to control any aspect of a cell configuration in real time. This toy example detects the UE with the lowest throughput (``throughputs[0][1]``, after the sort), and allocates a new subband to the cell serving that UE.
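Schematically (a sketch only; the example below shows the real logic):

.. code-block:: python

   class MyRIC(RIC):
     def loop(s):
       while True:
         # rank the UEs by throughput here, then reconfigure the
         # serving cell of the worst one, as in the example below
         yield s.sim.wait(10.0)  # act every 10 seconds of simulated time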
.. literalinclude:: ../../examples/AIMM_simulator_example_n4.py
:language: python
:linenos:
:lines: 4-
Example 5 - adding an antenna radiation pattern
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this example there are two cells, and one UE which is driven around a circle centred on Cell[0] by the MyScenario class. There is no MME, so no handovers occur. With omnidirectional antennas, the UE would be exactly at the cell edge at times which are multiples of 100 seconds. But with the pattern implemented for Cell[0] in line 29, the antenna has a beam pointing east, towards the interfering cell (Cell[1], which remains omnidirectional). This considerably improves the average throughput.
.. literalinclude:: ../../examples/AIMM_simulator_example_n5.py
:language: python
:linenos:
:lines: 6-
A typical command to run this using the real-time plotter would be:
``python3 AIMM_simulator_example_n5.py | ./realtime_plotter_05.py -np=3 -tm=500 -ylims='{0: (-100,100), 1: (-100,100), 2: (0,45)}' -ylabels='{0: "UE[0] $x$", 1: "UE[0] $y$", 2: "UE[0] throughput"}' -fnb='img/AIMM_simulator_example_n5'``.
This generates a plot like this:
.. figure:: ../../examples/img/AIMM_simulator_example_n5.png
:scale: 80 %
:alt: AIMM_simulator_example_n5.png
Example 6 - a hetnet (heterogeneous network) with macro and small cells
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this example we start with 9 macro-cells in a 3x3 grid arrangement (line 30). We then drop 50 UEs at random into the system (line 32), and start the simulation (there is no UE mobility). The logger just computes the average throughput over all UEs. The scenario has these discrete events:
1. At time 20, 20 small cells at random locations are added to the system (line 8). There is a drop in average throughput because the new cells just generate interference and are not yet used as serving cells.
2. At time 40, the UEs are reattached to the best cell (line 11). Now throughput improves to above the initial value, because some UEs are now served by a nearby small cell.
3. At time 60, subband masks are applied to make the macro and small cells non-interfering. Macro cells are given 1/4 of the channel bandwidth (line 15), and the small cells have 3/4 of the channel bandwidth (line 17). Now throughput improves further. As this subband allocation will not be optimal, further improvement will still be possible.
.. literalinclude:: ../../examples/AIMM_simulator_example_n6.py
:language: python
:linenos:
:lines: 4-
The command
``python3 AIMM_simulator_example_n6.py | ./realtime_plotter_03.py -np=1 -tm=100 -ylims='(0,0.25)' -ylabels='{0: "average downlink throughput over all UEs"}' -fnb='img/AIMM_simulator_example_n6'``
then generates a plot like this:
.. figure:: ../../examples/img/AIMM_simulator_example_n6.png
:scale: 80 %
:alt: AIMM_simulator_example_n6.png
Example 7 - a hetnet with mobility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is similar to example 6, but now an MME is added to perform handovers for the mobile UEs.
.. literalinclude:: ../../examples/AIMM_simulator_example_n7.py
:language: python
:linenos:
:lines: 4-
The command
``python3 AIMM_simulator_example_n7.py | ./realtime_plotter_03.py -np=4 -tm=2000 -ylims='{0: (0,10), 1: (0,1000), 2: (0,1000), 3: (0,30)}' -ylabels='{0: "UE[0] throughput", 1: "UE[0] $x$", 2: "UE[0] $y$", 3: "UE[0] serving cell"}' -fnb='img/AIMM_simulator_example_n7'``
then generates a plot like this:
.. figure:: ../../examples/img/AIMM_simulator_example_n7.png
:scale: 80 %
:alt: AIMM_simulator_example_n7.png
Example 8 - estimating CQI distribution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is similar to example 7, but now we create a histogram at the end of the simulation, rather than using real-time plotting. A MIMO gain boost is added half-way through the simulation, in order to observe the effect on the CQI values at UE[0]. This example also illustrates the use of the ``finalize`` function, to create the histograms after the simulation has finished.
.. literalinclude:: ../../examples/AIMM_simulator_example_n8.py
:language: python
:linenos:
:lines: 4-
The command
``python3 AIMM_simulator_example_n8.py``
then generates a plot like this:
.. figure:: ../../examples/img/AIMM_simulator_example_n8.png
:scale: 80 %
:alt: AIMM_simulator_example_n8.png
Using the geometry_3d module
----------------------------
This module is provided for running indoor simulations; it takes account of wall absorption, but not reflections or diffraction. It is not a full ray-tracing code. The main task it performs is to compute intersections of signal paths with walls, and it is optimized to be fast for this application. An example of usage is below.
More details on usage to be added here.
.. code-block:: python
   import numpy as np
   from geometry_3d import block,Panel,Building,Ray,Triangle,draw_building
   blk0=block(np.array([0, 0,0]),np.array([5,10,3]))
   blk1=block(np.array([0,10,0]),np.array([6,12,2]))
   blk2=block(np.array([0,12,0]),np.array([6,14,2]))
   blk3=block(np.array([0,14,0]),np.array([6,16,2]))
   blk4=block(np.array([0,16.5,0]),np.array([6,17,2]))
   fence=Panel([Triangle((8,0,0),(8,15,0),(8,15,1)),
                Triangle((8,0,1),(8, 0,0),(8,15,1))])
   b=Building(blk0+blk1+blk2+blk3+blk4+(fence,))
   ray0=Ray((0.3,0.3,2.0),(0.1,1,-0.01))
   line_segments=[(8,8),(18,18),(0,4)] # [xs,ys,zs]
   draw_building(b,rays=[ray0],line_segments=line_segments,color='y',limits=[(0,10),(0,20),(0,4)],labels=['$x$','$y$','$z$'],fontsize=6,show=True,pdffn='building0.pdf')
This constructs a building like this:
.. figure:: ../../examples/img/building0.png
:scale: 80 %
:alt: building0.png
Using the real-time plotting utility
------------------------------------
As an aid to development and debugging, a stand-alone python script ``realtime_plotter_05.py`` for real-time plotting is included. This reads stdin and plots in a window as the data is generated. By default, png and pdf images are saved when all data has been read. It is configured with these command-line arguments:
.. code-block:: text
   -np       number of plots (default 1)
   -tm       t_max (maximum time on x-axis, default 10)
   -xl       x-axis label (default 'time')
   -fst      final sleep time (before closing the window and saving the images)
   -fnb      filename base
   -ylims    y-axis limits (a python dictionary)
   -ylabels  y-axis labels (a python dictionary)
   -title    figure title
   -lw       line width (default 2)
If ``-fnb`` is specified, the final plot is saved as png and pdf figures.
Typical usage would be in a bash script like this:
.. literalinclude:: ../../examples/run_RIC_example.sh
:language: bash
:linenos:
:lines: 4-
This generates a plot like this:
.. figure:: ../../examples/img/AIMM_simulator_RIC_example.png
:scale: 80 %
:alt: AIMM_simulator_RIC_example.png
Simulator module API reference
------------------------------
AIMM simulator
~~~~~~~~~~~~~~
.. automodule:: AIMM_simulator
:members: Sim,Cell,UE,Scenario,MME,RIC,Logger
NR 5G standard functions
~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: NR_5G_standard_functions
:members:
UMa pathloss model
~~~~~~~~~~~~~~~~~~~
.. automodule:: UMa_pathloss_model
:members:
:special-members:
:exclude-members: __dict__,__weakref__,_PL1,_PL2
UMi pathloss model
~~~~~~~~~~~~~~~~~~~
.. automodule:: UMi_pathloss_model
:members:
:special-members:
:exclude-members: __dict__,__weakref__,_PL1,_PL2
InH pathloss model
~~~~~~~~~~~~~~~~~~~
.. automodule:: InH_pathloss_model
:members:
:special-members:
:exclude-members: __dict__,__weakref__,_PL1,_PL2
geometry_3d module
~~~~~~~~~~~~~~~~~~
.. automodule:: geometry_3d
:members:
:exclude-members: distance_to_plane,test_05,intersect_triangle
Real-time plotting utility
~~~~~~~~~~~~~~~~~~~~~~~~~~
This is independent of the main simulator code. It reads output from the Logger via a pipe from stdin, and plots it using a matplotlib animation. It is driven with command-line options, which can be seen by running ``realtime_plotter.py --help``.
.. code-block:: text
   -h, --help            show this help message and exit
   --selftest            self-test
   -naxes NAXES          number of axes
   -nplots NPLOTS        number of plots
   -tmax TMAX            t_max
   -xlabel XLABEL        x axis label
   -fst FST              final sleep time
   -fnb FNB              filename base
   -ylims YLIMS          y limits (dict)
   -ylabels YLABELS      ylabels (dict)
   -title TITLE          figure title
   -lw LW                plot linewidth
   -author AUTHOR        author name for plot bottom margin
   -extra EXTRA          extra features to be added to the plot; raw python code
   -inputfile INPUTFILE  file to read input from instead of stdin; in this case the plot is not displayed, but written to an mp4 file
   -column_to_axis_map COLUMN_TO_AXIS_MAP
                         column_to_axis_map (dict)
..
.. argparse::
:module: realtime_plotter
:func: main
:prog: realtime_plotter
Last modified: |today|
| AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/doc/sphinx_source/index.rst | index.rst |
__version__='2.0.0'
from sys import stdin,stderr,exit,argv
from os.path import basename
from time import time,sleep,strftime,localtime
import numpy
from random import random
import argparse
from matplotlib import animation,use as matplotlib_use
from matplotlib import pyplot as plt
try:
from matplotlib import __version__ as matplotlib_version
from matplotlib.patches import ConnectionPatch
from matplotlib.lines import Line2D
matplotlib_use('Qt5Agg') # TkAgg or wxAgg or Qt5Agg or Qt4Agg
except:
pass
_t0=time()
_second_call=False
def fig_timestamp(fig,author='',brand='AIMM Sim —',fontsize=6,color='blue',alpha=0.7,rotation=0,prespace=' '):
# Keith Briggs 2020-01-07
date=strftime('%Y-%m-%d %H:%M',localtime())
fig.text( # position text relative to Figure
0.01,0.005,prespace+'%s %s'%(brand+' '+author,date,),
ha='left',va='bottom',fontsize=fontsize,color=color,
rotation=rotation,
transform=fig.transFigure,alpha=alpha)
class Animate:
def __init__(s,getter,naxes,nplots,xlim=(0,1),ylims={},xlabel='',ylabels={},legends={},title='',lw=2,image_fnbase='',tmax=None,figscale=1.5,final_sleep_time=5,author='',extra='',inputfile='',column_to_axis_map={},xlabel_fontsize=10,ylabel_fontsize=10,title_fontsize=12,cmap_type='hsv'):
# https://matplotlib.org/stable/tutorials/colors/colormaps.html
s.getter=getter
s.naxes=naxes
s.nplots=nplots
s.xlim,s.ylims=xlim,ylims
s.lw=lw
s.xlabel,s.ylabels,s.title=xlabel,ylabels,title
s.image_fnbase=image_fnbase
s.legends=legends
s.tmax=tmax
s.lines=[]
s.final_sleep_time=final_sleep_time
s.x=[]
s.ys=[[] for i in range(s.nplots)]
s.fig=plt.figure(figsize=(figscale*6.4,figscale*4.8))
#s.fig.tight_layout()
    if column_to_axis_map:
      s.column_to_axis_map={} # Keith Briggs 2022-08-08
    else:
      s.column_to_axis_map=dict((i,i) for i in range(s.naxes)) # default
    if s.nplots>s.naxes: # map extra plots to last axis (so we don't lose any)
      for x in range(s.naxes,s.nplots):
        s.column_to_axis_map[x]=s.naxes-1
    for x,y in column_to_axis_map.items():
      s.column_to_axis_map[x]=y # overwrite defaults with passed argument
s.ax=[s.fig.add_subplot(s.naxes,1,1+i) for i in range(s.naxes)]
s.fig.align_ylabels(s.ax)
print(f' naxes={s.naxes} nplots={s.nplots}',file=stderr)
print(f' column_to_axis_map={s.column_to_axis_map}',file=stderr)
print(f' ylims={s.ylims}',file=stderr)
s.transfigure=s.fig.transFigure.inverted()
s.ax.reverse() # ax[0] at bottom
s.extra=extra
s.inputfile=inputfile
s.anim=None # gets created later (in run())
    s.ncolors=nplots
    s.cmap=plt.get_cmap(cmap_type)
    s.colors=tuple(s.cmap(0.9*i/s.ncolors) for i in range(s.ncolors))
props=dict(boxstyle='round',facecolor='white',alpha=0.8)
for i in range(s.naxes):
if i==0: # bottom plot
if s.xlabel: s.ax[i].set_xlabel(s.xlabel,fontsize=xlabel_fontsize)
else: # other plots
s.ax[i].xaxis.set_ticklabels([])
if i in ylims: s.ax[i].set_ylim(*ylims[i])
if i in ylabels: s.ax[i].set_ylabel(s.ylabels[i],fontsize=ylabel_fontsize)
s.ax[i].grid(lw=0.5,alpha=0.5,color='gray')
s.ax[i].set_xlim(*xlim)
s.ax[i].xaxis.set_major_locator(plt.MaxNLocator(10))
if s.naxes<4: # set number of ticks on y axes...
s.ax[i].yaxis.set_major_locator(plt.MaxNLocator(6))
else:
s.ax[i].yaxis.set_major_locator(plt.MaxNLocator(4))
if i in s.legends: # FIXME
try:
lx,ly,lt=s.legends[i].split('\t')
lx,ly=float(lx),float(ly) # legend position
s.ax[i].text(lx,ly,lt,fontsize=8,verticalalignment='top',horizontalalignment='right',bbox=props)
except:
print('legend must have format "x<tab>y<tab>text"',file=stderr)
if s.title: s.ax[-1].set_title(s.title,fontsize=title_fontsize)
s.pdf_saved=False
fig_timestamp(s.fig,author=author,rotation=0,fontsize=8)
def init(s):
for line in s.lines: line.set_data([],[])
return s.lines
def animate(s,k,dbg=True):
global _second_call
xy=next(s.getter)
if xy is None or len(xy)==0: # no more data
if dbg: print(f'{basename(__file__)}: input data exhausted.',file=stderr)
if not _second_call:
#for i in range(s.nplots): # replot; it gets deleted when show() returns
# s.ax[i].plot(s.x,s.ys[i],lw=s.lw,color=s.colors[i%5],alpha=1)
try: # Keith Briggs 2022-08-08 FIXME why is this needed?
#print(f'not _second_call: s.column_to_axis_map={s.column_to_axis_map}',file=stderr)
for i,j in s.column_to_axis_map.items(): # 2021-12-17 replot
#print(f'not _second_call: i={i} j={j}',file=stderr)
if i<len(s.ys): s.ax[j].plot(s.x,s.ys[i],lw=s.lw,color=s.colors[i%s.ncolors],alpha=1) # line
#s.ax[j].plot(s.x,s.ys[i],lw=0.5,marker='o',markersize=0.5,color=s.colors[i%s.ncolors]) # dot only
except:
print(f'not _second_call: plot failed!',file=stderr)
if s.extra: # plot "extra" again to make sure it's on top!
s.transfigure=s.fig.transFigure.inverted() # this needs updating!
try:
exec(s.extra)
print(f'"extra" executed at t={time()-_t0:.2f}',file=stderr)
s.extra=None # make sure it's only done once
except Exception as e:
print(f'extra="{s.extra}" failed with message "{str(e)}"!',file=stderr)
if s.image_fnbase:
print(f'animate: saving final image files at t={time()-_t0:.2f}...',file=stderr,end='')
s.fig.savefig(s.image_fnbase+'.png')
s.fig.savefig(s.image_fnbase+'.pdf')
print('done.',file=stderr)
print('eog '+s.image_fnbase+'.png &',file=stderr)
print('evince '+s.image_fnbase+'.pdf &',file=stderr)
_second_call=True
sleep(s.final_sleep_time)
exit(0)
# else (xy is not None)...
s.x.append(xy[0]) # time
    for j in range(s.nplots): s.ys[j].append(xy[1+j])
for i,ysi in enumerate(s.ys):
s.lines[i].set_data(s.x,ysi)
#s.lines[s.column_to_axis_map[i]].set_data(s.x,ysi)
return s.lines
def run_OLD(s,nframes=1000):
plt.ion()
for i in range(s.naxes):
lobj=s.ax[i].plot([],[],lw=s.lw,color=s.colors[i%s.ncolors])[0]
s.lines.append(lobj)
s.anim=animation.FuncAnimation(s.fig,s.animate,init_func=s.init,frames=nframes,interval=0.01,blit=True,save_count=1000) #,repeat=False)
plt.show(block=True)
def run(s,nframes=1000):
# create a plot object for each plot, and map them to axes
plt.ion()
for i,j in s.column_to_axis_map.items():
print(f'run: column[{i}] is mapped to axis [{j}].',file=stderr)
s.lines.append(s.ax[j].plot([],[],lw=s.lw,color=s.colors[i%s.ncolors],alpha=1)[0])
#s.lines.append(s.ax[j].plot(s.x,s.ys[i],lw=0.0,marker='o',markersize=0.5,color=s.colors[i%s.ncolors])[0]) # dot only
s.anim=animation.FuncAnimation(s.fig,s.animate,init_func=s.init,frames=nframes,interval=0.01,blit=True,save_count=1000) #,repeat=False)
plt.show(block=True)
def run_noshow(s,nframes=2*5000):
# FIXME need a good way to set nframes
for i in range(s.naxes):
axi=s.ax[i]
axi.plot([],[],lw=s.lw)
lobj=axi.plot([],[],lw=s.lw,color=s.colors[i%s.ncolors])[0]
s.lines.append(lobj)
s.anim=animation.FuncAnimation(s.fig,s.animate,init_func=s.init,frames=nframes,interval=0.01,blit=True,save_count=nframes)
plt.draw()
s.save_mp4()
def save_mp4(s):
print(f's.anim={s.anim}',file=stderr)
writervideo=animation.FFMpegWriter(fps=30,bitrate=2000)
#print(f'writervideo={writervideo} ...',file=stderr)
filename_mp4=f'{s.inputfile}.mp4'
print(f'Writing {filename_mp4} ...',end='',file=stderr); stderr.flush()
s.anim.save(filename_mp4,writer=writervideo)
print('done',file=stderr)
def add_line_betweenaxes(s,xy0,xy1,ax0,ax1,color='r',lw=1,arrowstyle='-',shrinkB=0): # 2021-10-28
# Draw an arrow between two points in data coordinates, possibly
# in different axes.
s.fig.add_artist(ConnectionPatch(
xyA=xy0, coordsA=s.ax[ax0].transData,
xyB=xy1, coordsB=s.ax[ax1].transData,
arrowstyle=arrowstyle,shrinkB=shrinkB,color=color,lw=lw)
)
def _getter_random(n):
global _k,last
while True:
if _k>n: yield None
_k+=1
x=numpy.random.random(3)
nxt=0.2*x+0.8*last
last=nxt
yield _k,nxt[0],nxt[1],10*nxt[2]
def getter_stdin(nrowsmax=None):
k=0
while True:
if nrowsmax and k>nrowsmax: yield None
k+=1
line=stdin.readline()
if not line: yield None
if line and line[0]=='#':
continue # 2021-10-29
else:
yield numpy.fromstring(line,sep='\t') # 2021-12-15
#yield numpy.array(list(map(float,line.split()))) # 2021-07-15
def getter_tsv(tsv,skip=10):
# Keith Briggs 2021-07-19 - return rows of a pre-loaded tsv file
k=0
nrows=tsv.shape[0]
while k<nrows:
yield tsv[k]
k+=skip
print('getter_tsv done',file=stderr)
yield None
def test_01(n=100,naxes=3):
  # note: Animate takes nplots (not ncols), and dicts keyed by axis index for ylims/ylabels/legends
  animate=Animate(_getter_random(n),naxes=naxes,nplots=naxes,xlim=(0,n),ylims={0:(0,1),1:(0,1),2:(0,10)},xlabel='time',ylabels={i:'random' for i in range(naxes)},legends={i:'90\t0.9\trandom' for i in range(naxes)})
  animate.run(nframes=n)
def main():
parser=argparse.ArgumentParser()
parser.add_argument('--selftest', help='self-test',action='store_true')
parser.add_argument('-naxes',type=int, help='number of axes',default=0)
parser.add_argument('-nplots',type=int, help='number of plots',default=1)
parser.add_argument('-tmax',type=float, help='t_max',default=100.0)
parser.add_argument('-xlabel',type=str, help='x axis label',default='time')
parser.add_argument('-fst',type=float, help='final sleep time',default=5.0)
parser.add_argument('-fnb',type=str, help='filename base',default='')
parser.add_argument('-ylims',type=str, help='y limits (dict)',default='')
parser.add_argument('-ylabels',type=str,help='ylabels (dict)',default='')
parser.add_argument('-title',type=str, help='figure title',default='')
parser.add_argument('-lw',type=str, help='linewidth',default=2)
parser.add_argument('-author',type=str, help='author name for plot bottom margin',default='')
parser.add_argument('-extra',type=str, help='extra features to be added to the plot; raw python code',default='')
parser.add_argument('-inputfile',type=str, help='file to read input from instead of stdin; in this case the plot is not displayed, but written to an mp4 file',default='')
parser.add_argument('-column_to_axis_map',type=str, help='column_to_axis_map',default='{}')
args=parser.parse_args()
if args.selftest:
global _k,last,nplots
_k=0; last=numpy.zeros(3); test_01(); exit()
if args.naxes==0: # default
args.naxes=args.nplots
#if args.ncols: ncols=args.ncols
#else: ncols=nplots
  xlim=(0.0,args.tmax)
ylims={i: (0.0,20.0) for i in range(args.naxes)} # default ylims
if args.ylims:
try:
d=eval(args.ylims)
if type(d) is dict:
for q in d: ylims[q]=d[q]
elif type(d) in (tuple,list):
for i,q in enumerate(d): ylims[i]=q
except:
print(f'Could not parse -ylims="{args.ylims}"',file=stderr)
ylabels={i: f'$y_{{{i}}}$' for i in range(args.naxes)}
if args.ylabels:
try:
d=eval(args.ylabels)
if type(d) is dict:
for q in d: ylabels[q]=d[q]
elif type(d) is list: # 2021-11-09 allow list of labels
for i,q in enumerate(d): ylabels[i]=q
elif type(d) is str:
for q in range(args.naxes): ylabels[q]=f'{d}$_{{{q}}}$'
except:
print(f'Could not parse -ylabels="{args.ylabels}"',file=stderr)
if args.inputfile and args.inputfile not in ('stdin','-',):
try:
tsv=numpy.loadtxt(args.inputfile)
nrows=tsv.shape[0]
print(f'Loaded tsv file "{args.inputfile}", {nrows} rows',file=stderr)
except:
print(f'Could not load tsv file "{args.inputfile}", quitting',file=stderr)
exit(1)
getter=getter_tsv(tsv)
else:
getter=getter_stdin()
if args.naxes>4: plt.rcParams.update({'font.size': 6})
#column_to_axis_map={}
#column_to_axis_map={0:0,1:0,2:1,3:2,4:2,5:3,6:4} # FIXME
#column_to_axis_map={1:0,2:1,3:2,4:2,5:3,6:4} # FIXME
try:
column_to_axis_map=eval(args.column_to_axis_map)
except:
    print(f'{basename(__file__)}: could not parse column_to_axis_map="{args.column_to_axis_map}", using default',file=stderr)
column_to_axis_map={}
animate=Animate(
getter,
naxes=args.naxes,
nplots=args.nplots,
#ncols=ncols, # number of columns read from input file
xlim=xlim,
title=args.title,
lw=args.lw,
ylims=ylims,
xlabel=args.xlabel,
ylabels=ylabels,
legends=[],
final_sleep_time=args.fst,
image_fnbase=args.fnb,
author=args.author,
extra=args.extra,
inputfile=args.inputfile,
column_to_axis_map=column_to_axis_map
)
if args.inputfile in ('','stdin','-',):
animate.run(nframes=100)
else:
animate.run_noshow()
return parser
if __name__=='__main__':
print(f'matplotlib version={matplotlib_version}',file=stderr)
print(f'{basename(__file__)} starting...',file=stderr)
plt.rcParams.update({'font.size': 12})
plt.rcParams.update({'figure.autolayout': True})
# https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
# 'figure.autolayout': True,
# 'figure.constrained_layout.use': True
main() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/realtime_plotter.py | realtime_plotter.py |
from math import log10,hypot
from numpy.linalg import norm
class UMa_pathloss:
'''
Urban macrocell dual-slope pathloss model, from 3GPP standard 36.873,
Table 7.2-1.
The model is defined in 36873-c70.doc from https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=2574.
This code covers the cases 3D-UMa LOS and NLOS.
3D-UMa = three dimensional urban macrocell model.
LOS = line-of-sight.
NLOS = non-line-of-sight.
'''
def __init__(s,fc_GHz=3.5,h_UT=2.0,h_BS=25.0,LOS=True,h=20.0,W=20.0):
'''
Initialize a pathloss model instance.
Parameters
----------
fc_GHz : float
Centre frequency in GigaHertz (default 3.5).
h_UT : float
Height of User Terminal (=UE) in metres (default 2).
h_BS : float
Height of Base Station in metres (default 25).
LOS: bool
Whether line-of-sight model is to be used (default True).
h : float
Average building height (default 20, used in NLOS case only)
W : float
Street width (default 20, used in NLOS case only)
'''
s.fc=fc_GHz # GHz
s.log10fc=log10(s.fc)
s.h_UT=h_UT
s.h_BS=h_BS
s.LOS=LOS
s.h=h
s.W=W
s.c=3e8
s.h_E=1.0
s.dBP=4.0*(s.h_BS-s.h_E)*(s.h_UT-s.h_E)*s.fc*1e9/s.c # Note 1
s.a=9.0*log10(s.dBP**2+(s.h_BS-s.h_UT)**2)
# pre-compute constants to speed up calls...
s.const_close=28.0+20.0*s.log10fc
s.const_far =28.0+20.0*s.log10fc-s.a
def __call__(s,xyz_cell,xyz_UE):
'''
Return the pathloss between 3-dimensional positions xyz_cell and
xyz_UE (in metres).
Note that the distances, building heights, etc. are not checked
to ensure that this pathloss model is actually applicable.
'''
# TODO: could we usefully vectorize this, so that xyz_cell,xyz_UE have shape (n,3) to compute n pathlosses at once?
d3D_m=norm(xyz_cell-xyz_UE) # new way 2021-10-29
# TODO 2022-04-27: is the next faster?
#dxyz=xyz_cell-xyz_UE; d3D_m=hypot(dxyz[0],hypot(dxyz[1],dxyz[2]))
if d3D_m<s.dBP: PL3D_UMa_LOS=s.const_close+22.0*log10(d3D_m)
else: PL3D_UMa_LOS=s.const_far +40.0*log10(d3D_m)
if s.LOS:
return PL3D_UMa_LOS
# else NLOS:
# Formulas from Table 7.2-1 are...
# PL3D-UMa-NLOS=161.04-7.1*log10(W)+7.5*log10(h)-(24.37-3.7*(h/hBS)**2)*log10(hBS)+(43.42-3.1*log10(hBS))*(log10(d3D)-3)+20*log10(fc)-(3.2*(log10(17.625))**2-4.97)-0.6*(hUT-1.5)
# PL=max(PL3D-UMa-NLOS,PL3D-UMa-LOS)
    c1=-9.1904695449517596702522e-4 # =3.2*(log10(17.625))**2-4.97; note 17.625=11.75*h_UT evaluated at h_UT=1.5 (TR 36.873 Table 7.2-1)
PL3D_UMa_NLOS=161.04-7.1*log10(s.W)+7.5*log10(s.h)-(24.37-3.7*(s.h/s.h_BS)**2)*log10(s.h_BS)+(43.42-3.1*log10(s.h_BS))*(log10(d3D_m)-3.0)+20*log10(s.fc)-(c1)-0.6*(s.h_UT-1.5) # TODO pre-compute more constants to speed this up!
return max(PL3D_UMa_NLOS,PL3D_UMa_LOS)
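# Example usage (editor's sketch, not part of the original module):
#   import numpy as np
#   PL=UMa_pathloss() # defaults: 3.5GHz, h_UT=2, h_BS=25, LOS
#   loss_dB=PL(np.array([0.0,0.0,25.0]),np.array([500.0,0.0,2.0]))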
def plot():
' Plot the pathloss model predictions, as a self-test. '
import numpy as np
import matplotlib.pyplot as plt
from fig_timestamp import fig_timestamp
fig=plt.figure(figsize=(8,6))
ax=fig.add_subplot()
ax.grid(color='gray',alpha=0.7,lw=0.5)
d=np.linspace(10,5000,100) # valid from 10m
PL=UMa_pathloss(fc_GHz=1.8,h_UT=1.5,h_BS=17.5,LOS=False)
NLOS=np.array([PL(0,di) for di in d])
  ax.plot(d,NLOS,lw=2,label=r'NLOS ($\sigma=6$)') # or semilogx
ax.fill_between(d,NLOS-6.0,NLOS+6.0,alpha=0.2) # sigma_{SF}=6 for NLOS case
PL=UMa_pathloss(fc_GHz=1.8,h_UT=1.5,h_BS=17.5,LOS=True)
LOS=np.array([PL(0,di) for di in d])
  ax.plot(d,LOS,lw=2,label=r'LOS ($\sigma=4$)') # or semilogx
ax.fill_between(d,LOS-4.0,LOS+4.0,alpha=0.2) # sigma_{SF}=4 for LOS case
ax.set_xlabel('distance (metres)')
ax.set_ylabel('pathloss (dB)')
ax.set_xlim(0,np.max(d))
ax.set_ylim(40)
ax.legend()
ax.set_title('3GPP UMa pathloss models')
fig.tight_layout()
fig_timestamp(fig,rotation=0,fontsize=6,author='Keith Briggs')
fnbase='img/UMa_pathloss_model_01'
fig.savefig(f'{fnbase}.png')
print(f'eog {fnbase}.png &')
fig.savefig(f'{fnbase}.pdf')
print(f'evince {fnbase}.pdf &')
if __name__=='__main__':
plot() # simple self-test | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/AIMM_simulator/UMa_pathloss_model.py | UMa_pathloss_model.py |
from sys import exit
from bisect import bisect
from math import floor,ceil,log2
from functools import lru_cache
from dataclasses import dataclass
import numpy as np
SINR90pc=np.array([-float('inf'),-1.89,-0.82,0.95,2.95,4.90,7.39,8.89,11.02,13.32,14.68,16.62,18.91,21.58,24.88,29.32,float('inf'),])
# TS_38_214.pdf page 43 Table 5.2.2.1-2: 4-bit CQI Table for reporting CQI based on QPSK
# The CQI indices and their interpretations are given in Table 5.2.2.1-2 or Table 5.2.2.1-4 for reporting CQI based on QPSK, 16QAM and 64QAM. The CQI indices and their interpretations are given in Table 5.2.2.1-3 for reporting CQI based on QPSK, 16QAM, 64QAM and 256QAM.
_CQI_to_efficiency_QPSK=np.array([
[ 0, float('inf'),float('inf')],
[ 1, 78, 0.1523],
[ 2, 120, 0.2344],
[ 3, 193, 0.3770],
[ 4, 308, 0.6016],
[ 5, 449, 0.8770],
[ 6, 602, 1.1758],
[ 7, 378, 1.4766],
[ 8, 490, 1.9141],
[ 9, 616, 2.4063],
[10, 466, 2.7305],
[11, 567, 3.3223],
[12, 666, 3.9023],
[13, 772, 4.5234],
[14, 873, 5.1152],
[15, 948, 5.5547],
])
# 38.214 Table 5.1.3.1-1 (MCS index table 1 for PDSCH, up to 64QAM)
# http://www.techplayon.com/5g-nr-modulation-and-coding-scheme-modulation-and-code-rate/
# MCS Index: & Modulation Order Qm & Target code Rate x1024 R & Spectral efficiency\\
MCS_to_Qm_table_64QAM={
0: ( 2,120,0.2344),
1: ( 2,157,0.3066),
2: ( 2,193,0.3770),
3: ( 2,251,0.4902),
4: ( 2,308,0.6016),
5: ( 2,379,0.7402),
6: ( 2,449,0.8770),
7: ( 2,526,1.0273),
8: ( 2,602,1.1758),
9: ( 2,679,1.3262),
10: ( 4,340,1.3281),
11: ( 4,378,1.4766),
12: ( 4,434,1.6953),
13: ( 4,490,1.9141),
14: ( 4,553,2.1602),
15: ( 4,616,2.4063),
16: ( 4,658,2.5703),
17: ( 6,438,2.5664),
18: ( 6,466,2.7305),
19: ( 6,517,3.0293),
20: ( 6,567,3.3223),
21: ( 6,616,3.6094),
22: ( 6,666,3.9023),
23: ( 6,719,4.2129),
24: ( 6,772,4.5234),
25: ( 6,822,4.8164),
26: ( 6,873,5.1152),
27: ( 6,910,5.3320),
28: ( 6,948,5.5547),
29: ( 2,'reserved', 'reserved'),
30: ( 4,'reserved', 'reserved'),
31: ( 6,'reserved', 'reserved'),
}
def SINR_to_CQI(sinr_dB):
return np.searchsorted(SINR90pc,sinr_dB)-1 # vectorized
# 2021-03-08...
#@lru_cache(maxsize=None)
#def SINR_to_CQI_cached(sinr_dB_int):
# return np.searchsorted(SINR90pc,sinr_dB_int)-1 # vectorized
def CQI_to_efficiency_QPSK(cqi):
# non-vectorized (TODO)
if not 0<=cqi<=15: return float('nan')
return _CQI_to_efficiency_QPSK[cqi,2]
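# A minimal sketch (hypothetical helper, not called anywhere): map SINR reports
# to CQI indices, then to spectral efficiencies from the QPSK table above.
# SINR_to_CQI is vectorized, so an array input gives an array of CQIs.
def _example_SINR_to_CQI():
  sinr_dB=np.array([3.0,10.0,25.0])
  cqi=SINR_to_CQI(sinr_dB)
  print(cqi,[CQI_to_efficiency_QPSK(int(c)) for c in cqi])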
def RSRP_report(rsrp_dBm):
'''
Convert RSRP report from dBm to standard range.
Parameters
----------
rsrp_dBm : float
RSRP report in dBm
Returns
-------
int
RSRP report in standard range.
'''
if rsrp_dBm==float('inf'): return 127
if rsrp_dBm<-156.0: return 0
if rsrp_dBm>=-31.0: return 126
return int(rsrp_dBm+156.0)
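# A minimal sketch (hypothetical helper, not called anywhere): the standard
# range clips at 0 below -156dBm and at 126 at or above -31dBm.
def _example_RSRP_report():
  for rsrp_dBm in (-200.0,-100.0,-31.0,float('inf')):
    print(rsrp_dBm,RSRP_report(rsrp_dBm)) # expect 0, 56, 126, 127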
@dataclass
class Radio_state:
NofSlotsPerRadioFrame: int=20
NofRadioFramePerSec: int =100
NRB_sc: int =12
Nsh_symb: int =13
NPRB_oh: int =0
nPRB: int =273
Qm: int =8 # Modulation order
v: int =4 # Number of Layers
R: float =0.948
MCS: int =20
def max_5G_throughput_64QAM(radio_state):
# https://www.sharetechnote.com/html/5G/5G_MCS_TBS_CodeRate.html
# converted from octave/matlab Keith Briggs 2020-10-09
Qm,R,Spectral_efficiency=MCS_to_Qm_table_64QAM[radio_state.MCS]
R/=1024.0 # MCS_to_Qm_table has 1024*R
NPRB_DMRS=_DMRS_RE('type1','A',1,0)
NREprime=radio_state.NRB_sc*radio_state.Nsh_symb-NPRB_DMRS-radio_state.NPRB_oh
NREbar=min(156,NREprime)
NRE=NREbar*radio_state.nPRB
Ninfo=NRE*R*Qm*radio_state.v
if Ninfo>3824:
n=int(log2(Ninfo-24))-5
Ninfo_prime=2**n*round((Ninfo-24)/(2**n))
if R>0.25:
C=ceil((Ninfo_prime+24)/8424) if Ninfo_prime>=8424 else 1.0
else: # R<=1/4
C=ceil((Ninfo_prime+24)/3816)
TBS_bits=8*C*ceil((Ninfo_prime+24)/(8*C))-24
  else: # Ninfo<=3824
    # 38.214 5.1.3.2: n=max(3,floor(log2(Ninfo))-6), quantize Ninfo, then
    # look up the closest TBS in Table 5.1.3.2-1 (lookup not yet implemented)
    n=max(3,int(log2(Ninfo))-6)
    Ninfo=max(24,2**n*int(Ninfo/2**n))
    print('Ninfo<=3824 not yet implemented - need 38.214 Table 5.1.3.2-1')
    exit(1)
TP_bps=TBS_bits*radio_state.NofSlotsPerRadioFrame*radio_state.NofRadioFramePerSec
return TP_bps/1024/1024
def _DMRS_RE(typ,mapping,length,addPos):
# https://www.sharetechnote.com/html/5G/5G_MCS_TBS_CodeRate.html#PDSCH_TBS
# converted from octave/matlab Keith Briggs 2020-10-09
if typ=='type1':
DMRSType='type1'
if mapping=='A':
PDSCH_MappingType='A'
if addPos==0: dmrsRE= 6*length
elif addPos==1: dmrsRE=2*6*length
elif addPos==2: dmrsRE=3*6*length
elif addPos==3: dmrsRE=4*6*length
AdditionalPos=addPos
elif mapping=='B': dmrsRE=6*length
else:
DMRSType='type2'
if mapping=='B':
PDSCH_MappingType='A'
if addPos==0: dmrsRE= 4*length
elif addPos==1: dmrsRE=2*4*length
elif addPos==2: dmrsRE=3*4*length
elif addPos==3: dmrsRE=4*4*length
AdditionalPos=addPos
elif mapping=='A': dmrsRE=4*length; # FIXME is 'A' right here?
return dmrsRE
# plot functions - only for testing...
def plot_SINR_to_CQI(fn='img/plot_SINR_to_CQI'):
n,bot,top=1000,-10.0,35.0
x=np.linspace(bot,top,n)
y=[SINR_to_CQI(x[i]) for i in range(n)]
# write table to tab-separated file...
f=open('SINR_to_CQI_table.tsv','w')
for xy in zip(x,y): f.write('%.3f\t%.3f\n'%xy)
f.close()
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.set_xlim(bot,top)
ax.set_ylim(0,15)
ax.grid(linewidth=1,color='gray',alpha=0.25)
ax.scatter(x,y,marker='.',s=1,label='exact SINR to CQI mapping',color='red')
ax.plot([-5,29],[0,15],':',color='blue',alpha=0.7,label='linear approximation')
ax.set_xlabel('SINR (dB)')
ax.set_ylabel('CQI')
ax.legend(loc='lower right')
fig.tight_layout()
fig_timestamp(fig,author='Keith Briggs',rotation=90)
fig.savefig('%s.png'%fn)
fig.savefig('%s.pdf'%fn)
print('eog %s.png &'%fn)
print('evince %s.pdf &'%fn)
def plot_CQI_to_efficiency_QPSK(fn='img/plot_CQI_to_efficiency_QPSK'):
bot,top=0,15
x=range(bot,1+top)
y=[CQI_to_efficiency_QPSK(xi) for xi in x]
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.set_xlim(1+bot,top)
ax.set_ylim(0,6)
ax.grid(linewidth=1,color='gray',alpha=0.25)
ax.scatter(x,y,marker='o',s=2,label='CQI to efficiency (QPSK)',color='red')
ax.plot(x,y,':',color='gray',alpha=0.5)
ax.set_xlabel('CQI')
ax.set_ylabel('spectral efficiency')
ax.legend(loc='lower right')
fig.tight_layout()
fig_timestamp(fig,author='Keith Briggs',rotation=90)
fig.savefig('%s.png'%fn)
fig.savefig('%s.pdf'%fn)
print('eog %s.png &'%fn)
print('evince %s.pdf &'%fn)
#def CQI_to_64QAM_efficiency(cqi):
# # FIXME better version of this... vectorize
# CQI_to_MCS=lambda cqi: max(0,min(28,int(28*cqi/15.0)))
# return MCS_to_Qm_table_64QAM[CQI_to_MCS(cqi)][2]
# better 2021-03-08 (cannot easily vectorize)...
@lru_cache(maxsize=None)
def CQI_to_64QAM_efficiency(cqi):
CQI_to_MCS=max(0,min(28,int(28*cqi/15.0)))
return MCS_to_Qm_table_64QAM[CQI_to_MCS][2]
def plot_CQI_to_efficiency(fn='img/plot_CQI_to_efficiency'):
# TODO 256QAM
bot,top=0,15
cqi=range(bot,1+top)
y=[CQI_to_64QAM_efficiency(x) for x in cqi]
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.set_xlim(bot,top)
ax.set_ylim(ymin=0,ymax=6)
ax.grid(linewidth=0.5,color='gray',alpha=0.25)
ax.plot(cqi,y,':',color='gray',ms=0.5,alpha=0.7)
ax.scatter(cqi,y,marker='o',s=9,label='efficiency (64 QAM)',color='red')
ax.set_xlabel('CQI')
ax.set_ylabel('spectral efficiency')
ax.legend(loc='lower right')
fig.tight_layout()
fig_timestamp(fig,author='Keith Briggs',rotation=90)
fig.savefig('%s.png'%fn)
fig.savefig('%s.pdf'%fn)
print('eog %s.png &'%fn)
print('evince %s.pdf &'%fn)
if __name__=='__main__':
from sys import exit
from fig_timestamp_00 import fig_timestamp
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 8, 'figure.autolayout': True})
radio_state=Radio_state()
print(max_5G_throughput_64QAM(radio_state))
plot_SINR_to_CQI()
plot_CQI_to_efficiency_QPSK()
plot_CQI_to_efficiency()
exit()
for rsrp_dBm in range(-160,0):
print(rsrp_dBm,RSRP_report(rsrp_dBm)) # python3 NR_5G_standard_functions_00.py | p | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/AIMM_simulator/NR_5G_standard_functions.py | NR_5G_standard_functions.py |
from math import log10,hypot
from numpy.linalg import norm
class UMi_streetcanyon_pathloss:
'''
Urban microcell dual-slope pathloss model, from 3GPP standard 36.873,
Table 7.2-1.
The model is defined in 36873-c70.doc from https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=2574.
This code covers the cases 3D-UMi LOS and NLOS.
3D-UMi = three-dimensional urban street canyon model.
LOS = line-of-sight.
NLOS = non-line-of-sight.
'''
def __init__(s,fc_GHz=3.5,h_UT=2.0,h_BS=10.0,LOS=True):
'''
Initialize a pathloss model instance.
Parameters
----------
fc_GHz : float
Centre frequency in GigaHertz (default 3.5).
h_UT : float
Height of User Terminal (=UE) in metres (default 2).
h_BS : float
Height of Base Station in metres (default 10 for UMi).
LOS: bool
Whether line-of-sight model is to be used (default True).
'''
s.fc=fc_GHz # GHz
s.log10fc=log10(s.fc)
s.h_UT=h_UT
s.h_BS=h_BS
s.LOS=LOS
s.c=3e8
# Adjustment for effective antenna height, 1.0 in LOS for UMa.
# Same for UMi, assuming the effective antenna environment height is 1m.
s.h_E=1.0
# Note 1. This is the same for UMi and UMa.
s.dBP=4.0*(s.h_BS-s.h_E)*(s.h_UT-s.h_E)*s.fc*1e9/s.c
# This is used in the LOS models for both UMI and UMa...
# next line is a better way s.a=9.0*log10(s.dBP**2+(s.h_BS-s.h_UT)**2)
s.a=18.0*log10(hypot(s.dBP,s.h_BS-s.h_UT))
# pre-compute constants to speed up calls...
# LOS Model same for UMi and NLOS...
s.const_close=28.0+20.0*s.log10fc
s.const_far =28.0+20.0*s.log10fc-s.a
def __call__(s,xyz_cell,xyz_UE):
'''
Return the pathloss between 3-dimensional positions xyz_cell and
xyz_UE (in metres).
Note that the distances, building heights, etc. are not checked
to ensure that this pathloss model is actually applicable.
'''
# TODO: could we usefully vectorize this, so that xyz_cell,xyz_UE have shape (n,3) to compute n pathlosses at once?
d3D_m=norm(xyz_cell-xyz_UE)
if d3D_m<s.dBP:
PL3D_UMi_LOS=s.const_close+22.0*log10(d3D_m) # Same as for UMa
else:
PL3D_UMi_LOS=s.const_far +40.0*log10(d3D_m)
if s.LOS:
return PL3D_UMi_LOS
PL3D_UMi_NLOS=36.7*log10(d3D_m)+22.7+26*log10(s.fc)-0.3*(s.h_UT-1.5)
return max(PL3D_UMi_NLOS,PL3D_UMi_LOS)
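# A minimal usage sketch (hypothetical helper, not called anywhere): one NLOS
# evaluation of the street-canyon model; positions are (x,y,z) in metres.
def _example_UMi_pathloss():
  import numpy as np
  PL=UMi_streetcanyon_pathloss(fc_GHz=3.5,h_UT=2.0,h_BS=10.0,LOS=False)
  print(f'{PL(np.array([0.0,0.0,10.0]),np.array([150.0,0.0,2.0])):.2f}dB')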
def plot():
' Plot the pathloss model predictions, as a self-test. '
import numpy as np
import matplotlib.pyplot as plt
from fig_timestamp import fig_timestamp
fig=plt.figure(figsize=(8,6))
ax=fig.add_subplot()
ax.grid(color='gray',alpha=0.7,lw=0.5)
d=np.linspace(1,5000,100)
PL=UMi_streetcanyon_pathloss(fc_GHz=1.8,h_UT=1.5,h_BS=17.5,LOS=False)
NLOS=np.array([PL(0,di) for di in d])
  ax.plot(d,NLOS,lw=2,label=r'NLOS ($\sigma=4$)')
ax.fill_between(d,NLOS-4.0,NLOS+4.0,alpha=0.2) # sigma_{SF}=4 for NLOS case
PL=UMi_streetcanyon_pathloss(fc_GHz=1.8,h_UT=1.5,h_BS=17.5,LOS=True)
LOS=np.array([PL(0,di) for di in d])
  ax.plot(d,LOS,lw=2,label=r'LOS ($\sigma=3$)') # or semilogx
ax.fill_between(d,LOS-3.0,LOS+3.0,alpha=0.2) # sigma_{SF}=3 for LOS case
ax.set_xlabel('distance (metres)')
ax.set_ylabel('pathloss (dB)')
ax.set_xlim(np.min(d),np.max(d))
ax.set_ylim(40)
ax.legend()
ax.set_title('3GPP UMi street-canyon pathloss models')
fig.tight_layout()
fig_timestamp(fig,rotation=0,fontsize=6,author='Keith Briggs')
fnbase='img/UMi_pathloss_model_01'
fig.savefig(f'{fnbase}.png')
print(f'eog {fnbase}.png &')
fig.savefig(f'{fnbase}.pdf')
print(f'evince {fnbase}.pdf &')
if __name__=='__main__': # simple self-test
plot() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/AIMM_simulator/UMi_pathloss_model.py | UMi_pathloss_model.py |
from sys import stderr,exit
import numpy as np
try: # if matplotlib is not installed, turn off plotting...
from matplotlib import rcParams as matplotlib_rcParams
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from fig_timestamp import fig_timestamp
except:
plt=None
class Plane:
''' Represents an infinite plane, defined by a point on the plane and a normal vector '''
def __init__(s,point,normal):
s.point =np.array(point ,dtype=float)
s.normal=np.array(normal,dtype=float)
def __repr__(s):
return f'Plane(point={s.point},normal={s.normal})'
class Ray:
''' Represents a ray, defined by a tail (starting point) and a direction vector '''
def __init__(s,tail,dv):
s.tail=np.array(tail,dtype=float)
s.dv =np.array(dv, dtype=float)
s.dv/=np.linalg.norm(s.dv)
def __repr__(s):
return f'Ray({s.tail},{s.dv})'
def intersect_triangle(s,t):
' convenience function '
return intersect3D_RayTriangle(s,t)
def distance_to_plane(ray,plane):
'''
Ray: r(t)=r0+t*u
Plane: points q s.t. (q-p0)@v=0
Intersection: t s.t. (r0+t*u-p0)@v=0
t*u@v+(r0-p0)@v=0
t*u@v=-(r0-p0)@v
t=-(r0-p0)@v/u@v
t=(p0-r0)@v/u@v
'''
r0,u=ray.tail,ray.dv
p0,v=plane.point,plane.normal
v/=np.linalg.norm(v)
u/=np.linalg.norm(u)
uv=u@v
if abs(uv)<1e-12: return np.inf # parallel
return (p0-r0)@v/uv
def reflect_in_plane(s,p):
r0,u=s.tail,s.dv
p0,v=p.point,p.normal
v/=np.linalg.norm(v)
u/=np.linalg.norm(u)
uv=u@v
d=v@(p0-r0)/uv
intersection=r0+d*u
reflected=u-2.0*uv*v
return Ray(intersection,reflected/np.linalg.norm(reflected))
def plot(s,ax,length=1.0,color='b',alpha=0.5):
''' Plots the ray in 3d '''
if plt is None: return
tip=s.tail+length*s.dv/np.linalg.norm(s.dv)
x=(s.tail[0],tip[0])
y=(s.tail[1],tip[1])
z=(s.tail[2],tip[2])
ax.plot(x,y,z,color=color,alpha=alpha)
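# A minimal sketch (hypothetical helper, not called anywhere): distance from a
# ray to a plane, and the reflected ray, using the Plane and Ray classes above.
def _example_ray_plane():
  r=Ray((0,0,0),(1,0,0))
  p=Plane((5,0,0),(-1,0,0))
  print(r.distance_to_plane(p)) # expect 5.0
  print(r.reflect_in_plane(p))  # tail (5,0,0), direction (-1,0,0)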
class Triangle:
''' Represents a planar triangle in 3d space, defined by three points. Unoriented. '''
def __init__(s,p0,p1,p2):
s.p0=np.array(p0,dtype=float)
s.p1=np.array(p1,dtype=float)
s.p2=np.array(p2,dtype=float)
s.side0=s.p1-s.p0
s.side1=s.p2-s.p0
s.normal=np.cross(s.side0,s.side1)
s.plane=Plane(s.p0,s.normal)
def __repr__(s):
    return f'Triangle({s.p0},{s.p1},{s.p2})'
def __add__(s,c):
' return a new Triangle, translated by the vector c '
return Triangle(s.p0+c,s.p1+c,s.p2+c)
def plot(s,ax,color='y',alpha=0.5,drawedges=True):
''' Plots the triangle in 3d. For kwargs, see https://matplotlib.org/stable/api/collections_api.html#matplotlib.collections.Collection '''
if plt is None: return
if drawedges:
pc=Poly3DCollection([(s.p0,s.p1,s.p2)],facecolor=color,edgecolor='olive',linewidth=0.5,alpha=alpha)
else:
pc=Poly3DCollection([(s.p0,s.p1,s.p2)],facecolor=color,linewidth=0.25,alpha=alpha)
ax.add_collection3d(pc)
def intersect3D_RayTriangle(r,t):
# find the 3D intersection of a ray with a triangle
# Input: a ray R, and a triangle T
# Return: intersection point, and distance to triangle.
u=t.side0
v=t.side1
n=t.normal
d=r.dv
  d/=np.linalg.norm(d) # ensure unit direction vector
w0=r.tail-t.p0
a=-n@w0
b=n@d
if abs(b)<1e-12: # ray is parallel to triangle plane
if abs(a)<1e-12: return r.tail,0.0 # ray lies in triangle plane
return None,np.inf # ray disjoint from plane
# get intersect point of ray with triangle plane...
q=a/b
if q<0.0: return None,np.inf
# for a segment, also test if q>1.0 => no intersect...
I=r.tail+q*d
# is I inside T?
w=I-t.p0
uu=u@u; uv=u@v; vv=v@v; wu=w@u; wv=w@v
D=uv*uv-uu*vv
s=(uv*wv-vv*wu)/D
if s<0.0 or s> 1.0: return None,np.inf # I is outside T
z=(uv*wu-uu*wv)/D
if z<0.0 or s+z>1.0: return None,np.inf # I is outside T
return I,q # it does intersect
class Panel:
''' Represents a collection of triangles (which must be parallel) making up a single flat wall panel. '''
def __init__(s,triangles):
if len(triangles)<1:
print('Panel: empty triangle list!')
exit(1)
s.triangles=triangles
# check normals are parallel...
n0=triangles[0].normal
for triangle in triangles[1:]:
if np.linalg.norm(np.cross(triangle.normal,n0))>1e-10:
print('Panel: triangles are not parallel!')
exit(1)
def __repr__(s):
r=','.join([str(t) for t in s.triangles])
return f'Panel({r})'
def __iter__(s):
return iter(s.triangles)
def plot(s,ax,color='b',alpha=0.5,drawedges=True):
for triangle in s.triangles:
triangle.plot(ax,color=color,alpha=alpha,drawedges=drawedges)
class RIS: # TODO
''' TODO a RIS. '''
def __init__(s,panel):
s.panel=panel
def __repr__(s):
return f'RIS({s.panel})'
class Building:
''' Represents a collection of panels making up a building. '''
def __init__(s,panels):
s.panels=panels
def __repr__(s):
r=','.join([str(p) for p in s.panels])
return f'Building({r})'
def plot(s,ax,color='b',alpha=0.5,drawedges=True):
for panel in s.panels:
panel.plot(ax,color=color,alpha=alpha,drawedges=drawedges)
def number_of_panels_cut(s,ray,max_distance,dbg=False):
k,d,dd=0,0.0,0.0
d_seen=[]
for panel in s.panels:
panel_cut=False
for triangle in panel:
I,d=ray.intersect_triangle(triangle)
if dbg: print(f'# I={I} d={d:.2f}',file=stderr)
if I is not None and d>1e-9: # this triangle is cut (and is not at the tail of the ray)
panel_cut,dd=True,d
break # this panel is cut, so we don't need to check other triangles
if panel_cut:
if dbg: print(f'{panel} is cut, dd={dd:.2f}',file=stderr)
if max_distance<dd<np.inf: return k,set(d_seen)
if all(abs(dd-d)>1e-6 for d in d_seen): k+=1 # do not count identical panels
d_seen.append(dd)
if dbg: print(f'# panel_cut={panel_cut} k={k}',file=stderr)
return k,set(d_seen)
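# A minimal sketch (hypothetical helper, not called anywhere): count distinct
# wall panels crossed by a ray within a range, e.g. for a per-wall pathloss
# penalty. A single wall at x=2 should give one cut at distance 2.
def _example_panels_cut():
  wall=Panel([Triangle((2,0,0),(2,1,0),(2,1,1)),Triangle((2,0,0),(2,0,1),(2,1,1))])
  b=Building([wall])
  k,dists=b.number_of_panels_cut(Ray((0.0,0.5,0.5),(1,0,0)),max_distance=10.0)
  print(k,dists) # expect 1 {2.0}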
def draw_building_3d(building,rays=[],line_segments=[],dots=[],color='y',fontsize=6,limits=[(0,10),(0,10),(0,5)],labels=['','',''],drawedges=True,show=True,pdffn='',pngfn='',dbg=False):
' General function to draw a building, also rays and lines. '
matplotlib_rcParams.update({'font.size': fontsize})
fig=plt.figure()
fig_timestamp(fig)
  ax=fig.add_subplot(projection='3d')
building.plot(ax,color=color,drawedges=drawedges)
for ray in rays:
ray.plot(ax,length=20,color='r',alpha=1)
k,dists=building.number_of_panels_cut(ray,max_distance=20,dbg=False)
if dbg: print(f'{ray} has {k} cuts')
for dist in dists: # plot intersections...
x=(ray.tail[0]+dist*ray.dv[0],)
y=(ray.tail[1]+dist*ray.dv[1],)
z=(ray.tail[2]+dist*ray.dv[2],)
ax.plot(x,y,z,color='k',marker='o',ms=6,alpha=1.0)
if line_segments: ax.plot(*line_segments,color='b',marker='o',ms=1,lw=1,alpha=1.0)
for dot in dots:
ax.plot(*dot,color='r',marker='o',ms=8,alpha=1.0)
if labels[0]: ax.set_xlabel(labels[0])
if labels[1]: ax.set_ylabel(labels[1])
if labels[2]: ax.set_zlabel(labels[2])
ax.set_xlim(limits[0])
ax.set_ylim(limits[1])
ax.set_zlim(limits[2])
# https://stackoverflow.com/questions/8130823/set-matplotlib-3d-plot-aspect-ratio
limits=np.array([getattr(ax,f'get_{axis}lim')() for axis in 'xyz'])
ax.set_box_aspect(np.ptp(limits,axis=1))
if show: plt.show()
if pngfn:
fig.savefig(pngfn)
print(f'eog {pngfn} &',file=stderr)
if pdffn:
fig.savefig(pdffn)
print(f'e {pdffn} &',file=stderr)
def test_00():
p=Plane((0,0,0),(1,1,1))
print(p.point)
print(p.normal)
t=Triangle((0,0,0),(1,1,0),(1,0,0))
r=Ray((0.75,0.75,-1.0),(0,-0.2,1))
I,q=intersect3D_RayTriangle(r,t)
print(I,q)
def test_01():
# set of unit-square vertical panels at integer x values
panels=[]
for i in range(10):
panel=Panel([Triangle((i,0,0),(i,1,0),(i,1,1)),Triangle((i,0,0),(i,0,1),(i,1,1))])
panels.append(panel)
b=Building(panels)
r=Ray((-1.0,0.8,0.7),(1,0,0))
k,d=b.number_of_panels_cut(r,1.5)
print(k)
def cube(a,b,c=(0.0,0.0,0.0)):
# deprecated - use block()
' cube c+[a,b]x[a,b]x[a,b], with each face a square Panel of two Triangles '
c=np.array(c,dtype=float)
return (
Panel([Triangle((a,a,a),(a,b,a),(b,b,a))+c,Triangle((a,a,a),(b,a,a),(b,b,a))+c]),
Panel([Triangle((a,a,b),(a,b,b),(b,b,b))+c,Triangle((a,a,b),(b,a,b),(b,b,b))+c]),
Panel([Triangle((a,a,a),(a,a,b),(a,b,b))+c,Triangle((a,a,a),(a,b,a),(a,b,b))+c]),
Panel([Triangle((b,a,a),(b,a,b),(b,b,b))+c,Triangle((b,a,a),(b,b,a),(b,b,b))+c]),
Panel([Triangle((a,a,a),(b,a,a),(b,a,b))+c,Triangle((a,a,a),(a,a,b),(b,a,b))+c]),
Panel([Triangle((a,b,a),(b,b,a),(b,b,b))+c,Triangle((a,b,a),(a,b,b),(b,b,b))+c]),
)
#def rectangle(c0,c1):
# ' rectangular panel with opposite corners c0 and c1 '
# a,b,c=c0
# d,e,f=c1
# return Panel([Triangle((a,b,c),(d,b,c),(d,e,f)),
# Triangle((a,b,c),(a,e,f),(d,b,f))])
def block(c0,c1):
''' Represents a rectangular block with opposite corners c0 and c1, with each face a rectangular Panel '''
a,b,c=c0
d,e,f=c1
return (
Panel([Triangle((a,b,c),(d,b,c),(d,b,f)),Triangle((a,b,c),(a,b,f),(d,b,f))]), # front
Panel([Triangle((a,e,c),(d,e,c),(d,e,f)),Triangle((a,e,c),(a,e,f),(d,e,f))]), # back
Panel([Triangle((a,b,c),(a,e,c),(a,e,f)),Triangle((a,b,c),(a,b,f),(a,e,f))]), # one side
Panel([Triangle((d,b,c),(d,e,c),(d,e,f)),Triangle((d,b,c),(d,b,f),(d,e,f))]), # opposite side
Panel([Triangle((a,b,c),(d,b,c),(d,e,c)),Triangle((a,b,c),(d,e,c),(a,e,c))]), # floor
Panel([Triangle((a,b,f),(d,b,f),(d,e,f)),Triangle((a,b,f),(d,e,f),(a,e,f))]), # ceiling
)
def test_02():
room0=cube(0,1)
room1=cube(0,1,c=(1.1,0,0))
room2=cube(0,1,c=(2,0,0))
b=Building(room0+room1+room2)
#print(b)
r=Ray((-0.01,0.1,0.2),(1.0,0.0,0.0))
k,d=b.number_of_panels_cut(r,max_distance=2.9,dbg=False)
print(f'{k} intersections of {r} with Building')
def test_03():
r=Ray((0.0,0.0,0.0),(1.0,1.0,1.0))
p=Plane((0.5,0.5,0.5),(1,1,1))
ref=r.reflect_in_plane(p)
print(ref)
panel=Panel([Triangle((0,0,0),(0,0.866,0),(0,1,0))])
ris=RIS(panel)
print(f'ris={ris}')
def test_04(dbg=False,fontsize=4):
fig=plt.figure()
  ax=fig.add_subplot(projection='3d')
t0=Triangle((0,0,0),(0,1,0),(0,0,1))
t1=Triangle((0,1,1),(0,1,0),(0,0,1))
panel=Panel([t0,t1])
room0=cube(0,1)
room1=cube(0,1,c=(1.0,0,0))
room2=cube(0,1,c=(2.1,0,0))
  room3=cube(0,0.5,c=(0,1.0,0))
  b=Building(room0+room1+room2+room3)
b.plot(ax,color='y')
r=Ray((0.0,0.0,0.0),(1.0,0.3,0.2))
p=Plane((0,0,2),(0,0,1))
if dbg: print(f'p={p}')
d=r.distance_to_plane(p)
k,dists=b.number_of_panels_cut(r,max_distance=10,dbg=False)
for dist in dists: # plot intersections...
x=(r.tail[0]+dist*r.dv[0],)
y=(r.tail[1]+dist*r.dv[1],)
z=(r.tail[2]+dist*r.dv[2],)
ax.plot(x,y,z,color='k',marker='o',ms=6,alpha=1.0)
print(dist,x,y,z)
if dbg: print(f'r={r} distance_to_plane={d} number_of_panels_cut={k}')
r.plot(ax,length=d,color='r',alpha=1)
ref=r.reflect_in_plane(p)
if dbg: print(f'ref={ref}')
ref.plot(ax,length=1,color='b',alpha=1)
ax.set_xlim((0,3.5e0))
ax.set_ylim((0,3.0e0))
ax.set_zlim((0,2.0e0))
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$z$')
plt.show()
fig.savefig('foo.pdf')
def test_05():
' the best example to follow! '
blk0=block(np.array([0, 0,0]),np.array([5,10,3]))
blk1=block(np.array([0,10,0]),np.array([6,12,2]))
blk2=block(np.array([0,12,0]),np.array([6,14,2]))
blk3=block(np.array([0,14,0]),np.array([6,16,2]))
blk4=block(np.array([0,16.5,0]),np.array([6,17,2]))
fence=Panel([Triangle((8,0,0),(8,15,0),(8,15,1)),
Triangle((8,0,1),(8, 0,0),(8,15,1))])
b=Building(blk0+blk1+blk2+blk3+blk4+(fence,))
ray0=Ray((0.3,0.3,2.0),(0.1,1,-0.01))
line_segments=[(8,8),(18,18),(0,4)] # [xs,ys,zs]
  draw_building_3d(b,rays=[ray0],line_segments=line_segments,color='y',limits=[(0,10),(0,20),(0,4)],labels=['$x$','$y$','$z$'],fontsize=6,show=True,pdffn='img/building0.pdf',pngfn='img/building0.png')
if __name__=='__main__':
test_05() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/AIMM_simulator/geometry_3d.py | geometry_3d.py |
__version__='2.0.2'
'''The AIMM simulator emulates a cellular radio system roughly following 5G concepts and channel models.'''
from os.path import basename
from sys import stderr,stdout,exit,version as pyversion
from math import hypot,atan2,pi as math_pi
from time import time,sleep
from collections import deque
try:
import numpy as np
except:
print('numpy not found: please do "pip install numpy"',file=stderr)
exit(1)
try:
import simpy
except:
print('simpy not found: please do "pip install simpy"',file=stderr)
exit(1)
from .NR_5G_standard_functions import SINR_to_CQI,CQI_to_64QAM_efficiency
from .UMa_pathloss_model import UMa_pathloss
def np_array_to_str(x):
' Formats a 1-axis np.array as a tab-separated string '
return np.array2string(x,separator='\t').replace('[','').replace(']','')
def _nearest_weighted_point(x,pts,w=1.0):
'''
Internal use only.
Given a point x of shape (dim,), where dim is typically 2 or 3,
an array of points pts of shape (npts,dim),
and a vector of weights w of the same length as pts,
  return a tuple (wd,i), where i is the index of the point minimizing
  w[i]*d[i], d[i] being the distance from x to point i, and wd=w[i]*d[i]
  the minimized weighted distance.
For the application to cellular radio systems, we let pts be the
cell locations, and then if we set
w[i]=p[i]**(-1/alpha),
where p[i] is the transmit power of cell i, and alpha>=2 is the pathloss
exponent, then this algorithm will give us the index of the cell providing
largest received power at the point x.
'''
weighted_distances=w*np.linalg.norm(pts-x,axis=1)
imin=np.argmin(weighted_distances)
if 0: # dbg
print('x=',x)
print('pts=',pts)
print('weighted_distances=',weighted_distances)
return weighted_distances[imin],imin
def to_dB(x):
return 10.0*np.log10(x)
def from_dB(x):
return np.power(10.0,x/10.0)
class Cell:
'''
  Class representing a single Cell (gNB). As instances are created, they are automatically given indices starting from 0. This index is available as the data member ``cell.i``. The variable ``Cell.i`` is always the current number of cells.
Parameters
----------
sim : Sim
Simulator instance which will manage this Cell.
interval : float
Time interval between Cell updates.
bw_MHz : float
Channel bandwidth in MHz.
n_subbands : int
Number of subbands.
xyz : [float, float, float]
Position of cell in metres, and antenna height.
h_BS : float
Antenna height in metres; only used if xyz is not provided.
power_dBm : float
Transmit power in dBm.
MIMO_gain_dB : float
Effective power gain from MIMO in dB. This is no more than a crude way to
estimate the performance gain from using MIMO. A typical value might be 3dB for 2x2 MIMO.
pattern : array or function
If an array, then a 360-element array giving the antenna gain in dB in 1-degree increments (0=east, then counterclockwise). Otherwise, a function giving the antenna gain in dB in the direction theta=(180/pi)*atan2(y,x).
f_callback :
A function with signature ``f_callback(self,kwargs)``, which will be called
at each iteration of the main loop.
verbosity : int
Level of debugging output (0=none).
'''
i=0
def __init__(s,
sim,
interval=10.0,
bw_MHz=10.0,
n_subbands=1,
xyz=None,
h_BS=20.0,
power_dBm=30.0,
MIMO_gain_dB=0.0,
pattern=None,
f_callback=None,
f_callback_kwargs={},
verbosity=0):
# default scene 1000m x 1000m, but keep cells near the centre
s.i=Cell.i; Cell.i+=1
s.sim=sim
s.interval=interval
s.bw_MHz=bw_MHz
s.n_subbands=n_subbands
s.subband_mask=np.ones(n_subbands) # dtype is float, to allow soft masking
s.rbs=simpy.Resource(s.sim.env,capacity=50)
s.power_dBm=power_dBm
s.pattern=pattern
s.f_callback=f_callback
s.f_callback_kwargs=f_callback_kwargs
s.MIMO_gain_dB=MIMO_gain_dB
s.attached=set()
s.reports={'cqi': {}, 'rsrp': {}, 'throughput_Mbps': {}}
# rsrp_history[i] will be the last 10 reports of rsrp received
# at this cell from UE[i] (no timestamps, just for getting trend)
s.rsrp_history={}
if xyz is not None:
s.xyz=np.array(xyz)
else: # random cell locations
s.xyz=np.empty(3)
s.xyz[:2]=100.0+900.0*s.sim.rng.random(2)
s.xyz[2]=h_BS
if verbosity>1: print(f'Cell[{s.i}] is at',s.xyz,file=stderr)
s.verbosity=verbosity
# every time we make a new Cell, we have to check whether
# we have a hetnet or not...
s.sim._set_hetnet()
#s.sim.env.process(s.loop()) # start Cell main loop
def set_f_callback(s,f_callback,**kwargs):
' Add a callback function to the main loop of this Cell '
s.f_callback=f_callback
s.f_callback_kwargs=kwargs
def loop(s):
'''
Main loop of Cell class. Default: do nothing.
'''
while True:
if s.f_callback is not None: s.f_callback(s,**s.f_callback_kwargs)
yield s.sim.env.timeout(s.interval)
def __repr__(s):
return f'Cell(index={s.i},xyz={s.xyz})'
def get_nattached(s):
'''
Return the current number of UEs attached to this Cell.
'''
return len(s.attached)
def get_xyz(s):
'''
Return the current position of this Cell.
'''
return s.xyz
def set_xyz(s,xyz):
'''
Set a new position for this Cell.
'''
s.xyz=np.array(xyz)
s.sim.cell_locations[s.i]=s.xyz
print(f'Cell[{s.i}] is now at {s.xyz}',file=stderr)
def get_power_dBm(s):
'''
Return the transmit power in dBm currently used by this cell.
'''
return s.power_dBm
def set_power_dBm(s,p):
'''
Set the transmit power in dBm to be used by this cell.
'''
s.power_dBm=p
s.sim._set_hetnet()
def boost_power_dBm(s,p,mn=None,mx=None):
'''
Increase or decrease (if p<0) the transmit power in dBm to be used by this cell.
If mn is not ``None``, then the power will not be set if it falls below mn.
If mx is not ``None``, then the power will not be set if it exceeds mx.
Return the new power.
'''
    if p<0.0:
      if mn is None or s.power_dBm+p>=mn:
        s.power_dBm+=p
      return s.power_dBm
    if p>0.0:
      if mx is None or s.power_dBm+p<=mx:
        s.power_dBm+=p
      return s.power_dBm
    return s.power_dBm
def get_rsrp(s,i):
'''
Return last RSRP reported to this cell by UE[i].
'''
if i in s.reports['rsrp']:
return s.reports['rsrp'][i][1]
return -np.inf # no reports
def get_rsrp_history(s,i):
'''
    Return an array of the last 10 RSRP values (in dBm) reported to this cell by UE[i].
'''
if i in s.rsrp_history:
return np.array(s.rsrp_history[i])
return -np.inf*np.ones(10) # no recorded history
def set_MIMO_gain(s,MIMO_gain_dB):
'''
Set the MIMO gain in dB to be used by this cell.
'''
s.MIMO_gain_dB=MIMO_gain_dB
def get_UE_throughput(s,ue_i): # FIXME do we want an array over subbands?
'''
Return the total current throughput in Mb/s of UE[i] in the simulation.
The value -np.inf indicates that there is no current report.
'''
reports=s.reports['throughput_Mbps']
if ue_i in reports: return reports[ue_i][1]
return -np.inf # special value to indicate no report
def get_UE_CQI(s,ue_i):
'''
Return the current CQI of UE[i] in the simulation, as an array across all subbands. An array of NaNs is returned if there is no report.
'''
reports=s.reports['cqi']
return reports[ue_i][1] if ue_i in reports else np.nan*np.ones(s.n_subbands)
def get_RSRP_reports(s):
'''
Return the current RSRP reports to this cell, as a list of tuples (ue.i, rsrp).
'''
reports=s.reports['rsrp']
return [(ue.i,reports[ue.i][1]) if ue.i in reports else (ue.i,-np.inf) for ue in s.sim.UEs]
def get_RSRP_reports_dict(s):
'''
Return the current RSRP reports to this cell, as a dictionary ue.i: rsrp.
'''
reports=s.reports['rsrp']
return dict((ue.i,reports[ue.i][1]) if ue.i in reports else (ue.i,-np.inf) for ue in s.sim.UEs)
def get_average_throughput(s):
'''
Return the average throughput over all UEs attached to this cell.
'''
reports,k=s.reports['throughput_Mbps'],0
ave=np.zeros(s.n_subbands)
for ue_i in reports:
k+=1
#ave+=(reports[ue_i][1][0]-ave)/k
ave+=(np.sum(reports[ue_i][1])-ave)/k
return np.sum(ave)
def set_pattern(s,pattern):
'''
Set the antenna radiation pattern.
'''
s.pattern=pattern
def set_subband_mask(s,mask):
'''
Set the subband mask to ``mask``.
'''
#print('set_subband_mask',s.subband_mask.shape,len(mask),file=stderr)
assert s.subband_mask.shape[0]==len(mask)
s.subband_mask=np.array(mask)
def get_subband_mask(s):
'''
Get the current subband mask.
'''
return s.subband_mask
def monitor_rbs(s):
while True:
if s.rbs.queue:
if s.verbosity>0: print(f'rbs at {s.sim.env.now:.2f} ={s.rbs.count}')
yield s.sim.env.timeout(5.0)
# END class Cell
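# A minimal usage sketch (hypothetical helper, not called anywhere): typical
# Cell queries from inside a Scenario, Logger or RIC loop; sim is assumed to
# be a Sim instance with at least one Cell already created.
def _example_cell_queries(sim):
  cell=sim.cells[0]
  cell.set_power_dBm(33.0)
  cell.set_subband_mask([1.0]*cell.n_subbands) # all subbands active
  print(cell.get_nattached(),cell.get_average_throughput())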
class UE:
'''
  Represents a single UE. As instances are created, they are automatically given indices starting from 0. This index is available as the data member ``ue.i``. The static (class-level) variable ``UE.i`` is always the current number of UEs.
Parameters
----------
sim : Sim
The Sim instance which will manage this UE.
xyz : [float, float, float]
Position of UE in metres, and antenna height.
h_UT : float
Antenna height of user terminal in metres; only used if xyz is not provided.
reporting_interval : float
Time interval between UE reports being sent to the serving cell.
f_callback :
A function with signature ``f_callback(self,kwargs)``, which will be called at each iteration of the main loop.
f_callback_kwargs :
kwargs for previous function.
pathloss_model
An instance of a pathloss model. This must be a callable object which
takes two arguments, each a 3-vector. The first represent the transmitter
location, and the second the receiver location. It must return the
pathloss in dB along this signal path.
If set to ``None`` (the default), a standard urban macrocell model
is used.
See further ``NR_5G_standard_functions_00.py``.
'''
i=0
def __init__(s,sim,xyz=None,reporting_interval=1.0,pathloss_model=None,h_UT=2.0,f_callback=None,f_callback_kwargs={},verbosity=0):
s.sim=sim
s.i=UE.i; UE.i+=1
s.serving_cell=None
s.f_callback=f_callback
s.f_callback_kwargs=f_callback_kwargs
# next will be a record of last 10 serving cell ids,
# with time of last attachment.
    # (0=>current, 1=>previous, etc.; -1 => not valid).
# This is for use in handover algorithms
s.serving_cell_ids=deque([(-1,None)]*10,maxlen=10)
s.reporting_interval=reporting_interval
if xyz is not None:
s.xyz=np.array(xyz,dtype=float)
else:
s.xyz=250.0+500.0*s.sim.rng.random(3)
s.xyz[2]=h_UT
if verbosity>1: print(f'UE[{s.i}] is at',s.xyz,file=stderr)
# We assume here that the UMa_pathloss model needs to be instantiated,
# but other user-provided models are already instantiated,
# and provide callable objects...
if pathloss_model is None:
s.pathloss=UMa_pathloss(fc_GHz=s.sim.params['fc_GHz'],h_UT=s.sim.params['h_UT'],h_BS=s.sim.params['h_BS'])
if verbosity>1: print(f'Using 5G standard urban macrocell pathloss model.',file=stderr)
else:
s.pathloss=pathloss_model
if s.pathloss.__doc__ is not None:
if verbosity>1: print(f'Using user-specified pathloss model "{s.pathloss.__doc__}".',file=stderr)
else:
print(f'Using user-specified pathloss model.',file=stderr)
s.verbosity=verbosity
s.noise_power_dBm=-140.0
s.cqi=None
s.sinr_dB=None
# Keith Briggs 2022-10-12 loops now started in Sim.__init__
#s.sim.env.process(s.run_subband_cqi_report())
#s.sim.env.process(s.loop()) # this does reports to all cells
def __repr__(s):
return f'UE(index={s.i},xyz={s.xyz},serving_cell={s.serving_cell})'
def set_f_callback(s,f_callback,**kwargs):
' Add a callback function to the main loop of this UE '
s.f_callback=f_callback
s.f_callback_kwargs=kwargs
def loop(s):
' Main loop of UE class '
if s.verbosity>1:
print(f'Main loop of UE[{s.i}] started')
stdout.flush()
while True:
if s.f_callback is not None: s.f_callback(s,**s.f_callback_kwargs)
s.send_rsrp_reports()
s.send_subband_cqi_report() # FIXME merge these two reports
#print(f'dbg: Main loop of UE class started'); exit()
yield s.sim.env.timeout(s.reporting_interval)
def get_serving_cell(s):
'''
Return the current serving Cell object (not index) for this UE instance.
'''
    return s.serving_cell
def get_serving_cell_i(s):
'''
Return the current serving Cell index for this UE instance.
'''
ss=s.serving_cell
if ss is None: return None
return s.serving_cell.i
def get_xyz(s):
'''
Return the current position of this UE.
'''
return s.xyz
def set_xyz(s,xyz,verbose=False):
'''
Set a new position for this UE.
'''
s.xyz=np.array(xyz)
if verbose: print(f'UE[{s.i}] is now at {s.xyz}',file=stderr)
def attach(s,cell,quiet=True):
'''
Attach this UE to a specific Cell instance.
'''
cell.attached.add(s.i)
s.serving_cell=cell
s.serving_cell_ids.appendleft((cell.i,s.sim.env.now,))
if not quiet and s.verbosity>0:
print(f'UE[{s.i:2}] is attached to cell[{cell.i}]',file=stderr)
def detach(s,quiet=True):
'''
Detach this UE from its serving cell.
'''
if s.serving_cell is None: # Keith Briggs 2022-08-08 added None test
return
s.serving_cell.attached.remove(s.i)
# clear saved reports from this UE...
reports=s.serving_cell.reports
for x in reports:
if s.i in reports[x]: del reports[x][s.i]
if not quiet and s.verbosity>0:
print(f'UE[{s.i}] detached from cell[{s.serving_cell.i}]',file=stderr)
s.serving_cell=None
def attach_to_strongest_cell_simple_pathloss_model(s):
'''
Attach to the cell delivering the strongest signal
at the current UE position. Intended for initial attachment only.
Uses only a simple power-law pathloss model. For proper handover
behaviour, use the MME module.
'''
celli=s.sim.get_strongest_cell_simple_pathloss_model(s.xyz)
s.serving_cell=s.sim.cells[celli]
s.serving_cell.attached.add(s.i)
if s.verbosity>0:
print(f'UE[{s.i:2}] ⟵⟶ cell[{celli}]',file=stderr)
def attach_to_nearest_cell(s):
'''
Attach this UE to the geographically nearest Cell instance.
Intended for initial attachment only.
'''
dmin,celli=_nearest_weighted_point(s.xyz[:2],s.sim.cell_locations[:,:2])
if 0: # dbg
print(f'_nearest_weighted_point: celli={celli} dmin={dmin:.2f}')
for cell in s.sim.cells:
d=np.linalg.norm(cell.xyz-s.xyz)
print(f'Cell[{cell.i}] is at distance {d:.2f}')
s.serving_cell=s.sim.cells[celli]
s.serving_cell.attached.add(s.i)
if s.verbosity>0:
print(f'UE[{s.i:2}] ⟵⟶ cell[{celli}]',file=stderr)
def get_CQI(s):
'''
Return the current CQI of this UE, as an array across all subbands.
'''
return s.cqi
def get_SINR_dB(s):
'''
Return the current SINR of this UE, as an array across all subbands.
The return value ``None`` indicates that there is no current report.
'''
return s.sinr_dB
def send_rsrp_reports(s,threshold=-120.0):
'''
Send RSRP reports in dBm to all cells for which it is over the threshold.
Subbands not handled.
'''
# antenna pattern computation added Keith Briggs 2021-11-24.
for cell in s.sim.cells:
pl_dB=s.pathloss(cell.xyz,s.xyz) # 2021-10-29
antenna_gain_dB=0.0
if cell.pattern is not None:
vector=s.xyz-cell.xyz # vector pointing from cell to UE
angle_degrees=(180.0/math_pi)*atan2(vector[1],vector[0])
antenna_gain_dB=cell.pattern(angle_degrees) if callable(cell.pattern) \
else cell.pattern[int(angle_degrees)%360]
rsrp_dBm=cell.power_dBm+antenna_gain_dB+cell.MIMO_gain_dB-pl_dB
rsrp=from_dB(rsrp_dBm)
if rsrp_dBm>threshold:
cell.reports['rsrp'][s.i]=(s.sim.env.now,rsrp_dBm)
if s.i not in cell.rsrp_history:
cell.rsrp_history[s.i]=deque([-np.inf,]*10,maxlen=10)
cell.rsrp_history[s.i].appendleft(rsrp_dBm)
def send_subband_cqi_report(s):
'''
For this UE, send an array of CQI reports, one for each subband; and a total throughput report, to the serving cell.
What is sent is a 2-tuple (current time, array of reports).
For RSRP reports, use the function ``send_rsrp_reports``.
    Also saves the CQI array in s.cqi, and returns the throughput value.
'''
if s.serving_cell is None: return 0.0 # 2022-08-08 detached
interference=from_dB(s.noise_power_dBm)*np.ones(s.serving_cell.n_subbands)
for cell in s.sim.cells:
pl_dB=s.pathloss(cell.xyz,s.xyz)
antenna_gain_dB=0.0
if cell.pattern is not None:
vector=s.xyz-cell.xyz # vector pointing from cell to UE
angle_degrees=(180.0/math_pi)*atan2(vector[1],vector[0])
antenna_gain_dB=cell.pattern(angle_degrees) if callable(cell.pattern) \
else cell.pattern[int(angle_degrees)%360]
if cell.i==s.serving_cell.i: # wanted signal
rsrp_dBm=cell.MIMO_gain_dB+antenna_gain_dB+cell.power_dBm-pl_dB
else: # unwanted interference
received_interference_power=antenna_gain_dB+cell.power_dBm-pl_dB
interference+=from_dB(received_interference_power)*cell.subband_mask
rsrp=from_dB(rsrp_dBm)
s.sinr_dB=to_dB(rsrp/interference) # scalar/array
s.cqi=cqi=SINR_to_CQI(s.sinr_dB)
spectral_efficiency=np.array([CQI_to_64QAM_efficiency(cqi_i) for cqi_i in cqi])
now=float(s.sim.env.now)
# per-UE throughput...
throughput_Mbps=s.serving_cell.bw_MHz*([email protected]_cell.subband_mask)/s.serving_cell.n_subbands/len(s.serving_cell.attached)
s.serving_cell.reports['cqi'][s.i]=(now,cqi)
s.serving_cell.reports['throughput_Mbps'][s.i]=(now,throughput_Mbps,)
return throughput_Mbps
def run_subband_cqi_report(s): # FIXME merge this with rsrp reporting
while True:
#if s.serving_cell is not None: # UE must be attached 2022-08-08
s.send_subband_cqi_report()
yield s.sim.env.timeout(s.reporting_interval)
# END class UE
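# A minimal usage sketch (hypothetical helper, not called anywhere): manual
# attachment and report queries; sim is assumed to be a Sim instance with at
# least one Cell already created.
def _example_ue_queries(sim):
  ue=sim.make_UE(xyz=[500.0,500.0,2.0])
  ue.attach_to_nearest_cell()
  ue.send_rsrp_reports()
  tp=ue.send_subband_cqi_report() # also updates ue.cqi and ue.sinr_dB
  print(ue.get_serving_cell_i(),ue.get_CQI(),tp)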
class Sim:
'''
Class representing the complete simulation.
Parameters
----------
params : dict
A dictionary of additional global parameters which need to be accessible to downstream functions. In the instance, these parameters will be available as ``sim.params``. If ``params['profile']`` is set to a non-empty string, then a code profile will be performed and the results saved to the filename given by the string. There will be some execution time overhead when profiling.
'''
def __init__(s,params={'fc_GHz':3.5,'h_UT':2.0,'h_BS':20.0},show_params=True,rng_seed=0):
s.__version__=__version__
s.params=params
    # set default values for operating frequency, user terminal height, and
# base station height...
if 'fc_GHz' not in params: params['fc_GHz']=3.5
if 'h_UT' not in params: params['h_UT']=2.0
if 'h_BS' not in params: params['h_BS']=20.0
s.env=simpy.Environment()
s.rng=np.random.default_rng(rng_seed)
s.loggers=[]
s.scenario=None
s.ric=None
s.mme=None
s.hetnet=None # unknown at this point; will be set to True or False
s.cells=[]
s.UEs=[]
s.events=[]
s.cell_locations=np.empty((0,3))
np.set_printoptions(precision=2,linewidth=200)
pyv=pyversion.replace('\n','') #[:pyversion.index('(default')]
print(f'python version={pyv}',file=stderr)
print(f'numpy version={np.__version__}',file=stderr)
print(f'simpy version={simpy.__version__}',file=stderr)
print(f'AIMM simulator version={s.__version__}',file=stderr)
if show_params:
print(f'Simulation parameters:',file=stderr)
for param in s.params:
print(f" {param}={s.params[param]}",file=stderr)
def _set_hetnet(s):
# internal function only - decide whether we have a hetnet
powers=set(cell.get_power_dBm() for cell in s.cells)
s.hetnet=len(powers)>1 # powers are not all equal
def wait(s,interval=1.0):
'''
Convenience function to avoid low-level reference to env.timeout().
``loop`` functions in each class must yield this.
'''
return s.env.timeout(interval)
def make_cell(s,**kwargs):
'''
Convenience function: make a new Cell instance and add it to the simulation; parameters as for the Cell class. Return the new Cell instance. It is assumed that Cells never move after being created (i.e. the initial xyz[1] stays the same throughout the simulation).
'''
s.cells.append(Cell(s,**kwargs))
xyz=s.cells[-1].get_xyz()
s.cell_locations=np.vstack([s.cell_locations,xyz])
return s.cells[-1]
def make_UE(s,**kwargs):
'''
Convenience function: make a new UE instance and add it to the simulation; parameters as for the UE class. Return the new UE instance.
'''
s.UEs.append(UE(s,**kwargs))
return s.UEs[-1]
def get_ncells(s):
'''
Return the current number of cells in the simulation.
'''
return len(s.cells)
def get_nues(s):
'''
Return the current number of UEs in the simulation.
'''
return len(s.UEs)
def get_UE_position(s,ue_i):
'''
Return the xyz position of UE[i] in the simulation.
'''
return s.UEs[ue_i].xyz
def get_average_throughput(s):
'''
Return the average throughput over all UEs attached to all cells.
'''
ave,k=0.0,0
for cell in s.cells:
k+=1
ave+=(cell.get_average_throughput()-ave)/k
return ave
def add_logger(s,logger):
'''
Add a logger to the simulation.
'''
assert isinstance(logger,Logger)
s.loggers.append(logger)
def add_loggers(s,loggers):
'''
Add a sequence of loggers to the simulation.
'''
for logger in loggers:
assert isinstance(logger,Logger)
s.loggers.append(logger)
def add_scenario(s,scenario):
'''
Add a Scenario instance to the simulation.
'''
assert isinstance(scenario,Scenario)
s.scenario=scenario
def add_ric(s,ric):
'''
Add a RIC instance to the simulation.
'''
assert isinstance(ric,RIC)
s.ric=ric
def add_MME(s,mme):
'''
Add an MME instance to the simulation.
'''
assert isinstance(mme,MME)
s.mme=mme
def add_event(s,event):
s.events.append(event)
def get_serving_cell(s,ue_i):
if ue_i<len(s.UEs): return s.UEs[ue_i].serving_cell
return None
  def get_serving_cell_i(s,ue_i):
    if ue_i<len(s.UEs):
      serving_cell=s.UEs[ue_i].serving_cell
      return serving_cell.i if serving_cell is not None else None
    return None
def get_nearest_cell(s,xy):
'''
Return the index of the geographical nearest cell (in 2 dimensions)
to the point xy.
'''
return _nearest_weighted_point(xy[:2],s.cell_locations[:,:2],w=1.0)[1]
def get_strongest_cell_simple_pathloss_model(s,xyz,alpha=3.5):
'''
Return the index of the cell delivering the strongest signal
at the point xyz (in 3 dimensions), with pathloss exponent alpha.
Note: antenna pattern is not used, so this function is deprecated,
but is adequate for initial UE attachment.
'''
p=np.array([from_dB(cell.get_power_dBm()) for cell in s.cells])
return _nearest_weighted_point(xyz,s.cell_locations,w=p**(-1.0/alpha))[1]
def get_best_rsrp_cell(s,ue_i,dbg=False):
'''
Return the index of the cell delivering the highest RSRP at UE[i].
Relies on UE reports, and ``None`` is returned if there are not enough
reports (yet) to determine the desired output.
'''
k,best_rsrp=None,-np.inf
cell_rsrp_reports=dict((cell.i,cell.reports['rsrp']) for cell in s.cells)
for cell in s.cells:
if ue_i not in cell_rsrp_reports[cell.i]: continue # no reports for this UE
time,rsrp=cell_rsrp_reports[cell.i][ue_i] # (time, subband reports)
if dbg: print(f"get_best_rsrp_cell at {float(s.env.now):.0f}: cell={cell.i} UE={ue_i} rsrp=",rsrp,file=stderr)
ave_rsrp=np.average(rsrp) # average RSRP over subbands
if ave_rsrp>best_rsrp: k,best_rsrp=cell.i,ave_rsrp
return k
def _start_loops(s):
# internal use only - start all main loops
for logger in s.loggers:
s.env.process(logger.loop())
if s.scenario is not None:
s.env.process(s.scenario.loop())
if s.ric is not None:
s.env.process(s.ric.loop())
if s.mme is not None:
s.env.process(s.mme.loop())
for event in s.events: # TODO ?
s.env.process(event)
for cell in s.cells: # 2022-10-12 start Cells
s.env.process(cell.loop())
for ue in s.UEs: # 2022-10-12 start UEs
#print(f'About to start main loop of UE[{ue.i}]..')
s.env.process(ue.loop())
#s.env.process(UE.run_subband_cqi_report())
#sleep(2); exit()
def run(s,until):
s._set_hetnet()
s.until=until
print(f'Sim: starting run for simulation time {until} seconds...',file=stderr)
s._start_loops()
t0=time()
if 'profile' in s.params and s.params['profile']:
# https://docs.python.org/3.6/library/profile.html
# to keep python 3.6 compatibility, we don't use all the
# features for profiling added in 3.8 or 3.9.
profile_filename=s.params['profile']
print(f'profiling enabled: output file will be {profile_filename}.',file=stderr)
import cProfile,pstats,io
pr=cProfile.Profile()
pr.enable()
s.env.run(until=until) # this is what is profiled
pr.disable()
strm=io.StringIO()
ps=pstats.Stats(pr,stream=strm).sort_stats('tottime')
ps.print_stats()
tbl=strm.getvalue().split('\n')
profile_file=open(profile_filename,'w')
for line in tbl[:50]: print(line,file=profile_file)
profile_file.close()
print(f'profile written to {profile_filename}.',file=stderr)
else:
s.env.run(until=until)
print(f'Sim: finished main loop in {(time()-t0):.2f} seconds.',file=stderr)
#print(f'Sim: hetnet={s.hetnet}.',file=stderr)
if s.mme is not None:
s.mme.finalize()
if s.ric is not None:
s.ric.finalize()
for logger in s.loggers:
logger.finalize()
# END class Sim
class Scenario:
'''
Base class for a simulation scenario. The default does nothing.
Parameters
----------
sim : Sim
Simulator instance which will manage this Scenario.
func : function
Function called to perform actions.
interval : float
Time interval between actions.
verbosity : int
Level of debugging output (0=none).
'''
def __init__(s,sim,func=None,interval=1.0,verbosity=0):
s.sim=sim
s.func=func
s.verbosity=verbosity
s.interval=interval
def loop(s):
'''
Main loop of Scenario class. Should be overridden to provide different functionalities.
'''
while True:
if s.func is not None: s.func(s.sim)
yield s.sim.env.timeout(s.interval)
# END class Scenario
class Logger:
'''
Represents a simulation logger. Multiple loggers (each with their own file) can be used if desired.
Parameters
----------
sim : Sim
The Sim instance which will manage this Logger.
func : function
    Function called to perform the logging action.
header : str
Arbitrary text to write to the top of the logfile.
f : file object
An open file object which will be written or appended to.
logging_interval : float
Time interval between logging actions.
'''
def __init__(s,sim,func=None,header='',f=stdout,logging_interval=10,np_array_to_str=np_array_to_str):
s.sim=sim
s.func=s.default_logger if func is None else func
s.f=f
s.np_array_to_str=np_array_to_str
s.logging_interval=float(logging_interval)
if header: s.f.write(header)
def default_logger(s,f=stdout):
for cell in s.sim.cells:
for ue_i in cell.reports['cqi']:
rep=cell.reports['cqi'][ue_i]
if rep is None: continue
cqi=s.np_array_to_str(rep[1])
f.write(f'{cell.i}\t{ue_i}\t{cqi}\n')
def loop(s):
'''
Main loop of Logger class.
Can be overridden to provide custom functionality.
'''
while True:
s.func(f=s.f)
yield s.sim.env.timeout(s.logging_interval)
def finalize(s):
'''
Function called at end of simulation, to implement any required finalization actions.
'''
pass
# END class Logger
class MME:
'''
Represents a MME, for handling UE handovers.
Parameters
----------
sim : Sim
Sim instance which will manage this Scenario.
interval : float
Time interval between checks for handover actions.
verbosity : int
Level of debugging output (0=none).
strategy : str
Handover strategy; possible values are ``strongest_cell_simple_pathloss_model`` (default), or ``best_rsrp_cell``.
anti_pingpong : float
If greater than zero, then a handover pattern x->y->x between cells x and y is not allowed within this number of seconds. Default is 0.0, meaning pingponging is not suppressed.
'''
def __init__(s,sim,interval=10.0,strategy='strongest_cell_simple_pathloss_model',anti_pingpong=30.0,verbosity=0):
s.sim=sim
s.interval=interval
s.strategy=strategy
s.anti_pingpong=anti_pingpong
s.verbosity=verbosity
print(f'MME: using handover strategy {s.strategy}.',file=stderr)
def do_handovers(s):
'''
Check whether handovers are required, and do them if so.
Normally called from loop(), but can be called manually if required.
'''
for ue in s.sim.UEs:
if ue.serving_cell is None: continue # no handover needed for this UE. 2022-08-08 added None test
oldcelli=ue.serving_cell.i # 2022-08-26
CQI_before=ue.serving_cell.get_UE_CQI(ue.i)
previous,tm=ue.serving_cell_ids[1]
if s.strategy=='strongest_cell_simple_pathloss_model':
celli=s.sim.get_strongest_cell_simple_pathloss_model(ue.xyz)
elif s.strategy=='best_rsrp_cell':
celli=s.sim.get_best_rsrp_cell(ue.i)
if celli is None:
celli=s.sim.get_strongest_cell_simple_pathloss_model(ue.xyz)
else:
print(f'MME.loop: strategy {s.strategy} not implemented, quitting!',file=stderr)
exit()
if celli==ue.serving_cell.i: continue
if s.anti_pingpong>0.0 and previous==celli:
if s.sim.env.now-tm<s.anti_pingpong:
if s.verbosity>2:
print(f't={float(s.sim.env.now):8.2f} handover of UE[{ue.i}] suppressed by anti_pingpong heuristic.',file=stderr)
continue # not enough time since we were last on this cell
ue.detach(quiet=True)
ue.attach(s.sim.cells[celli])
ue.send_rsrp_reports() # make sure we have reports immediately
ue.send_subband_cqi_report()
if s.verbosity>1:
CQI_after=ue.serving_cell.get_UE_CQI(ue.i)
print(f't={float(s.sim.env.now):8.2f} handover of UE[{ue.i:3}] from Cell[{oldcelli:3}] to Cell[{ue.serving_cell.i:3}]',file=stderr,end=' ')
print(f'CQI change {CQI_before} -> {CQI_after}',file=stderr)
def loop(s):
'''
Main loop of MME.
'''
yield s.sim.env.timeout(0.5*s.interval) # stagger the intervals
print(f'MME started at {float(s.sim.env.now):.2f}, using strategy="{s.strategy}" and anti_pingpong={s.anti_pingpong:.0f}.',file=stderr)
while True:
s.do_handovers()
yield s.sim.env.timeout(s.interval)
def finalize(s):
'''
Function called at end of simulation, to implement any required finalization actions.
'''
pass
# END class MME
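# A minimal usage sketch (hypothetical helper, not called anywhere): plug an
# MME into a simulation with RSRP-based handover and pingpong suppression.
def _example_add_mme(sim):
  sim.add_MME(MME(sim,interval=10.0,strategy='best_rsrp_cell',anti_pingpong=30.0))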
class RIC:
'''
Base class for a RIC, for hosting xApps. The default does nothing.
Parameters
----------
sim : Sim
Simulator instance which will manage this Scenario.
interval : float
Time interval between RIC actions.
verbosity : int
Level of debugging output (0=none).
'''
def __init__(s,sim,interval=10,verbosity=0):
s.sim=sim
s.interval=interval
s.verbosity=verbosity
def finalize(s):
'''
Function called at end of simulation, to implement any required finalization actions.
'''
pass
def loop(s):
'''
Main loop of RIC class. Must be overridden to provide functionality.
'''
    print(f'RIC started at {float(s.sim.env.now):.2f}.',file=stderr)
while True:
yield s.sim.env.timeout(s.interval)
# END class RIC
if __name__=='__main__': # a simple self-test
np.set_printoptions(precision=4,linewidth=200)
class MyLogger(Logger):
def loop(s):
while True:
for cell in s.sim.cells:
if cell.i!=0: continue # cell[0] only
for ue_i in cell.reports['cqi']:
if ue_i!=0: continue # UE[0] only
rep=cell.reports['cqi'][ue_i]
if not rep: continue
xy= s.np_array_to_str(s.sim.UEs[ue_i].xyz[:2])
cqi=s.np_array_to_str(cell.reports['cqi'][ue_i][1])
tp= s.np_array_to_str(cell.reports['throughput_Mbps'][ue_i][1])
s.f.write(f'{s.sim.env.now:.1f}\t{xy}\t{cqi}\t{tp}\n')
yield s.sim.env.timeout(s.logging_interval)
def test_01(ncells=4,nues=9,n_subbands=2,until=1000.0):
sim=Sim()
for i in range(ncells):
sim.make_cell(n_subbands=n_subbands,MIMO_gain_dB=3.0,verbosity=0)
sim.cells[0].set_xyz((500.0,500.0,20.0)) # fix cell[0]
for i in range(nues):
ue=sim.make_UE(verbosity=1)
if 0==i: # force ue[0] to attach to cell[0]
ue.set_xyz([501.0,502.0,2.0],verbose=True)
ue.attach_to_nearest_cell()
scenario=Scenario(sim,verbosity=0)
logger=MyLogger(sim,logging_interval=1.0)
ric=RIC(sim)
sim.add_logger(logger)
sim.add_scenario(scenario)
sim.add_ric(ric)
sim.run(until=until)
test_01() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/AIMM_simulator/AIMM_simulator.py | AIMM_simulator.py |
from sys import exit,stderr
from math import log10
from numpy.linalg import norm
class InH_pathloss:
'''
3D-InH indoor pathloss model, from 3GPP standard 36.873, Table 7.2-1.
Indoor Hotspot cell with high (indoor) UE density.
The model is defined in 36873-c70.doc from https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=2574.
LOS = line-of-sight.
NLOS = non-line-of-sight.
'''
def __init__(s,fc_GHz=3.5,h_UT=2.0,h_BS=4.0,LOS=True):
'''
Initialize a pathloss model instance.
Parameters
----------
fc_GHz : float
Centre frequency in GigaHertz (default 3.5).
h_UT : float
Height of User Terminal (=UE) in metres (default 2).
    h_BS : float
      Height of Base Station in metres (default 4 for indoor hotspot).
    LOS : bool
      Whether the line-of-sight model is to be used (default True).
'''
s.fc=fc_GHz # in GHz
s.log10fc=log10(s.fc)
s.h_UT=h_UT
s.h_BS=h_BS
s.LOS=LOS
# pre-compute constants to speed up calls...
s.const_LOS =32.8+20.0*s.log10fc
s.const_NLOS=11.5+20.0*s.log10fc
def __call__(s,xyz_cell,xyz_UE):
'''
Return the pathloss between 3-dimensional positions xyz_cell and
xyz_UE (in metres).
Note that the distances, heights, etc. are not checked
to ensure that this pathloss model is actually applicable.
'''
# TODO: could we usefully vectorize this, so that xyz_cell,xyz_UE have shape (n,3) to compute n pathlosses at once?
d3D_m=norm(xyz_cell-xyz_UE)
if s.LOS:
return s.const_LOS+16.9*log10(d3D_m)
# else NLOS:
return s.const_NLOS+43.3*log10(d3D_m)
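# A minimal usage sketch (hypothetical helper, not called anywhere): evaluate
# the indoor-hotspot model for one cell/UE pair, LOS and NLOS.
def _example_InH_pathloss():
  import numpy as np
  xyz_cell=np.array([0.0,0.0,4.0]); xyz_UE=np.array([50.0,0.0,2.0])
  for LOS in (True,False):
    print(f'LOS={LOS}: {InH_pathloss(LOS=LOS)(xyz_cell,xyz_UE):.2f}dB')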
def plot():
' Plot the pathloss model predictions, as a self-test. '
import numpy as np
import matplotlib.pyplot as plt
from fig_timestamp_00 import fig_timestamp
fig=plt.figure(figsize=(8,6))
ax=fig.add_subplot()
ax.grid(color='gray',alpha=0.7,lw=0.5)
d=np.linspace(10,150,100) # NLOS valid from 10m
PL=InH_pathloss(LOS=False)
NLOS=np.array([PL(0,di) for di in d])
  ax.plot(d,NLOS,lw=2,label=r'NLOS ($\sigma=4$)') # or semilogx
ax.fill_between(d,NLOS-4.0,NLOS+4.0,alpha=0.2) # sigma_{SF}=4 for NLOS case
  d=np.linspace(3,150,100) # LOS valid from 3m
PL=InH_pathloss(LOS=True)
LOS=np.array([PL(0,di) for di in d])
  ax.plot(d,LOS,lw=2,label=r'LOS ($\sigma=3$)') # or semilogx
ax.fill_between(d,LOS-3.0,LOS+3.0,alpha=0.2) # sigma_{SF}=3 for LOS case
ax.set_xlabel('distance (metres)')
ax.set_ylabel('pathloss (dB)')
ax.set_xlim(0,np.max(d))
ax.set_ylim(40)
ax.legend()
ax.set_title('3GPP Indoor Hotspot cell with high (indoor) UE density')
fig.tight_layout()
fig_timestamp(fig,rotation=0,fontsize=6,author='Keith Briggs')
fnbase='img/InH_pathloss_model_01'
fig.savefig(f'{fnbase}.png')
print(f'eog {fnbase}.png &',file=stderr)
fig.savefig(f'{fnbase}.pdf')
print(f'evince {fnbase}.pdf &',file=stderr)
if __name__=='__main__':
plot() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/AIMM_simulator/InH_pathloss_model.py | InH_pathloss_model.py |
from sys import stdout,stderr,exit
from math import hypot
from random import choices
from numpy import argmax
class Q_learner:
def __init__(s,reward,pick_max=False,alpha=0.1,gamma=0.5,verbose=False):
s.reward=reward # callback
s.pick_max=pick_max
s.verbose=verbose
s.nstates=0
s.alpha,s.gamma=alpha,gamma
s.beta=1.0-s.alpha
s.Q={}
s.last_action=None
s.last_state=None
def add_state(s,state,actions=[]):
s.Q[state]={}
s.nstates=len(s.Q)
for action in actions:
s.Q[state][action]=1.0
def episode(s,state,verbose=False):
actions=list(s.Q[state].keys())
weights=list(s.Q[state].values())
if s.pick_max:
      action=actions[argmax(weights)] # pick the action with the largest Q-value
else:
action=choices(actions,weights=weights,k=1)[0]
if verbose: print('episode: state=',state,'action=',action)
s.last_action=action
s.last_state=state
return action
def update_Q(s,new_state,reward=None):
# client must remember to call this after each episode!
mx=max(s.Q[new_state].values())
if reward is not None: # use passed reward
s.Q[s.last_state][s.last_action]+=s.alpha*(reward+s.gamma*mx-s.Q[s.last_state][s.last_action])
else: # used stored reward function
s.Q[s.last_state][s.last_action]+=s.alpha*(s.reward(s.last_action)+s.gamma*mx-s.Q[s.last_state][s.last_action])
def show_Q(s,f=stdout):
for state in s.Q:
actions=s.Q[state]
p=set(actions.values())
if len(p)==1 or p==set([1.0]): continue # don't print states never seen
print(f'state={state}\tactions={actions}',file=f)
if __name__ == '__main__':
from random import seed
seed(1)
def test_00():
ni,nj=4,3
goal=ni-1,nj-1,0
blocked=[(-1,j,0) for j in range(nj)]+\
[(ni,j,0) for j in range(nj)]+\
[(i,-1,0) for i in range(ni)]+\
[(i,nj,0) for i in range(ni)]+\
[(ni//2,nj//2,0)]
ql=Q_learner(reward=lambda state: 1.0/(1e-6+hypot(state[0]-goal[0],state[1]-goal[1])))
for i in range(ni):
for j in range(nj):
if (i,j,0) in blocked: continue
actions=[]
for action in ((0,1),(1,0),(-1,0),(0,-1),):
if (action[0]+i,action[1]+j,0) not in blocked:
actions.append(action)
ql.add_state((i,j,0),actions)
# training...
state=(0,0,0)
for i in range(100000):
action=ql.episode(state)
state=(state[0]+action[0],state[1]+action[1],0)
ql.update_Q(state)
ql.show_Q()
# check it has learnt...
state=(0,0,0)
for i in range(1000):
action=ql.episode(state)
state=(state[0]+action[0],state[1]+action[1],0)
print('episode % 3d: state='%i,state,'action=',action)
if state==goal: break
ql.update_Q(state)
#np.random.seed(1)
#np.set_printoptions(precision=4,suppress=True,linewidth=150,formatter={'complexfloat': lambda x: '% .4f%s%.4fj'%(x.real,('+','-')[x.imag<0],abs(x.imag),)})
test_00() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/Q_learning_generalized_01.py | Q_learning_generalized_01.py |
from math import cos,sin,pi
import numpy as np
from numpy.random import seed,standard_normal
from AIMM_simulator import Sim,Logger,Scenario,MME
class MyScenario(Scenario):
def loop(self,interval=1,radius=100.0,T=100.0,circle=False):
while True:
for i,ue in enumerate(self.sim.UEs):
if circle and i==0: # walk UE[0] around a circle
t=self.sim.env.now
ue.xyz[:2]=500+radius*cos(2*pi*t/T),500+radius*sin(2*pi*t/T)
else: # random walk, mean speed=1
ue.xyz[:2]+=standard_normal(2)/1.414
yield self.sim.wait(interval)
class Histogram_Logger(Logger):
# CQI histogram for UE[0]
h_cqi0=np.zeros(16)
h_cqi1=np.zeros(16)
def loop(self):
ue0=self.sim.UEs[0]
while self.sim.env.now<0.5*self.sim.until:
sc=ue0.get_serving_cell()
cqi=ue0.get_CQI()
if cqi is not None: self.h_cqi0[cqi[0]]+=1
yield self.sim.wait(self.logging_interval)
# half-time break - boost MIMO gain of all cells
for cell in self.sim.cells:
cell.set_MIMO_gain(6.0)
while True:
sc=ue0.get_serving_cell()
cqi=ue0.get_CQI()
if cqi is not None: self.h_cqi1[cqi[0]]+=1
yield self.sim.wait(self.logging_interval)
  def finalize(self):
    hs=(self.h_cqi0/np.sum(self.h_cqi0),self.h_cqi1/np.sum(self.h_cqi1))
    plot_histograms(hs)
def plot_histograms(hs,fn='examples/img/AIMM_simulator_example_n8'):
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
from fig_timestamp import fig_timestamp
ymax=max(max(h) for h in hs)
fig=plt.figure(figsize=(8,8))
ax=fig.add_subplot(1,1,1)
ax.grid(linewidth=1,color='gray',alpha=0.25)
ax.set_xlabel('CQI for UE[0]'); ax.set_ylabel('relative frequency')
ax.set_xlim(0,15); ax.set_ylim(0,1.1*ymax)
for h,fc,x in zip(hs,('b','r'),(-0.1,0.1),):
ax.add_collection(PatchCollection([Rectangle((i+x,0),0.2,hi) for i,hi in enumerate(h)],facecolor=fc,alpha=0.8))
ax.annotate('blue: normal\nred: after 6dB MIMO gain boost',(7,0.97*ymax),color='k',fontsize=14,bbox=dict(facecolor='w',edgecolor='k',boxstyle='round,pad=1'))
fig_timestamp(fig,author='Keith Briggs',fontsize=8)
for h,fc in zip(hs,('b','r'),):
mean=sum(i*hi for i,hi in enumerate(h))/np.sum(h)
ax.text(mean,-0.04,'mean',ha='center',va='center',rotation=90,size=8,bbox=dict(boxstyle='rarrow,pad=0.1',fc=fc,ec=fc,lw=1))
fig.savefig(fn+'.pdf')
fig.savefig(fn+'.png')
def example_n8():
sim=Sim()
for i in range(9): # cells
sim.make_cell(xyz=(300+200.0*(i//3),300+200.0*(i%3),10.0),power_dBm=10.0)
for i in range(9): # UEs
sim.make_UE(verbosity=1).attach_to_strongest_cell_simple_pathloss_model()
sim.UEs[0].set_xyz([503.0,507.0,2.0])
sim.UEs[0].attach_to_strongest_cell_simple_pathloss_model()
logger=Histogram_Logger(sim,logging_interval=1.0)
sim.add_logger(logger)
sim.add_scenario(MyScenario(sim))
sim.add_MME(MME(sim,verbosity=0,interval=20.0))
sim.run(until=2*5000)
if __name__=='__main__':
seed(1)
example_n8() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/AIMM_simulator_example_n8.py | AIMM_simulator_example_n8.py |
#!/usr/bin/env bash
PLOTTER="./src/realtime_plotter.py"
# do-nothing example - no output expected...
python3 examples/AIMM_simulator_example_n0.py
rc=$?
if [ $rc -ne 0 ]
then
  echo "AIMM_simulator_example_n0.py failed with exit code $rc - quitting!"
  exit 1
fi
# Tutorial example 1...
python3 examples/AIMM_simulator_example_n1.py
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n1.py failed - quitting!"
exit 1
fi
# Tutorial example 2...
python3 examples/AIMM_simulator_example_n2.py
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n2.py failed - quitting!"
exit 1
fi
# Tutorial example 3...
python3 examples/AIMM_simulator_example_n3.py
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n3.py failed - quitting!"
exit 1
fi
# Tutorial example 3a...
python3 examples/AIMM_simulator_example_n3a.py
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n3a.py failed - quitting!"
exit 1
fi
# Tutorial example 4...
python3 examples/AIMM_simulator_example_n4.py
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n4.py failed - quitting!"
exit 1
fi
# Tutorial example 5...
(time python3 examples/AIMM_simulator_example_n5.py | "${PLOTTER}" -nplots=3 -tmax=500 -ylims='{0: (-100,100), 1: (-100,100), 2: (0,30)}' -ylabels='{0: "UE[0] $x$", 1: "UE[0] $y$", 2: "UE[0] throughput"}' -fnb='examples/img/AIMM_simulator_example_n5' -author='Keith Briggs')
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n5.py failed - quitting!"
exit 1
fi
# Tutorial example 6...
(time python3 examples/AIMM_simulator_example_n6.py | "${PLOTTER}" -nplots=1 -tmax=100 -ylims='[(0,1),]' -ylabels='{0: "average downlink throughput over all UEs"}' -fnb='examples/img/AIMM_simulator_example_n6' -author='Keith Briggs')
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n6.py failed - quitting!"
exit 1
fi
# Tutorial example 7...
(python3 examples/AIMM_simulator_example_n7.py | "${PLOTTER}" -nplots=4 -tmax=2000 -ylims='{0: (0,10), 1: (0,1000), 2: (0,1000), 3: (0,30)}' -ylabels='{0: "UE[0] throughput", 1: "UE[0] $x$", 2: "UE[0] $y$", 3: "UE[0] serving cell"}' -fnb='examples/img/AIMM_simulator_example_n7' -author='Keith Briggs')
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n7.py failed - quitting!"
exit 1
fi
# Tutorial example 8...
python3 examples/AIMM_simulator_example_n8.py
if [ $? -ne 0 ]
then
echo "AIMM_simulator_example_n8.py failed - quitting!"
exit 1
fi
#bash run_RIC_example.sh | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/run_all_examples.sh | run_all_examples.sh |
from sys import stdout,stderr,exit
from copy import copy
from math import hypot
from random import choices
from numpy import array as np_array,argmax,exp as np_exp,sum as np_sum
class Q_learner:
def __init__(s,reward_function,alpha=0.5,gamma=1.0,verbose=False):
s.reward_function=reward_function # callback
s.verbose=verbose
s.alpha=alpha
s.gamma=gamma
s.Q={}
s.last_action=None
s.last_state =None
def add_state(s,state,actions=[],init=1.0):
s.Q.setdefault(state,{})
for action in actions:
s.Q[state][action]=init
def episode(s,state,eps=0.0,linear=True,pick_max=False,verbose=False):
    Q_row=s.Q[state]
    items=Q_row.items()
    actions=tuple(item[0] for item in items)
    weights=tuple(item[1] for item in items)
if not linear: # eps not used for linear case
weights=np_array(weights)
weights=np_exp(eps*weights/np_sum(weights))
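      # softmax-style reweighting: eps acts as an inverse temperature, so
      # eps=0 gives a uniform random choice and larger eps is greedier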
if pick_max:
action=actions[argmax(weights)]
else:
action=choices(actions,weights,k=1)[0]
if verbose: print(f'episode: state={state} weights={weights} action={action}',file=stderr)
s.last_action=copy(action) # FIXME is copy needed?
s.last_state=copy(state)
return s.last_action
def update_Q(s,new_state,reward_value=None):
# client must remember to call this after each episode!
mx=max(s.Q[new_state].values())
if reward_value is None: # used stored reward_function
s.Q[s.last_state][s.last_action]+=s.alpha*(s.reward_function(s.last_state,new_state)+s.gamma*mx-s.Q[s.last_state][s.last_action])
else: # use passed reward_value
s.Q[s.last_state][s.last_action]+=s.alpha*(reward_value+s.gamma*mx-s.Q[s.last_state][s.last_action])
def show_Q(s,f=stdout,verbosity=0):
states=list(s.Q.keys())
states.sort()
for state in states:
actions=s.Q[state]
p=set(actions.values())
if verbosity==0 and len(p)==1: continue # don't print states never seen
a=dict((x,float(f'{actions[x]:.2f}')) for x in actions)
print(f'state={state}\tactions={a}',file=f)
if __name__ == '__main__':
from random import seed
seed(1)
def test_00():
ni,nj=4,3
goal=ni-1,nj-1,0
blocked=[(-1,j,0) for j in range(nj)]+\
[(ni,j,0) for j in range(nj)]+\
[(i,-1,0) for i in range(ni)]+\
[(i,nj,0) for i in range(ni)]+\
[(ni//2,nj//2,0)]
def reward_function(state,new_state):
x,y=new_state[:2]
return 1.0/(1e-20+hypot(x-goal[0],y-goal[1]))**4
ql=Q_learner(reward_function=reward_function)
for i in range(ni):
for j in range(nj):
if (i,j,0) in blocked: continue
actions=[]
for action in ((0,1),(1,0),(-1,0),(0,-1),):
if (action[0]+i,action[1]+j,0) not in blocked:
actions.append(action)
ql.add_state((i,j,0),actions)
# training...
state=(0,0,0)
for i in range(100000):
action=ql.episode(state)
state=(state[0]+action[0],state[1]+action[1],0)
ql.update_Q(state)
ql.show_Q()
# check it has learnt...
state=(0,0,0)
for i in range(1000):
action=ql.episode(state)
state=(state[0]+action[0],state[1]+action[1],0)
print('episode % 3d: state='%i,state,'action=',action)
if state==goal: break
ql.update_Q(state)
#np.random.seed(1)
#np.set_printoptions(precision=4,suppress=True,linewidth=150,formatter={'complexfloat': lambda x: '% .4f%s%.4fj'%(x.real,('+','-')[x.imag<0],abs(x.imag),)})
test_00() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/Q_learning_generalized_08.py | Q_learning_generalized_08.py |
from sys import stderr,argv
from itertools import combinations
import numpy as np
from random import seed
from AIMM_simulator import Sim,Logger,MME,Scenario,RIC,np_array_to_str
from Q_learning_generalized_01 import Q_learner
class MyScenario(Scenario):
def loop(self,interval=0.1,speed=2.0):
a=speed/1.414
more_UEs=False
while True:
if not more_UEs and self.sim.env.now>15000.0:
for i in range(2):
ue=self.sim.make_UE(xyz=(450.0+20*i,550.0+20*i,2.0),verbosity=1)
ue.attach_to_strongest_cell_simple_pathloss_model()
print(f'ue[{i}]=',ue,file=stderr)
# FIXME the next two steps should be automatic when new UEs are added!
#self.sim.env.process(ue.run_rsrp_reports())
#self.sim.env.process(ue.run_subband_cqi_report())
more_UEs=True
# a random walk, but UE[0] staying near (500,500)...
for ue in self.sim.UEs: #[:1]:
dx=a*np.random.standard_normal(1)
dy=a*np.random.standard_normal(1)
if ue.xyz[0]>520.0: ue.xyz[0]-=abs(dx)
elif ue.xyz[0]<480.0: ue.xyz[0]+=abs(dx)
else: ue.xyz[0]+=dx
if ue.xyz[1]>520.0: ue.xyz[1]-=abs(dy)
elif ue.xyz[1]<480.0: ue.xyz[1]+=abs(dy)
else: ue.xyz[1]+=dy
for ue in self.sim.UEs[1:]: # don't confine other UEs
ue.xyz[:2]+=a*np.random.standard_normal(2)
yield self.sim.wait(interval)
class ThroughputLogger(Logger):
def loop(self):
#self.f.write('#time\tcell\tUE\tx\ty\tthroughput\n')
alpha=0.02; beta=1.0-alpha # smoothing parameters
ric_celledge=self.sim.ric.celledge # set in RIC
tp_smoothed=0.0
while True:
rsrp=[cell.get_RSRP_reports_dict() for cell in self.sim.cells]
for cell in self.sim.cells:
for ue_i in cell.reports['cqi']:
# next not needed as will never be true
#if cell.i!=self.sim.UEs[ue_i].get_serving_cell().i: continue
if ue_i>0: continue # only log UE[0]
celledge=1 if ue_i in ric_celledge else 0
xy=self.sim.get_UE_position(ue_i)[:2]
#print(rsrp)
tp=cell.get_UE_throughput(ue_i)
tp_smoothed=alpha*tp+beta*tp_smoothed
mask=cell.get_subband_mask()
split=1 if mask[0]!=mask[1] else 0
self.f.write(f'{self.sim.env.now:.1f}\t{cell.i}\t{rsrp[cell.i][ue_i]:.2f}\t{rsrp[cell.i][ue_i]:.2f}\t{celledge}\t{split}\t{tp_smoothed:.2f}\n') # no x,y
yield self.sim.wait(self.logging_interval)
class RSRPLogger(Logger):
def loop(self):
while True:
rep=[cell.get_RSRP_reports() for cell in self.sim.cells]
print(self.sim.env.now,rep)
yield self.sim.wait(self.logging_interval)
class MyRIC(RIC):
ql=Q_learner(reward=None)
celledge=set()
def loop(self,interval=10):
def reward(x):
return throughputs_smoothed[0]
n_ues=self.sim.get_nues()
celledge_rsrp_threshold=2.0 # hyperparameter
alpha=0.1
beta=1.0-alpha # smoothing parameters
cells=self.sim.cells
n_cells=len(cells)
state=0 # initial state normal (no cell-edge UEs)
throughputs_smoothed=np.zeros(n_ues)
MyRIC.ql.add_state(state,[0,])
for i,j in combinations(range(n_cells),2):
# state (i,j,l:bool) means that cells i and j have at least one
# cell-edge UE, and that the spectrum is split (l)
# actions will be to split spectrum (or not) between cells i and j
actions=((i,j,False),(i,j,True))
MyRIC.ql.add_state((i,j,False),actions)
MyRIC.ql.add_state((i,j,True), actions)
yield self.sim.wait(5000.0) # wait before switching on Q-learner
while True:
rsrp=[cell.get_RSRP_reports_dict() for cell in cells]
while True: # wait for a throughput report
throughputs=np.array([cells[ue.serving_cell.i].get_UE_throughput(ue.i) for ue in self.sim.UEs])
if not np.any(np.isneginf(throughputs)): break
yield self.sim.wait(1.0)
throughputs/=n_ues # average throughput per UE
if np.all(throughputs>0.0):
throughputs_smoothed=alpha*throughputs+beta*throughputs_smoothed
#print(rsrp,throughputs,throughputs_smoothed)
# look for cell-edge UEs...
      # FIXME do we need a minimum rsrp threshold?
# The condition should really be that the serving cell and exactly
# one other cell have nearly equal rsrp
for ue_k in range(1): #n_ues):
serving_cell_i=self.sim.get_serving_cell_i(ue_k)
for cell_i,cell_j in combinations(range(n_cells),2):
#if self.sim.env.now>14000: print('celledge:',self.sim.env.now,ue_k,rsrp[cell_i][ue_k],rsrp[cell_j][ue_k],file=stderr)
if serving_cell_i not in (cell_i,cell_j): continue
if abs(rsrp[cell_i][ue_k]-rsrp[cell_j][ue_k])<celledge_rsrp_threshold:
MyRIC.celledge.add(ue_k)
state=(cell_i,cell_j,True) # set state
state=MyRIC.ql.episode(state)
if state[2]: # ql is telling us to split the band
cells[state[0]].set_subband_mask((1.0,0.0))
cells[state[1]].set_subband_mask((0.0,1.0))
else: # ql is telling us to unsplit the band
cells[state[0]].set_subband_mask((1.0,1.0))
cells[state[1]].set_subband_mask((1.0,1.0))
yield self.sim.wait(5) # let throughputs adjust
MyRIC.ql.update_Q(state,reward=throughputs_smoothed[0])
else: # not cell-edge
if ue_k in MyRIC.celledge: MyRIC.celledge.remove(ue_k)
state=(cell_i,cell_j,False) # set state
state=MyRIC.ql.episode(state)
MyRIC.ql.update_Q(state,reward=throughputs_smoothed[0])
yield self.sim.wait(interval)
def finalize(self):
MyRIC.ql.show_Q(f=stderr)
def example_n9(until=1000):
sim=Sim()
for i in range(2):
sim.make_cell(xyz=(500.0+100*(i-0.5),500.0,10.0),n_subbands=2)
for i in range(1):
sim.make_UE(xyz=(500.0,500.0,2.0),verbosity=1).attach_to_nearest_cell()
sim.add_loggers([
ThroughputLogger(sim,logging_interval=5.0),
])
sim.add_scenario(MyScenario(sim))
sim.add_MME(MME(sim,interval=10.0,verbosity=0))
sim.add_ric(MyRIC(sim,interval=1.0))
sim.run(until=until)
if __name__ == '__main__':
np.random.seed(1)
seed(1)
until=20000
argc=len(argv)
if argc>1: until=float(argv[1])
example_n9(until) | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/AIMM_simulator_example_n9.py | AIMM_simulator_example_n9.py |
from sys import stderr,argv
from itertools import combinations
import numpy as np
from random import seed
from AIMM_simulator import Sim,Logger,MME,Scenario,RIC,np_array_to_str
from Q_learning_generalized_01 import Q_learner
class MyScenario_OLD(Scenario):
def loop(self,interval=1.0,speed=2.0):
a=speed/1.414
more_UEs=False
bot,top=300.0,700.0
while True:
# a random walk, but UE[0] staying near (500,500)...
for ue in self.sim.UEs:
dx=a*np.random.standard_normal(1)
dy=a*np.random.standard_normal(1)
if ue.xyz[0]>top: ue.xyz[0]-=abs(dx)
elif ue.xyz[0]<bot: ue.xyz[0]+=abs(dx)
else: ue.xyz[0]+=dx
if ue.xyz[1]>top: ue.xyz[1]-=abs(dy)
elif ue.xyz[1]<bot: ue.xyz[1]+=abs(dy)
else: ue.xyz[1]+=dy
yield self.sim.wait(interval)
class MyScenario(Scenario): # circle
def loop(self,interval=1.0,speed=2.0):
while True:
for ue in self.sim.UEs:
ue.xyz[0]=500.0+150.0*np.cos(1e-3*self.sim.env.now)+10*np.random.standard_normal(1)
ue.xyz[1]=500.0+150.0*np.sin(1e-3*self.sim.env.now)+10*np.random.standard_normal(1)
yield self.sim.wait(interval)
class RSRPLogger(Logger):
def loop(self):
while True:
rep=[cell.get_RSRP_reports() for cell in self.sim.cells]
print(self.sim.env.now,rep)
yield self.sim.wait(self.logging_interval)
class ThroughputLogger(Logger):
def loop(self):
alpha=0.01; beta=1.0-alpha # smoothing parameters
ric_celledge=self.sim.ric.celledge # set in RIC
tp_smoothed=0.0
while True:
rsrp=[cell.get_RSRP_reports_dict() for cell in self.sim.cells]
for cell in self.sim.cells:
for ue_i in cell.reports['cqi']:
if cell.i!=self.sim.UEs[ue_i].get_serving_cell().i: continue
if ue_i>0: continue # only log UE[0]
celledge=1 if (ue_i,cell.i,) in ric_celledge else 0
xy=self.sim.get_UE_position(ue_i)[:2]
tp=cell.get_UE_throughput(ue_i)
tp_smoothed=alpha*tp+beta*tp_smoothed
mask=cell.get_subband_mask()
split=1 if mask[0]!=mask[2] else 0
self.f.write(f'{self.sim.env.now:.1f}\t{cell.i}\t{rsrp[cell.i][ue_i]:.2f}\t{celledge}\t{split}\t{tp_smoothed:.2f}\n') # no x,y
yield self.sim.wait(self.logging_interval)
class MyRIC(RIC):
ql=Q_learner(reward=None)
celledge=set()
def loop(self,interval=10):
#def reward(x):
# return throughputs_smoothed[0]
n_ues=self.sim.get_nues()
celledge_diff_threshold=2.0 # hyperparameter
celledge_rsrp_threshold=-70.0 # hyperparameter
alpha=0.5
beta=1.0-alpha # smoothing parameters
cells=self.sim.cells
n_cells=len(cells)
state=0 # initial state normal (no cell-edge UEs)
throughputs_smoothed=np.zeros(n_ues)
MyRIC.ql.add_state(state,[0,])
for i,j in combinations(range(n_cells),2):
# state (i,j,l:bool) means that cells i and j have at least one
# cell-edge UE, and that the spectrum is split (l)
# actions will be to split spectrum (or not) between cells i and j
actions=((i,j,False),(i,j,True))
MyRIC.ql.add_state((i,j,False),actions)
MyRIC.ql.add_state((i,j,True), actions)
MyRIC.ql.show_Q(f=stderr)
yield self.sim.wait(10000.0) # wait before switching on Q-learner
while True:
rsrp=[cell.get_RSRP_reports_dict() for cell in cells]
while True: # wait for a throughput report
throughputs=np.array([cells[ue.serving_cell.i].get_UE_throughput(ue.i)[0] for ue in self.sim.UEs])
if not np.any(np.isneginf(throughputs)): break
yield self.sim.wait(1.0)
throughputs/=n_ues # average throughput per UE
if np.all(throughputs>0.0):
throughputs_smoothed=alpha*throughputs+beta*throughputs_smoothed
#print(rsrp,throughputs,throughputs_smoothed)
# look for cell-edge UEs...
# The condition should really be that the serving cell and exactly
# one other cell have nearly equal rsrp
for ue_k in range(1): # Assumes 1 UE!
serving_cell_i=self.sim.get_serving_cell_i(ue_k)
for cell_i,cell_j in combinations(range(n_cells),2):
if serving_cell_i not in (cell_i,cell_j): continue
if rsrp[cell_i][ue_k]<celledge_rsrp_threshold: continue
if rsrp[cell_j][ue_k]<celledge_rsrp_threshold: continue
if abs(rsrp[cell_i][ue_k]-rsrp[cell_j][ue_k])<celledge_diff_threshold:
MyRIC.celledge.add((ue_k,cell_i,))
MyRIC.celledge.add((ue_k,cell_j,))
state=(cell_i,cell_j,True) # set state
state=MyRIC.ql.episode(state)
if state[2]: # ql is telling us to split the band
if serving_cell_i==cell_i:
cells[cell_i].set_subband_mask((1,1,0))
cells[cell_j].set_subband_mask((0,0,1))
else: # serving_cell_i==cell_j
cells[cell_j].set_subband_mask((1,1,0))
cells[cell_i].set_subband_mask((0,0,1))
else: # ql is telling us to unsplit the band
cells[cell_i].set_subband_mask((1,1,1))
cells[cell_j].set_subband_mask((1,1,1))
while True: # wait for a throughput report
yield self.sim.wait(1.0)
tp=np.array([cells[serving_cell_i].get_UE_throughput(ue_k)[0] for ue in self.sim.UEs])
if not np.any(np.isneginf(tp)): break
yield self.sim.wait(1.0)
MyRIC.ql.update_Q(state,reward=np.min(tp))
#yield self.sim.wait(5) # let throughputs adjust
#MyRIC.ql.update_Q(state,reward=throughputs_smoothed[0])
else: # not cell-edge
if (ue_k,cell_i,) in MyRIC.celledge: MyRIC.celledge.remove((ue_k,cell_i,))
if (ue_k,cell_j,) in MyRIC.celledge: MyRIC.celledge.remove((ue_k,cell_j,))
state=(cell_i,cell_j,False) # set state
#print(f'state={state}',file=stderr)
state=MyRIC.ql.episode(state)
while True: # wait for a throughput report
yield self.sim.wait(1.0)
tp=np.array([cells[serving_cell_i].get_UE_throughput(ue_k)[0] for ue in self.sim.UEs])
if not np.any(np.isneginf(tp)): break
yield self.sim.wait(1.0)
MyRIC.ql.update_Q(state,reward=np.min(tp))
yield self.sim.wait(interval)
def finalize(self):
MyRIC.ql.show_Q(f=stderr)
def example_n10(until=1000):
sim=Sim()
# 7 cells in a hexagonal arrangement
for i in range(2): # top row
sim.make_cell(xyz=(500.0+200*(i-0.5),500.0+200.0*0.866,10.0),n_subbands=3)
for i in range(3): # middle row
sim.make_cell(xyz=(500.0+200*(i-1.0),500.0,10.0),n_subbands=3)
for i in range(2): # bottom row
sim.make_cell(xyz=(500.0+200*(i-0.5),500.0-200.0*0.866,10.0),n_subbands=3)
for i in range(4):
sim.make_UE(xyz=(500.0+10*i,500.0+10*i,2.0),verbosity=1).attach_to_nearest_cell()
sim.add_loggers([
ThroughputLogger(sim,logging_interval=20.0),
#RSRPLogger(sim,logging_interval=10.0),
])
sim.add_scenario(MyScenario(sim))
sim.add_MME(MME(sim,interval=10.0,verbosity=0))
sim.add_ric(MyRIC(sim,interval=100.0))
sim.run(until=until)
if __name__ == '__main__':
np.random.seed(1)
seed(1)
until=1000
argc=len(argv)
if argc>1: until=float(argv[1])
example_n10(until) | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/AIMM_simulator_example_n10.py | AIMM_simulator_example_n10.py |
from sys import stderr,argv
from math import pi
from itertools import combinations
import numpy as np
from random import seed
from AIMM_simulator import Sim,Logger,MME,Scenario,RIC,np_array_to_str
from Q_learning_generalized_02 import Q_learner
def plot_scenario(sim,fn='examples/img/AIMM_simulator_example_n11_scenario'):
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib import cm
from fig_timestamp import fig_timestamp
cmap=plt.get_cmap('Pastel1')
colors=cmap(np.linspace(0,1,8))
colors=('r','g','b','c','m','y','k','w')
fig=plt.figure(figsize=(10,8))
ax=fig.add_subplot(1,1,1)
ax.set_aspect('equal')
ax.set_xlim(-8000.0,8000.0)
ax.set_ylim(-7100.0,7100.0)
ax.grid(linewidth=1,color='gray',alpha=0.25)
for i,cell in enumerate(sim.cells):
ax.add_patch(Circle(cell.xyz[:2],2700.0,color=colors[cell.i],alpha=0.5))
ax.annotate('cell[%d]'%cell.i,1.3*cell.xyz[:2])
na=30
a=0.5e3
ue_circle_radius=sim.params['ue_circle_radius']
for i in range(na):
c,s=np.cos(2*pi*i/na),np.sin(2*pi*i/na)
ax.arrow(ue_circle_radius*c,ue_circle_radius*s,dx=-a*s,dy=a*c,width=50,color='k')
ax.annotate('UE path',(2000,4000),fontsize=20)
fig_timestamp(fig)
fig.savefig(fn+'.pdf')
fig.savefig(fn+'.png')
print(f'e {fn}.pdf &',file=stderr)
print(f'eog {fn}.png &',file=stderr)
class MyScenario(Scenario): # circle
def loop(self):
ue_circle_radius=self.sim.params['ue_circle_radius']
while True:
for ue in self.sim.UEs:
tm=1e-2*self.sim.env.now/(2.0*pi)
ue.xyz[0]=ue_circle_radius*np.cos(tm)
ue.xyz[1]=ue_circle_radius*np.sin(tm)
yield self.sim.wait(self.interval)
class ThroughputLogger(Logger):
def loop(self):
alpha0=0.2; beta0=1.0-alpha0 # smoothing parameters
alpha1=0.01; beta1=1.0-alpha1 # smoothing parameters
ric_celledge=self.sim.ric.celledge # set in RIC
tp_smoothed0=tp_smoothed1=0.0
while True:
for ue_i in range(1): # only log UE[0]
rsrp=[cell.get_RSRP_reports_dict()[ue_i] for cell in self.sim.cells]
serving_cell=self.sim.get_serving_cell(ue_i)
serving_cell_i=serving_cell.i
celledge=1 if (ue_i,serving_cell_i) in ric_celledge else 0
xy=self.sim.get_UE_position(ue_i)[:2]
tp=serving_cell.get_UE_throughput(ue_i)
tp_smoothed0=alpha0*tp+beta0*tp_smoothed0
tp_smoothed1=alpha1*tp+beta1*tp_smoothed1
mask=serving_cell.get_subband_mask()
split=serving_cell_i if mask[0]!=mask[-1] else 0
self.f.write(f'{self.sim.env.now:.1f}\t{serving_cell_i}\t{rsrp[1]:.2f}\t{celledge}\t{split}\t{tp_smoothed0:.2f}\t{tp_smoothed1:.2f}\n')
yield self.sim.wait(self.logging_interval)
class MyRIC(RIC):
ql=Q_learner(reward=None,pick_max=False)
celledge=set()
def loop(self):
n_ues=self.sim.get_nues()
celledge_diff_threshold=15.0 # hyperparameter
celledge_rsrp_threshold=-120.0 # hyperparameter
alpha=0.5
beta=1.0-alpha # smoothing parameters
cells=self.sim.cells
n_cells=len(cells)
throughputs_smoothed=np.zeros(n_ues)
split_ratio=self.sim.params['split_ratio']
n_subbands=self.sim.params['n_subbands']
ones =(1,)*n_subbands
lower=[0,]*n_subbands
upper=[1,]*n_subbands
n_lower=int(split_ratio*n_subbands)
for i in range(n_lower):
lower[i]=1
upper[i]=0
lower,upper=tuple(lower),tuple(upper)
print(f'lower mask={lower}, upper mask={upper}',file=stderr)
# wait before switching on simple heuristic ...
yield self.sim.wait(40000.0)
# run simple heuristic...
while self.sim.env.now<80000.0:
rsrp=[cell.get_RSRP_reports_dict() for cell in cells]
for ue_k in range(n_ues):
serving_cell_i=self.sim.get_serving_cell_i(ue_k)
for other_cell_i in range(n_cells):
if other_cell_i==serving_cell_i: continue
if rsrp[other_cell_i][ue_k]<celledge_rsrp_threshold: continue
celledge=(rsrp[serving_cell_i][ue_k]-rsrp[other_cell_i][ue_k])<celledge_diff_threshold
if celledge:
cells[serving_cell_i].set_subband_mask(lower)
cells[other_cell_i ].set_subband_mask(upper)
MyRIC.celledge.add((ue_k,serving_cell_i,))
MyRIC.celledge.add((ue_k,other_cell_i,))
else:
cells[serving_cell_i].set_subband_mask(ones)
cells[other_cell_i ].set_subband_mask(ones)
for x in ((ue_k,serving_cell_i),(ue_k,other_cell_i),):
if x in MyRIC.celledge: MyRIC.celledge.remove(x)
yield self.sim.wait(self.interval)
# run Q-learning...
state=(0,0,0) # initial state normal (no cell-edge UEs)
MyRIC.ql.add_state(state,state)
for i,j in combinations(range(n_cells),2):
# state (i,j,l:bool) means that cells i and j have at least one
# cell-edge UE, and that the spectrum is split (l)
# actions will be to split spectrum (or not) between cells i and j
actions=(False,True)
MyRIC.ql.add_state((i,j,False),actions)
MyRIC.ql.add_state((i,j,True), actions)
MyRIC.ql.add_state((j,i,False),actions)
MyRIC.ql.add_state((j,i,True), actions)
while True:
rsrp=[cell.get_RSRP_reports_dict() for cell in cells]
for ue_k in range(n_ues):
serving_cell_i=self.sim.get_serving_cell_i(ue_k)
for other_cell_i in range(n_cells):
if other_cell_i==serving_cell_i: continue
if rsrp[other_cell_i][ue_k]<celledge_rsrp_threshold: continue
celledge=(rsrp[serving_cell_i][ue_k]-rsrp[other_cell_i][ue_k])<celledge_diff_threshold
if celledge:
MyRIC.celledge.add((ue_k,serving_cell_i,))
MyRIC.celledge.add((ue_k,other_cell_i,))
action=MyRIC.ql.episode((serving_cell_i,other_cell_i,True,))
if action: # ql is telling us to split the band
cells[serving_cell_i].set_subband_mask(lower)
cells[other_cell_i ].set_subband_mask(upper)
state=(serving_cell_i,other_cell_i,True)
else: # ql is telling us to unsplit the band
cells[serving_cell_i].set_subband_mask(ones)
cells[other_cell_i ].set_subband_mask(ones)
state=(serving_cell_i,other_cell_i,False)
while True: # wait for a throughput report
yield self.sim.wait(1.0)
tp=np.array([cells[serving_cell_i].get_UE_throughput(ue_k)[0] for ue in self.sim.UEs])
if not np.any(np.isneginf(tp)): break
MyRIC.ql.update_Q(state,reward=np.min(tp)**1)
else: # not celledge
for x in ((ue_k,serving_cell_i),(ue_k,other_cell_i),):
if x in MyRIC.celledge: MyRIC.celledge.remove(x)
state=(0,0,0) # set state
cells[serving_cell_i].set_subband_mask(ones)
cells[other_cell_i ].set_subband_mask(ones)
#state=(serving_cell_i,other_cell_i,False) # set state
#state=MyRIC.ql.episode(state)
#if state[2]: # ql is telling us to split the band
# cells[serving_cell_i].set_subband_mask(lower)
# cells[other_cell_i ].set_subband_mask(upper)
#else: # ql is telling us to unsplit the band
# cells[serving_cell_i].set_subband_mask(ones)
# cells[other_cell_i ].set_subband_mask(ones)
#while True: # wait for a throughput report
# yield self.sim.wait(10.0)
# tp=np.array([cells[serving_cell_i].get_UE_throughput(ue_k)[0] for ue in self.sim.UEs])
# if not np.any(np.isneginf(tp)): break
#MyRIC.ql.update_Q(state,reward=np.min(tp)**1)
yield self.sim.wait(self.interval)
def finalize(self):
MyRIC.ql.show_Q(f=stderr)
def example_n11(until=1000,n_ues=1,radius=5000.0,power_dBm=30.0,n_subbands=8):
sim=Sim(params={'ue_circle_radius': 45*radius/50, 'n_subbands': n_subbands, 'split_ratio': 0.9})
# 7 cells in a hexagonal arrangement
# centre cell
sim.make_cell(xyz=(0.0,0.0,10.0),n_subbands=n_subbands,power_dBm=power_dBm)
# ring of six...
for i in range(6):
theta=2*pi*i/6
x=radius*np.cos(theta)
y=radius*np.sin(theta)
sim.make_cell(xyz=(x,y,10.0),n_subbands=n_subbands,power_dBm=power_dBm)
for i in range(n_ues):
sim.make_UE(xyz=(10*i,10*i,2.0),reporting_interval=2.0,verbosity=1).attach_to_nearest_cell()
sim.add_loggers([
ThroughputLogger(sim,logging_interval=50.0),
])
sim.add_scenario(MyScenario(sim,interval=1.0))
sim.add_MME(MME(sim,interval=10.0,verbosity=0))
sim.add_ric(MyRIC(sim,interval=10.0))
plot_scenario(sim)
sim.run(until=until)
if __name__ == '__main__':
np.random.seed(1)
seed(1)
until=1000
argc=len(argv)
if argc>1: until=float(argv[1])
example_n11(until) | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/AIMM_simulator_example_n11.py | AIMM_simulator_example_n11.py |
from sys import stdout,stderr,exit
from copy import copy
from math import hypot
from random import choices
from numpy import argmax
class Q_learner:
def __init__(s,reward,pick_max=False,alpha=0.5,gamma=1.0,verbose=False):
s.reward=reward # callback
s.pick_max=pick_max
s.verbose=verbose
s.nstates=0
s.alpha,s.gamma=alpha,gamma
s.beta=1.0-s.alpha
s.Q={}
s.last_action=None
s.last_state=None
def add_state(s,state,actions=[],init=1.0e-2):
s.Q[state]={}
s.nstates=len(s.Q)
for action in actions:
s.Q[state][action]=init
def episode(s,state,verbose=False):
actions=list(s.Q[state].keys())
weights=list(s.Q[state].values())
if s.pick_max:
action=actions[argmax(weights)] # FIXME
else:
action=choices(actions,weights=weights,k=1)[0]
if verbose: print('episode: state=',state,'action=',action)
s.last_action=action
s.last_state=copy(state)
return action
def update_Q(s,new_state,reward=None):
# client must remember to call this after each episode!
mx=max(s.Q[new_state].values())
if reward is not None: # use passed reward
s.Q[s.last_state][s.last_action]+=s.alpha*(reward+s.gamma*mx-s.Q[s.last_state][s.last_action])
    else: # use stored reward function
s.Q[s.last_state][s.last_action]+=s.alpha*(s.reward(s.last_action)+s.gamma*mx-s.Q[s.last_state][s.last_action])
def show_Q(s,f=stdout):
states=list(s.Q.keys())
states.sort()
for state in states:
actions=s.Q[state]
p=set(actions.values())
if len(p)==1: continue # don't print states never seen
a=dict((x,float(f'{actions[x]:.2f}')) for x in actions)
print(f'state={state}\tactions={a}',file=f)
if __name__ == '__main__':
from random import seed
seed(1)
def test_00():
ni,nj=4,3
goal=ni-1,nj-1,0
blocked=[(-1,j,0) for j in range(nj)]+\
[(ni,j,0) for j in range(nj)]+\
[(i,-1,0) for i in range(ni)]+\
[(i,nj,0) for i in range(ni)]+\
[(ni//2,nj//2,0)]
ql=Q_learner(reward=lambda state: 1.0/(1e-6+hypot(state[0]-goal[0],state[1]-goal[1])))
for i in range(ni):
for j in range(nj):
if (i,j,0) in blocked: continue
actions=[]
for action in ((0,1),(1,0),(-1,0),(0,-1),):
if (action[0]+i,action[1]+j,0) not in blocked:
actions.append(action)
ql.add_state((i,j,0),actions)
# training...
state=(0,0,0)
for i in range(100000):
action=ql.episode(state)
state=(state[0]+action[0],state[1]+action[1],0)
ql.update_Q(state)
ql.show_Q()
# check it has learnt...
state=(0,0,0)
for i in range(1000):
action=ql.episode(state)
state=(state[0]+action[0],state[1]+action[1],0)
print('episode % 3d: state='%i,state,'action=',action)
if state==goal: break
ql.update_Q(state)
#np.random.seed(1)
#np.set_printoptions(precision=4,suppress=True,linewidth=150,formatter={'complexfloat': lambda x: '% .4f%s%.4fj'%(x.real,('+','-')[x.imag<0],abs(x.imag),)})
test_00() | AIMM-simulator | /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/examples/Q_learning_generalized_02.py | Q_learning_generalized_02.py |
## About upload to lib pypi
1. The account and password are saved in .pypic.
2. Please update the lib version in setup.py before uploading to pypi.
3. Please do not upload unnecessary files (e.g. egg-info, pycache, etc.).
4. `Web` : <https://pypi.org/project/AIMaker/>
## If this is the first time uploading, please execute the following
Please copy .pypic to your home directory.
`$ cp .pypic ~`
Register account info.
`$ python setup.py register -r pypi`
## Upload lib to pypi
In the current directory, use the following command:
`$ python setup.py sdist upload -r pypi`
## Usage
`$ pip install AIMaker`
```python
import AIMaker as ai
ai.sendUpdateRequest({result})
ai.saveValidationResult({result})
``` | AIMaker | /AIMaker-1.4.3.tar.gz/AIMaker-1.4.3/README.md | README.md |
## About upload to lib pypi
1. The account and password are saved in .pypic.
2. Please update the lib version in setup.py before uploading to pypi.
3. Please do not upload unnecessary files (e.g. egg-info, pycache, etc.).
4. `Web` : <https://pypi.org/project/AIMakerMonitor/>
## If this is the first time uploading, please execute the following
Please copy .pypic to your home directory.
`$ cp .pypic ~`
Register account info.
`$ python setup.py register -r pypi`
## Upload lib to pypi
In the current directory, use the following command:
`$ python setup.py sdist upload -r pypi`
## Usage
`$ pip install AIMakerMonitor`
```python
import AIMakerMonitor as aim
aim.api_count_inc()
aim.counter_inc(chartName, labelName)
aim.gauge_set(chartName, labelName, score)
``` | AIMakerMonitor | /AIMakerMonitor-1.0.5.tar.gz/AIMakerMonitor-1.0.5/README.md | README.md |
# AIOAladdinConnect
Python module that allows interacting with Genie Aladdin Connect devices via AIOHttp
Note that shared doors are not currently supported, only doors that are owned by your account can be controlled
## Usage
```
import aiohttp
from AIOAladdinConnect import AladdinConnectClient
# Create a session using Aladdin Connect credentials
client_id = 1000
client_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30))
client = AladdinConnectClient(email, password, client_session, client_id)
await client.login()
# Get list of available doors and their status
doors = await client.get_doors()
my_door = doors[0]
# Issue commands for doors
await client.close_door(my_door['device_id'], my_door['door_number'])
await client.open_door(my_door['device_id'], my_door['door_number'])
# Get door status from internal structure. Must call client.get_doors() to update structure
# Door status also updates on state change from the web socket without calling client.get_doors
await client.async_get_door_status(my_door['device_id'], my_door['door_number'])
client.get_door_status(my_door['device_id'], my_door['door_number'])
# Get door link status from internal structure. Must call client.get_doors() to update structure
await client.async_get_door_link_status(my_door['device_id'], my_door['door_number'])
client.get_door_link_status(my_door['device_id'], my_door['door_number'])
# Get door battery status from internal structure. Must call client.get_doors() to update structure
client.get_battery_status(my_door['device_id'], my_door['door_number'])
client.get_rssi_status(my_door['device_id'], my_door['door_number'])
client.get_ble_strength(my_door['device_id'], my_door['door_number'])
# ble_strength and battery_status are used with the retrofit devices (ALKT1-RB), where the door
# position sensor has a BLE connection and reports a battery level. Other devices (actual door
# openers) tend to report 0 for these values.
# Async versions are available by prefixing the getters with async_ (examples):
await client.async_get_battery_status(my_door['device_id'], my_door['door_number'])
await client.async_get_rssi_status(my_door['device_id'], my_door['door_number'])
await client.async_get_ble_strength(my_door['device_id'], my_door['door_number'])
# Assign a callback for event-based status updates:
client.register_callback(your_callback_function)
# Close the sockets at the end of a session:
client.close()
# Get the auth token after login
token = client.auth_token()
# Set the auth token if known (can skip login)
client.set_auth_token(token)
```
| AIOAladdinConnect | /AIOAladdinConnect-0.1.56.tar.gz/AIOAladdinConnect-0.1.56/README.md | README.md |
import asyncio
import logging
import typing as t
if t.TYPE_CHECKING: # pragma: no cover
from .config import Config
class Component:
__depends_on__: t.ClassVar[t.Dict[str, t.Type["Component"]]] = {}
config: "Config"
logger: logging.Logger
loop: asyncio.AbstractEventLoop
_active: asyncio.Event
_released: asyncio.Event
required_by: t.Set["Component"]
depends_on: t.Set["Component"]
def __init_subclass__(cls) -> None:
cls.__depends_on__ = {}
for base in reversed(cls.__mro__):
try:
annotations = base.__dict__["__annotations__"]
except KeyError:
pass
else:
cls.__depends_on__.update(
(attr, class_)
for attr, class_ in annotations.items()
if isinstance(class_, type) and issubclass(class_, Component)
)
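    # Dependencies are declared as class-level annotations whose type is a
    # Component subclass; __init_subclass__ collects them into __depends_on__.
    # Hedged sketch ('Database' and 'App' are illustrative names only):
    #
    #     class Database(Component): ...
    #
    #     class App(Component):
    #         db: Database  # -> App.__depends_on__ == {'db': Database}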
def __init__(
self,
config: "Config",
logger: logging.Logger,
loop: asyncio.AbstractEventLoop,
) -> None:
self.config = config
self.logger = logger
self.loop = loop
self._active = asyncio.Event()
self._released = asyncio.Event()
self._released.set()
self.required_by = set()
self.depends_on = set()
def __repr__(self):
return f"<{self.__class__.__module__}.{self.__class__.__name__}()>"
async def _acquire(self, component: "Component") -> None:
await self._active.wait()
self.required_by.add(component)
self._released.clear()
async def _release(self, component: "Component") -> None:
self.required_by.remove(component)
if not self.required_by:
self._released.set()
async def _setup(self, depends_on: t.Dict[str, "Component"]) -> None:
if depends_on:
self.logger.info("%r: Acquiring dependencies...", self)
aws = []
for name, component in depends_on.items():
setattr(self, name, component)
self.depends_on.add(component)
aws.append(component._acquire(self))
await asyncio.gather(*aws)
self.logger.info("%r: Setting up...", self)
await self.on_setup()
self._active.set()
self.logger.info("%r: Active", self)
async def _shutdown(self) -> None:
if self.required_by:
self.logger.info("%r: Waiting for release...", self)
await self._released.wait()
self.logger.info("%r: Shutting down...", self)
try:
await self.on_shutdown()
except Exception: # pragma: no cover
self.logger.exception("%r: Unexpected error during shutdown", self)
if self.depends_on:
await asyncio.gather(
*(component._release(self) for component in self.depends_on)
)
self.depends_on.clear()
self._active.clear()
self.logger.info("%r: Inactive", self)
async def on_setup(self) -> None:
""" This method should be implemented by child class """
async def on_shutdown(self) -> None:
""" This method should be implemented by child class """ | AIOConductor | /AIOConductor-0.2-py36-none-any.whl/aioconductor/component.py | component.py |
import asyncio
import signal
import typing as t
from warnings import warn
from .component import Component
from .config import Config, ConfigPolicy, SimpleConfigPolicy
from .logging import (
Logger,
LoggingPolicy,
SimpleLoggingPolicy,
ModuleLoggingPolicy,
get_logger,
)
from .exc import CircularDependencyError
T = t.TypeVar("T", bound=Component)
class Conductor:
config_policy: ConfigPolicy
logging_policy: LoggingPolicy
logger: Logger
loop: asyncio.AbstractEventLoop
patches: t.Dict[t.Type[Component], t.Type[Component]]
components: t.Dict[t.Type[Component], Component]
def __init__(
self,
config_policy: t.Optional[ConfigPolicy] = None,
logging_policy: t.Optional[LoggingPolicy] = None,
config: t.Optional[Config] = None,
logger: t.Optional[Logger] = None,
loop: asyncio.AbstractEventLoop = None,
) -> None:
if config is not None:
warn(
"Parameter ``config`` is deprecated, "
"consider to use ``config_policy`` instead",
DeprecationWarning,
)
if logger is not None:
warn(
"Parameter ``logger`` is deprecated, "
"consider to use ``logging_policy`` instead",
DeprecationWarning,
)
if config_policy is None:
config_policy = SimpleConfigPolicy(config if config is not None else {})
if logging_policy is None:
if logger is not None:
logging_policy = SimpleLoggingPolicy(logger)
else:
logging_policy = ModuleLoggingPolicy()
self.config_policy = config_policy
self.logging_policy = logging_policy
self.logger = logger or get_logger("aioconductor")
self.loop = loop or asyncio.get_event_loop()
self.patches = {}
self.components = {}
def patch(
self,
component_class: t.Type[Component],
patch_class: t.Type[Component],
) -> None:
self.patches[component_class] = patch_class
def add(self, component_class: t.Type[T]) -> T:
actual_class = self.patches.get(component_class, component_class)
try:
component = self.components[actual_class]
except KeyError:
self.components[actual_class] = component = actual_class(
config=self.config_policy(actual_class),
logger=self.logging_policy(actual_class),
loop=self.loop,
)
return t.cast(T, component)
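    # Hedged usage sketch ('App', 'Database' and 'FakeDB' are illustrative
    # Component subclasses, not part of this package):
    #
    #     conductor = Conductor()
    #     conductor.patch(Database, FakeDB)  # substitute a test double
    #     app = conductor.add(App)           # dependencies are added during setup()
    #     conductor.serve()                  # setup, run until SIGINT/SIGTERM, shutdown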
async def setup(self) -> None:
scheduled: t.Set[Component] = set()
aws: t.List[t.Awaitable] = []
def schedule_setup(component: T, chain: t.Tuple[Component, ...] = ()) -> T:
if component in scheduled:
return component
chain += (component,)
depends_on = {}
for name, dependency_class in component.__depends_on__.items():
dependency = self.add(dependency_class)
if dependency in chain:
raise CircularDependencyError(*chain, dependency)
depends_on[name] = schedule_setup(dependency, chain)
aws.append(component._setup(depends_on))
scheduled.add(component)
return component
self.logger.info("Setting up components...")
for component in tuple(self.components.values()):
schedule_setup(component)
await asyncio.gather(*aws)
self.logger.info("All components are active")
async def shutdown(self) -> None:
self.logger.info("Shutting down components...")
await asyncio.gather(
*(component._shutdown() for component in self.components.values())
)
self.logger.info("All components are inactive")
def run(self, aw: t.Awaitable) -> None:
self.loop.run_until_complete(self.setup())
try:
self.loop.run_until_complete(aw)
finally:
self.loop.run_until_complete(self.shutdown())
def serve(self) -> None:
try:
self.loop.run_until_complete(self.setup())
self.loop.add_signal_handler(signal.SIGINT, self.loop.stop)
self.loop.add_signal_handler(signal.SIGTERM, self.loop.stop)
self.logger.info("Serving...")
self.loop.run_forever()
except KeyboardInterrupt: # pragma: no cover
pass
finally:
self.loop.remove_signal_handler(signal.SIGINT)
self.loop.remove_signal_handler(signal.SIGTERM)
self.loop.run_until_complete(self.shutdown()) | AIOConductor | /AIOConductor-0.2-py36-none-any.whl/aioconductor/conductor.py | conductor.py |
Copyright (c) 2018, Cottonwood Technology <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| AIOConductor | /AIOConductor-0.2-py36-none-any.whl/AIOConductor-0.2.dist-info/LICENCE.rst | LICENCE.rst |
# AIORedis-OpenTracing
This package enables distributed tracing for the Python asyncio Redis library via `The OpenTracing Project`.
It is heavily influenced by the [Redis Opentracing implementation](https://github.com/opentracing-contrib/python-redis).
Installation
============
Run the following command:
```
$ pip install AIORedis-Opentracing
```
Getting started
===============
Tracing an AIORedis client requires calling ``init_tracing()`` and optionally specifying an OpenTracing-compatible tracer.
```python
import aioredis
import aioredis_opentracing
# If not provided, opentracing.tracer will be used.
aioredis_opentracing.init_tracing(tracer)
redis = await aioredis.create_redis_pool('redis://localhost')
aioredis_opentracing.trace_client(redis)  # mark this client so its commands are traced
await redis.set('last_access', datetime.datetime.now())
```
It's possible to trace only specific pipelines:
```python
aioredis_opentracing.init_tracing(tracer)
pipe = redis.multi_exec()
aioredis_opentracing.trace_pipeline(pipe)
# This pipeline will be executed as a single MULTI command.
pipe.set('key1', 'value1')
pipe.set('key2', 'value2')
ok1, ok2 = await pipe.execute()
```
When pipeline commands are executed as a transaction, these commands will be grouped under a single ``MULTI`` operation. They'll also appear as a single operation in the trace. Outside of a transaction, each command will generate a span.
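A minimal sketch of the non-transactional case (assuming the client has been passed to ``trace_client`` as above): each command produces its own span, named after the Redis command.
```python
aioredis_opentracing.init_tracing(tracer)
aioredis_opentracing.trace_client(redis)
await redis.set('key1', 'value1')  # one 'SET' span
await redis.get('key1')            # one 'GET' span
```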
| AIORedis-OpenTracing | /AIORedis-OpenTracing-0.0.1.tar.gz/AIORedis-OpenTracing-0.0.1/README.md | README.md |
from builtins import str
from functools import wraps
import opentracing
from opentracing.ext import tags
_g_tracer = None
_g_start_span_cb = None
def init_tracing(tracer=None, start_span_cb=None):
"""
Set our tracer for Redis. Tracer objects from the
OpenTracing django/flask/pyramid libraries can be passed as well.
:param tracer: the tracer object.
"""
if start_span_cb is not None and not callable(start_span_cb):
raise ValueError('start_span_cb is not callable')
global _g_tracer, _g_start_span_cb
if hasattr(tracer, '_tracer'):
tracer = tracer._tracer
_g_tracer = tracer
_g_start_span_cb = start_span_cb
def trace_client(client):
"""
Marks a client to be traced. All commands and pipelines executed
through this client will be traced.
:param client: the Redis client object.
"""
_patch_client(client)
def trace_pipeline(pipe):
"""
Marks a pipeline to be traced.
    :param pipe: the Redis pipeline object to be traced.
If executed as a transaction, the commands will appear
under a single 'MULTI' operation.
"""
_patch_multi_exec_execute(pipe)
def _reset_tracing():
global _g_tracer, _g_start_span_cb
_g_tracer = _g_start_span_cb = None
def _get_tracer():
return opentracing.tracer if _g_tracer is None else _g_tracer
def _normalize_stmt(args):
return ' '.join([(arg.decode('utf-8') if isinstance(arg, (bytes, bytearray)) else str(arg)) for arg in args])
def _normalize_stmts(command_stack):
commands = [_normalize_stmt(command[1:]) for command in command_stack]
return ';'.join(commands)
def _set_base_span_tags(span, stmt):
span.set_tag(tags.COMPONENT, 'aioredis-py')
span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT)
span.set_tag(tags.DATABASE_TYPE, 'redis')
span.set_tag(tags.DATABASE_STATEMENT, stmt)
def _patch_client(client):
# Patch the outgoing commands.
_patch_obj_execute(client)
# Patch the created pipelines.
multi_exec_method = client.multi_exec
@wraps(multi_exec_method)
def tracing_multi_exec():
multi_exec = multi_exec_method()
_patch_multi_exec_execute(multi_exec)
return multi_exec
client.multi_exec = tracing_multi_exec
def _patch_multi_exec_execute(multi_exec):
tracer = _get_tracer()
# Patch the execute() method.
execute_method = multi_exec.execute
@wraps(execute_method)
async def tracing_execute(*, return_exceptions=False):
if not multi_exec._pipeline:
# Nothing to process/handle.
return await execute_method(return_exceptions=return_exceptions)
with tracer.start_active_span('MULTI') as scope:
span = scope.span
_set_base_span_tags(span, _normalize_stmts(multi_exec._pipeline))
_call_start_span_cb(span)
try:
res = await execute_method(return_exceptions=return_exceptions)
except Exception as exc:
span.set_tag(tags.ERROR, True)
span.log_kv({
'event': tags.ERROR,
'error.object': exc,
})
raise
return res
multi_exec.execute = tracing_execute
def _patch_obj_execute(redis_obj):
tracer = _get_tracer()
execute_command_method = redis_obj.execute
@wraps(execute_command_method)
def tracing_execute_command(*args, **kwargs):
reported_args = args
command = reported_args[0].decode('utf-8')
with tracer.start_active_span(command) as scope:
span = scope.span
_set_base_span_tags(span, _normalize_stmt(reported_args))
_call_start_span_cb(span)
try:
rv = execute_command_method(*args, **kwargs)
except Exception as exc:
span.set_tag(tags.ERROR, True)
span.log_kv({
'event': tags.ERROR,
'error.object': exc,
})
raise
return rv
redis_obj.execute = tracing_execute_command
def _call_start_span_cb(span):
if _g_start_span_cb is None:
return
try:
_g_start_span_cb(span)
except Exception:
pass | AIORedis-OpenTracing | /AIORedis-OpenTracing-0.0.1.tar.gz/AIORedis-OpenTracing-0.0.1/aioredis_opentracing/tracing.py | tracing.py |
================================
Client for Honeywell Thermostats
================================
**NOTE:** This is for the US model and website. Be aware that EU models are different!
An asyncio version based on https://github.com/kk7ds/somecomfort.git
Installing
----------
::
$ pip install AIOSomecomfort
$ test.py -h
usage: test.py [-h] [--get_fan_mode] [--set_fan_mode SET_FAN_MODE]
[--get_system_mode] [--set_system_mode SET_SYSTEM_MODE]
[--get_setpoint_cool]
[--set_setpoint_cool SET_SETPOINT_COOL]
[--get_setpoint_heat]
[--set_setpoint_heat SET_SETPOINT_HEAT]
[--get_current_temperature] [--get_current_humidity]
[--get_outdoor_temperature] [--get_outdoor_humidity]
[--get_equipment_output_status] [--cancel_hold]
[--permanent_hold] [--hold_until HOLD_UNTIL] [--get_hold]
[--username USERNAME] [--password PASSWORD]
[--device DEVICE] [--login] [--devices]
optional arguments:
-h, --help show this help message and exit
--get_fan_mode Get fan_mode
--set_fan_mode SET_FAN_MODE
Set fan_mode
--get_system_mode Get system_mode
--set_system_mode SET_SYSTEM_MODE
Set system_mode
--get_setpoint_cool Get setpoint_cool
--set_setpoint_cool SET_SETPOINT_COOL
Set setpoint_cool
--get_setpoint_heat Get setpoint_heat
--set_setpoint_heat SET_SETPOINT_HEAT
Set setpoint_heat
--get_current_temperature
Get current_temperature
--get_current_humidity
Get current_humidity
--get_outdoor_temperature
Get outdoor_temperature
--get_outdoor_humidity
Get outdoor_humidity
--get_equipment_output_status
Get equipment_output_status
--cancel_hold Set cancel_hold
--permanent_hold Set permanent_hold
--hold_until HOLD_UNTIL
Hold until time (HH:MM)
--get_hold Get the current hold mode
--username USERNAME username
--password PASSWORD password
--device DEVICE device
--login Just try to login
--devices List available devices
--loop Loop on temperature and operating mode
Using
-----
::
$ test.py --username foo --password bar --login
Success
$ test.py --devices
+----------+---------+---------------+
| Location | Device | Name |
+----------+---------+---------------+
| 0123456 | 1177223 | My Thermostat |
+----------+---------+---------------+
$ test.py --get_current_temperature
58.0
$ test.py --get_setpoint_heat
58.0
$ test.py --set_setpoint_heat 56
$ test.py --get_setpoint_heat
56.0
$ test.py --loop
56.0
off
56.0
heat
| AIOSomecomfort | /AIOSomecomfort-0.0.17.tar.gz/AIOSomecomfort-0.0.17/README.rst | README.rst |
from __future__ import annotations
import datetime
import logging
import urllib.parse as urllib
import aiohttp
from yarl import URL
from .location import Location
from .exceptions import *
_LOG = logging.getLogger("somecomfort")
AUTH_COOKIE = ".ASPXAUTH_TRUEHOME"
DOMAIN = "www.mytotalconnectcomfort.com"
MIN_LOGIN_TIME = datetime.timedelta(minutes=10)
MAX_LOGIN_ATTEMPTS = 3
def _convert_errors(fn):
    async def wrapper(*args, **kwargs):
        try:
            # fn is a coroutine function: await it here so aiohttp.ClientError
            # raised during execution is caught, not just at coroutine creation
            return await fn(*args, **kwargs)
        except aiohttp.ClientError as ex:
            _LOG.error("Connection Timeout")
            raise ConnectionError("Connection Timeout") from ex
    return wrapper
class AIOSomeComfort(object):
"""AIOSomeComfort API Class."""
def __init__(
self,
username: str | None,
password: str | None,
timeout=30,
session: aiohttp.ClientSession = None,
) -> None:
        self._username = username
        self._password = password
self._session = session
self._timeout = timeout
self._headers = {
"X-Requested-With": "XMLHttpRequest",
"Accept": "*/*",
"Connection": "keep-alive",
"Accept-Encoding": "gzip, deflate",
}
self._locations = {}
self._baseurl = f"https://{DOMAIN}"
self._null_cookie_count = 0
self._next_login = datetime.datetime.utcnow()
def _set_null_count(self) -> None:
"""Set null cookie count and retry timout."""
self._null_cookie_count += 1
if self._null_cookie_count >= MAX_LOGIN_ATTEMPTS:
self._next_login = datetime.datetime.utcnow() + MIN_LOGIN_TIME
@_convert_errors
async def login(self) -> None:
"""Login to Honeywell API."""
url = f"{self._baseurl}/portal"
params = {
"timeOffset": "480",
"UserName": self._username,
"Password": self._password,
"RememberMe": "false",
}
self._headers["Content-Type"] = "application/x-www-form-urlencoded"
# can't use params because AIOHttp doesn't URL encode like API expects (%40 for @)
url = URL(f"{url}?{urllib.urlencode(params)}", encoded=True)
self._session.cookie_jar.clear_domain(DOMAIN)
if self._next_login > datetime.datetime.utcnow():
raise APIRateLimited(f"Rate limit on login: Waiting {MIN_LOGIN_TIME}")
resp = await self._session.post(
url, timeout=self._timeout, headers=self._headers
)
        # The TRUEHOME cookie is malformed in some way - need to clear the expiration to make it work with aiohttp
cookies = resp.cookies
if AUTH_COOKIE in cookies:
cookies[AUTH_COOKIE]["expires"] = ""
self._session.cookie_jar.update_cookies(cookies=cookies)
if resp.status == 401:
# This never seems to happen currently, but
# I'll leave it here in case they start doing the
# right thing.
_LOG.error("Login as %s failed", self._username)
self._set_null_count()
raise AuthError("Login as %s failed" % self._username)
elif resp.status != 200:
_LOG.error("Connection error %s", resp.status)
raise ConnectionError("Connection error %s" % resp.status)
self._headers["Content-Type"] = "application/json"
resp2 = await self._session.get(
f"{self._baseurl}/portal", timeout=self._timeout, headers=self._headers
) # this should redirect if we're logged in
# if we get null cookies for this, the login has failed.
if AUTH_COOKIE in resp2.cookies and resp2.cookies[AUTH_COOKIE].value == "":
_LOG.error("Login null cookie - site may be down")
self._set_null_count()
raise AuthError("Null cookie connection error %s" % resp2.status)
if resp2.status == 401:
_LOG.error(
"Login as %s failed - Unauthorized %s",
self._username,
resp2.status,
)
self._set_null_count()
raise AuthError(
"Login as %s failed - Unauthorized %s" % (self._username, resp2.status)
)
if resp2.status != 200:
_LOG.error("Connection error %s", resp2.status)
raise ConnectionError("Connection error %s" % resp2.status)
    async def _request_json(self, method: str, *args, **kwargs) -> dict | None:
if "timeout" not in kwargs:
kwargs["timeout"] = self._timeout
kwargs["headers"] = self._headers
resp: aiohttp.ClientResponse = await getattr(self._session, method)(
*args, **kwargs
)
# Check again for the deformed cookie
        # The API sends a null cookie when it really wants it to expire
cookies = resp.cookies
if AUTH_COOKIE in cookies:
cookies[AUTH_COOKIE]["expires"] = ""
self._session.cookie_jar.update_cookies(cookies=cookies)
req = args[0].replace(self._baseurl, "")
if resp.status == 200 and resp.content_type == "application/json":
self._null_cookie_count = 0
return await resp.json()
if resp.status == 401:
_LOG.error("401 Error at update (Key expired?).")
raise UnauthorizedError("401 Error at update (Key Expired?).")
if resp.status == 503:
_LOG.error("Service Unavailable.")
raise ConnectionError("Service Unavailable.")
# Some other non 200 status
_LOG.error("API returned %s from %s request", resp.status, req)
_LOG.debug("request json response %s with payload %s", resp, await resp.text())
raise UnexpectedResponse("API returned %s, %s" % (resp.status, req))
    async def _get_json(self, *args, **kwargs) -> dict | None:
return await self._request_json("get", *args, **kwargs)
    async def _post_json(self, *args, **kwargs) -> dict | None:
return await self._request_json("post", *args, **kwargs)
async def _get_locations(self) -> list:
json_responses: list = []
url = f"{self._baseurl}/portal/Location/GetLocationListData/"
for page in range(1, 5): # pages 1 - 4
params = {"page": page, "filter": ""}
resp = await self._session.post(url, params=params, headers=self._headers)
if resp.content_type == "application/json":
json_responses.extend(await resp.json())
cookies = resp.cookies
if AUTH_COOKIE in cookies:
cookies[AUTH_COOKIE]["expires"] = ""
self._session.cookie_jar.update_cookies(cookies=cookies)
if len(json_responses) > 0:
return json_responses
return None
    async def get_thermostat_data(self, thermostat_id: str) -> dict:
"""Get thermostat data from API"""
url = f"{self._baseurl}/portal/Device/CheckDataSession/{thermostat_id}"
return await self._get_json(url)
async def set_thermostat_settings(
self, thermostat_id: str, settings: dict[str, str]
) -> None:
"""Set thermostat settings from a dict."""
data = {
"DeviceID": thermostat_id,
"SystemSwitch": None,
"HeatSetpoint": None,
"CoolSetpoint": None,
"HeatNextPeriod": None,
"CoolNextPeriod": None,
"StatusHeat": None,
"StatusCool": None,
"FanMode": None,
}
data.update(settings)
_LOG.debug("Sending Data: %s", data)
url = f"{self._baseurl}/portal/Device/SubmitControlScreenChanges"
result = await self._post_json(url, json=data)
_LOG.debug("Received setting response %s", result)
if result is None or result.get("success") != 1:
raise APIError("API rejected thermostat settings")
@_convert_errors
async def discover(self) -> None:
"""Discover devices on the account."""
raw_locations = await self._get_locations()
        if raw_locations is not None:
            for raw_location in raw_locations:
                try:
                    location = await Location.from_api_response(self, raw_location)
                except KeyError as ex:
                    _LOG.exception(
                        "Failed to process location `%s`: missing %s element",
                        raw_location.get("LocationID", "unknown"),
                        ex.args[0],
                    )
                    continue
                self._locations[location.locationid] = location
@property
def locations_by_id(self) -> dict:
"""A dict of all locations indexed by id"""
return self._locations
@property
def default_device(self) -> str | None:
"""This is the first device found.
It is only useful if the account has only one device and location
in your account (which is pretty common). It is None if there
are no devices in the account.
"""
for location in self.locations_by_id.values():
for device in location.devices_by_id.values():
return device
return None
def get_device(self, device_id: str) -> str | None:
"""Find a device by id.
:returns: None if not found.
"""
for location in self.locations_by_id.values():
for ident, device in location.devices_by_id.items():
if ident == device_id:
return device | AIOSomecomfort | /AIOSomecomfort-0.0.17.tar.gz/AIOSomecomfort-0.0.17/aiosomecomfort/__init__.py | __init__.py |
from __future__ import annotations
import copy
import datetime
import logging
import time
from .exceptions import *
FAN_MODES = ["auto", "on", "circulate", "follow schedule"]
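# The API's SystemSwitchPosition indexes this list; two positions both read back as "auto".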
SYSTEM_MODES = ["emheat", "heat", "off", "cool", "auto", "auto"]
HOLD_TYPES = ["schedule", "temporary", "permanent"]
EQUIPMENT_OUTPUT_STATUS = ["off/fan", "heat", "cool"]
_LOG = logging.getLogger("somecomfort")
def _hold_quarter_hours(deadline):
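    """Encode a time on a 15-minute boundary as quarter-hours since midnight
    (e.g. 16:30 -> 66), which is how the API expresses hold deadlines."""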
if deadline.minute not in (0, 15, 30, 45):
raise SomeComfortError("Invalid time: must be on a 15-minute boundary")
return int(((deadline.hour * 60) + deadline.minute) / 15)
def _hold_deadline(quarter_hours) -> datetime.time:
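    """Inverse of _hold_quarter_hours: e.g. 66 -> datetime.time(16, 30)."""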
minutes = quarter_hours * 15
return datetime.time(hour=int(minutes / 60), minute=minutes % 60)
class Device(object):
"""Device class for Honeywell device."""
def __init__(self, client, location):
self._client = client
self._location = location
self._data = {}
self._last_refresh = 0
self._deviceid = None
self._macid = None
self._name = None
self._alive = None
self._commslost = None
@classmethod
async def from_location_response(cls, client, location, response) -> Device:
"""Extract device from location response."""
self = cls(client, location)
self._deviceid = response["DeviceID"]
self._macid = response["MacID"]
self._name = response["Name"]
await self.refresh()
return self
async def refresh(self) -> None:
"""Refresh the Honeywell device data."""
data = await self._client.get_thermostat_data(self.deviceid)
if data is not None:
if not data["success"]:
_LOG.error("API reported failure to query device %s" % self.deviceid)
self._alive = data["deviceLive"]
self._commslost = data["communicationLost"]
self._data = data["latestData"]
self._last_refresh = time.time()
@property
def deviceid(self) -> str:
"""The device identifier"""
return self._deviceid
@property
def mac_address(self) -> str:
"""The MAC address of the device"""
return self._macid
@property
def name(self) -> str:
"""The user-set name of this device"""
return self._name
@property
def is_alive(self) -> bool:
"""A boolean indicating whether the device is connected"""
return self._alive and not self._commslost
@property
def fan_running(self) -> bool:
"""Returns a boolean indicating the current state of the fan"""
if self._data["hasFan"]:
return self._data["fanData"]["fanIsRunning"]
return False
@property
def fan_mode(self) -> str | None:
"""Returns one of FAN_MODES indicating the current setting"""
        try:
            return FAN_MODES[self._data["fanData"]["fanMode"]]
        except (KeyError, TypeError, IndexError) as exc:
            if self._data["hasFan"]:
                raise APIError(
                    "Unknown fan mode %s" % self._data.get("fanData", {}).get("fanMode")
                ) from exc
            return None
async def set_fan_mode(self, mode) -> None:
"""Set the fan mode async."""
try:
mode_index = FAN_MODES.index(mode)
except ValueError as ex:
raise SomeComfortError("Invalid fan mode %s" % mode) from ex
key = f"fanMode{mode.title()}Allowed"
if not self._data["fanData"][key]:
raise SomeComfortError("Device does not support %s" % mode)
await self._client.set_thermostat_settings(
self.deviceid, {"FanMode": mode_index}
)
self._data["fanData"]["fanMode"] = mode_index
@property
def system_mode(self) -> str:
"""Returns one of SYSTEM_MODES indicating the current setting"""
try:
return SYSTEM_MODES[self._data["uiData"]["SystemSwitchPosition"]]
        except (KeyError, IndexError) as exc:
raise APIError(
"Unknown system mode %s"
% (self._data["uiData"]["SystemSwitchPosition"])
) from exc
async def set_system_mode(self, mode) -> None:
"""Async set the system mode."""
try:
mode_index = SYSTEM_MODES.index(mode)
except ValueError as exc:
raise SomeComfortError(f"Invalid system mode {mode}") from exc
if mode == "emheat":
key = "SwitchEmergencyHeatAllowed"
else:
key = f"Switch{mode.title()}Allowed"
try:
if not self._data["uiData"][key]:
raise SomeComfortError(f"Device does not support {mode}")
except KeyError as exc:
raise APIError(f"Unknown Key: {key}") from exc
await self._client.set_thermostat_settings(
self.deviceid, {"SystemSwitch": mode_index}
)
self._data["uiData"]["SystemSwitchPosition"] = mode_index
@property
def setpoint_cool(self) -> float:
"""The target temperature when in cooling mode"""
return self._data["uiData"]["CoolSetpoint"]
async def set_setpoint_cool(self, temp) -> None:
"""Async set the target temperature when in cooling mode"""
lower = self._data["uiData"]["CoolLowerSetptLimit"]
upper = self._data["uiData"]["CoolUpperSetptLimit"]
if temp > upper or temp < lower:
raise SomeComfortError(f"Setpoint outside range {lower}-{upper}")
await self._client.set_thermostat_settings(
self.deviceid, {"CoolSetpoint": temp}
)
self._data["uiData"]["CoolSetpoint"] = temp
@property
def setpoint_heat(self) -> float:
"""The target temperature when in heating mode"""
return self._data["uiData"]["HeatSetpoint"]
async def set_setpoint_heat(self, temp) -> None:
"""Async set the target temperature when in heating mode"""
lower = self._data["uiData"]["HeatLowerSetptLimit"]
upper = self._data["uiData"]["HeatUpperSetptLimit"]
# HA sometimes doesn't send the temp, so set to current
if temp is None:
temp = self._data["uiData"]["HeatSetpoint"]
_LOG.error("Didn't receive the temp to set. Setting to current temp.")
if temp > upper or temp < lower:
raise SomeComfortError(f"Setpoint outside range {lower}-{upper}")
await self._client.set_thermostat_settings(
self.deviceid, {"HeatSetpoint": temp}
)
self._data["uiData"]["HeatSetpoint"] = temp
def _get_hold(self, which) -> bool | datetime.time:
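        """Return False when following the schedule, True for a permanent
        hold, or the hold deadline as a datetime.time for a temporary hold."""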
try:
hold = HOLD_TYPES[self._data["uiData"][f"Status{which}"]]
        except (KeyError, IndexError) as exc:
            mode = self._data["uiData"].get(f"Status{which}")
            raise APIError(f"Unknown hold mode {mode}") from exc
period = self._data["uiData"][f"{which}NextPeriod"]
if hold == "schedule":
return False
if hold == "permanent":
return True
else:
return _hold_deadline(period)
async def _set_hold(self, which, hold, temperature=None) -> None:
settings = {}
if hold is True:
settings = {
"StatusCool": HOLD_TYPES.index("permanent"),
"StatusHeat": HOLD_TYPES.index("permanent"),
# "%sNextPeriod" % which: 0,
}
elif hold is False:
settings = {
"StatusCool": HOLD_TYPES.index("schedule"),
"StatusHeat": HOLD_TYPES.index("schedule"),
# "%sNextPeriod" % which: 0,
}
elif isinstance(hold, datetime.time):
qh = _hold_quarter_hours(hold)
settings = {
"StatusCool": HOLD_TYPES.index("temporary"),
"CoolNextPeriod": qh,
"StatusHeat": HOLD_TYPES.index("temporary"),
"HeatNextPeriod": qh,
}
else:
raise SomeComfortError("Hold should be True, False, or datetime.time")
        if temperature is not None:
lower = self._data["uiData"][f"{which}LowerSetptLimit"]
upper = self._data["uiData"][f"{which}UpperSetptLimit"]
if temperature > upper or temperature < lower:
raise SomeComfortError(f"Setpoint outside range {lower}-{upper}")
settings.update({f"{which}Setpoint": temperature})
await self._client.set_thermostat_settings(self.deviceid, settings)
self._data["uiData"].update(settings)
@property
    def hold_heat(self) -> bool | datetime.time:
"""Return hold heat mode."""
return self._get_hold("Heat")
async def set_hold_heat(self, value, temperature=None) -> None:
"""Async set hold heat mode."""
await self._set_hold("Heat", value, temperature)
@property
    def hold_cool(self) -> bool | datetime.time:
"""Return hold cool mode."""
return self._get_hold("Cool")
async def set_hold_cool(self, value, temperature=None) -> None:
"""Async set hold cool mode."""
await self._set_hold("Cool", value, temperature)
@property
def current_temperature(self) -> float:
"""The current measured ambient temperature"""
return self._data["uiData"]["DispTemperature"]
@property
def current_humidity(self) -> float | None:
"""The current measured ambient humidity"""
return (
self._data["uiData"].get("IndoorHumidity")
if self._data["uiData"].get("IndoorHumiditySensorAvailable")
and self._data["uiData"].get("IndoorHumiditySensorNotFault")
else None
)
@property
def equipment_output_status(self) -> str:
"""The current equipment output status"""
if self._data["uiData"]["EquipmentOutputStatus"] in (0, None):
if self.fan_running:
return "fan"
else:
return "off"
return EQUIPMENT_OUTPUT_STATUS[self._data["uiData"]["EquipmentOutputStatus"]]
@property
def outdoor_temperature(self) -> float | None:
"""The current measured outdoor temperature"""
if self._data["uiData"]["OutdoorTemperatureAvailable"]:
return self._data["uiData"]["OutdoorTemperature"]
return None
@property
def outdoor_humidity(self) -> float | None:
"""The current measured outdoor humidity"""
if self._data["uiData"]["OutdoorHumidityAvailable"]:
return self._data["uiData"]["OutdoorHumidity"]
return None
@property
def temperature_unit(self) -> str:
"""The temperature unit currently in use. Either 'F' or 'C'"""
return self._data["uiData"]["DisplayUnits"]
@property
def raw_ui_data(self) -> dict:
"""The raw uiData structure from the API.
Note that this is read only!
"""
return copy.deepcopy(self._data["uiData"])
@property
def raw_fan_data(self) -> dict:
"""The raw fanData structure from the API.
Note that this is read only!
"""
return copy.deepcopy(self._data["fanData"])
@property
def raw_dr_data(self) -> dict:
"""The raw drData structure from the API.
Note that this is read only!
"""
return copy.deepcopy(self._data["drData"])
def __repr__(self) -> str:
return f"Device<{self.deviceid}:{self.name}>" | AIOSomecomfort | /AIOSomecomfort-0.0.17.tar.gz/AIOSomecomfort-0.0.17/aiosomecomfort/device.py | device.py |
from __future__ import unicode_literals
import json
import logging
import sys
import re
try:
# pylint: disable=no-name-in-module
from urllib.parse import urlencode
except ImportError:
# pylint: disable=no-name-in-module
from urllib import urlencode
from .exceptions import Error, ProgrammingError
from .row import Row
from .extensions import _convert_to_python, _adapt_from_python, _column_stripper
if sys.version_info[0] >= 3:
basestring = str
_urlencode = urlencode
else:
# avoid UnicodeEncodeError from urlencode
def _urlencode(query, doseq=0):
return urlencode(dict(
(k if isinstance(k, bytes) else k.encode('utf-8'),
v if isinstance(v, bytes) else v.encode('utf-8'))
for k, v in query.items()), doseq=doseq)
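# A minimal usage sketch (assumes a reachable rqlite node; Connection is
# defined in aiorqlite.connections):
#
#   async with Connection('localhost', 4001) as conn:
#       cursor = conn.cursor()
#       await cursor.execute('SELECT 1')
#       row = await cursor.fetchone()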
class Cursor(object):
arraysize = 1
def __init__(self, connection):
self._connection = connection
self.messages = []
self.lastrowid = None
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._rows = None
self._column_type_cache = {}
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, exc_traceback):
await self.close()
@property
def connection(self):
return self._connection
async def close(self):
self._rows = None
async def _request(self, method, uri, body=None, headers={}):
logger = logging.getLogger(__name__)
debug = logger.getEffectiveLevel() < logging.DEBUG
logger.debug(
'request method: %s uri: %s headers: %s body: %s',
method,
uri,
headers,
body)
response = await self.connection._fetch_response(
method, uri, body=body, headers=headers)
logger.debug(
"status: %s reason: %s",
response.status,
response.reason)
response_text = await response.read()
response_text = response_text.decode('utf-8')
logger.debug("raw response: %s", response_text)
response_json = json.loads(
response_text, object_pairs_hook=dict)
if debug:
logger.debug(
"formatted response: %s",
json.dumps(
response_json,
indent=4))
return response_json
async def _substitute_params(self, operation, parameters):
'''
SQLite natively supports only the types TEXT, INTEGER, REAL, BLOB and
NULL
'''
param_matches = 0
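        # DB-API parameter styles: 'qmark' placeholders (?) and 'named'
        # placeholders (:name); mixing both in one statement is rejected below.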
        qmark_re = re.compile(r"(\?)")
        named_re = re.compile(r"(:[a-zA-Z]+?\b)")
qmark_matches = qmark_re.findall(operation)
named_matches = named_re.findall(operation)
param_matches = len(qmark_matches) + len(named_matches)
# Matches but no parameters
if param_matches > 0 and parameters is None:
            raise ProgrammingError('parameter required but not given: %s' %
                                   operation)
# No regex matches and no parameters.
if parameters is None:
return operation
if len(qmark_matches) > 0 and len(named_matches) > 0:
            raise ProgrammingError('different parameter types in operation '
                                   'not permitted: %s %s' %
                                   (operation, parameters))
if isinstance(parameters, dict):
# parameters is a dict or a dict subclass
if len(qmark_matches) > 0:
                raise ProgrammingError('Unnamed binding used, but you supplied '
                                       'a dictionary (which has only names): '
                                       '%s %s' % (operation, parameters))
for op_key in named_matches:
try:
operation = operation.replace(op_key,
_adapt_from_python(parameters[op_key[1:]]))
except KeyError:
raise ProgrammingError('the named parameters given do not '
'match operation: %s %s' %
(operation, parameters))
else:
# parameters is a sequence
if param_matches != len(parameters):
raise ProgrammingError('incorrect number of parameters '
'(%s != %s): %s %s' % (param_matches,
len(parameters), operation, parameters))
if len(named_matches) > 0:
raise ProgrammingError('Named binding used, but you supplied a'
' sequence (which has no names): %s %s' %
(operation, parameters))
for i in range(len(parameters)):
operation = operation.replace('?',
_adapt_from_python(parameters[i]), 1)
return operation
async def _get_sql_command(self, sql_str):
return sql_str.split(None, 1)[0].upper()
async def execute(self, operation, parameters=None):
if not isinstance(operation, basestring):
raise ValueError(
"argument must be a string, not '{}'".format(type(operation).__name__))
operation = await self._substitute_params(operation, parameters)
command = await self._get_sql_command(operation)
if command in ('SELECT', 'PRAGMA'):
payload = await self._request("GET",
"/db/query?" + _urlencode({'q': operation}))
else:
payload = await self._request("POST", "/db/execute?transaction",
headers={'Content-Type': 'application/json'}, body=json.dumps([operation]))
last_insert_id = None
rows_affected = -1
payload_rows = {}
try:
results = payload["results"]
except KeyError:
pass
else:
rows_affected = 0
for item in results:
if 'error' in item:
logging.getLogger(__name__).error(json.dumps(item))
raise Error(json.dumps(item))
try:
rows_affected += item['rows_affected']
except KeyError:
pass
try:
last_insert_id = item['last_insert_id']
except KeyError:
pass
if 'columns' in item:
payload_rows = item
try:
fields = payload_rows['columns']
except KeyError:
self.description = None
self._rows = []
if command == 'INSERT':
self.lastrowid = last_insert_id
else:
rows = []
description = []
for field in fields:
description.append((
_column_stripper(field, parse_colnames=self.connection.parse_colnames),
None,
None,
None,
None,
None,
None,
))
try:
values = payload_rows['values']
types = payload_rows['types']
except KeyError:
pass
else:
if values:
converters = [_convert_to_python(field, type_,
parse_decltypes=self.connection.parse_decltypes,
parse_colnames=self.connection.parse_colnames)
for field, type_ in zip(fields, types)]
for payload_row in values:
row = []
for field, converter, value in zip(fields, converters, payload_row):
row.append((field, (value if converter is None
else converter(value))))
rows.append(Row(row))
self._rows = rows
self.description = tuple(description)
self.rownumber = 0
if command in ('UPDATE', 'DELETE'):
            # sqlalchemy's _emit_update_statements function asserts
            # rowcount for each update, and _emit_delete_statements
            # warns unless rowcount matches
self.rowcount = rows_affected
else:
self.rowcount = len(self._rows)
return self
async def executemany(self, operation, seq_of_parameters=None):
if not isinstance(operation, basestring):
raise ValueError(
"argument must be a string, not '{}'".format(type(operation).__name__))
statements = []
for parameters in seq_of_parameters:
statements.append(await self._substitute_params(operation, parameters))
payload = await self._request("POST", "/db/execute?transaction",
headers={'Content-Type': 'application/json'},
body=json.dumps(statements))
rows_affected = -1
try:
results = payload["results"]
except KeyError:
pass
else:
rows_affected = 0
for item in results:
if 'error' in item:
logging.getLogger(__name__).error(json.dumps(item))
try:
rows_affected += item['rows_affected']
except KeyError:
pass
self._rows = []
self.rownumber = 0
self.rowcount = rows_affected
async def fetchone(self):
''' Fetch the next row '''
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
    async def fetchmany(self, size=None):
        remaining = self.arraysize if size is None else size
        remaining = min(remaining, self.rowcount - self.rownumber)
        return [await self.fetchone() for _ in range(remaining)]
async def fetchall(self):
rows = []
while self.rownumber < self.rowcount:
row = await self.fetchone()
            if not row:
                break
rows.append(row)
return rows
async def setinputsizes(self, sizes):
raise NotImplementedError(self)
async def setoutputsize(self, size, column=None):
raise NotImplementedError(self)
async def scroll(self, value, mode='relative'):
raise NotImplementedError(self)
async def next(self):
raise NotImplementedError(self)
async def __aiter__(self):
while self.rownumber < self.rowcount:
row = await self.fetchone()
yield row | AIOrqlite | /AIOrqlite-0.2.3-py3-none-any.whl/aiorqlite/cursors.py | cursors.py |
from __future__ import unicode_literals
"""
SQLite natively supports only the types TEXT, INTEGER, REAL, BLOB and NULL.
And RQLite always answers 'bytes' values.
Converters transforms RQLite answers to Python native types.
Adapters transforms Python native types to RQLite-aware values.
"""
import codecs
import datetime
import functools
import re
import sqlite3
import sys
from .exceptions import InterfaceError
if sys.version_info[0] >= 3:
basestring = str
unicode = str
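# detect_types flags, mirroring the constants of the stdlib sqlite3 module.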
PARSE_DECLTYPES = 1
PARSE_COLNAMES = 2
def _decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
if sys.version_info[0] >= 3:
def _escape_string(value):
if isinstance(value, bytes):
return "X'{}'".format(
codecs.encode(value, 'hex').decode('utf-8'))
return "'{}'".format(value.replace("'", "''"))
def _adapt_datetime(val):
return val.isoformat(" ")
else:
def _escape_string(value):
if isinstance(value, bytes):
try:
value = value.decode('utf-8')
except UnicodeDecodeError:
# Encode as a BLOB literal containing hexadecimal data
return "X'{}'".format(
codecs.encode(value, 'hex').decode('utf-8'))
return "'{}'".format(value.replace("'", "''"))
def _adapt_datetime(val):
return val.isoformat(b" ")
def _adapt_date(val):
return val.isoformat()
def _convert_date(val):
return datetime.date(*map(int, val.split('T')[0].split("-")))
def _convert_timestamp(val):
datepart, timepart = val.split("T")
year, month, day = map(int, datepart.split("-"))
timepart_full = timepart.strip('Z').split(".")
hours, minutes, seconds = map(int, timepart_full[0].split(":"))
if len(timepart_full) == 2:
microseconds = int('{:0<6.6}'.format(timepart_full[1]))
else:
microseconds = 0
val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
return val
def _null_wrapper(converter, value):
if value is not None:
value = converter(value)
return value
adapters = {
bytes: lambda x: x,
float: lambda x: x,
int: lambda x: x,
bool: int,
unicode: lambda x: x.encode('utf-8'),
type(None): lambda x: None,
datetime.date: _adapt_date,
datetime.datetime: _adapt_datetime,
}
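# Key adapters by (type, protocol) pairs to match the registry layout used by
# the stdlib sqlite3 module.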
adapters = {(type_, sqlite3.PrepareProtocol): val for type_, val in adapters.items()}
_default_adapters = adapters.copy()
converters = {
'UNICODE': functools.partial(_null_wrapper, lambda x: x.decode('utf-8')),
'INTEGER': functools.partial(_null_wrapper, int),
'BOOL': functools.partial(_null_wrapper, bool),
'FLOAT': functools.partial(_null_wrapper, float),
'REAL': functools.partial(_null_wrapper, float),
'NULL': lambda x: None,
'BLOB': lambda x: x,
'DATE': functools.partial(_null_wrapper, _convert_date),
'DATETIME': lambda x: x.replace('T', ' ').rstrip('Z'),
'TIMESTAMP': functools.partial(_null_wrapper, _convert_timestamp),
}
# Non-native converters will be decoded from base64 before fed into converter
_native_converters = ('BOOL', 'FLOAT', 'INTEGER', 'REAL', 'NUMBER', 'NULL', 'DATE', 'DATETIME', 'TIMESTAMP')
# SQLite TEXT affinity: https://www.sqlite.org/datatype3.html
_text_affinity_re = re.compile(r'CHAR|CLOB|TEXT')
def register_converter(type_string, function):
converters[type_string.upper()] = function
def register_adapter(type_, function):
adapters[(type_, sqlite3.PrepareProtocol)] = function
def _convert_to_python(column_name, type_, parse_decltypes=False, parse_colnames=False):
"""
Tries to mimic stock sqlite3 module behaviours.
PARSE_COLNAMES have precedence over PARSE_DECLTYPES on _sqlite/cursor.c code
"""
converter = None
type_upper = None
if type_ == '': # q="select 3.0" -> type='' column_name='3.0' value=3
if column_name.isdigit():
type_ = 'int'
        elif all(part.isdigit() for part in column_name.partition('.')[::2]):  # e.g. 3.14
type_ = 'real'
if '[' in column_name and ']' in column_name and parse_colnames:
type_upper = column_name.upper().partition('[')[-1].partition(']')[0]
if type_upper in converters:
converter = converters[type_upper]
if not converter:
type_upper = type_.upper()
if parse_decltypes:
## From: https://github.com/python/cpython/blob/c72b6008e0578e334f962ee298279a23ba298856/Modules/_sqlite/cursor.c#L167
# /* Converter names are split at '(' and blanks.
# * This allows 'INTEGER NOT NULL' to be treated as 'INTEGER' and
# * 'NUMBER(10)' to be treated as 'NUMBER', for example.
# * In other words, it will work as people expect it to work.*/
type_upper = type_upper.partition('(')[0].partition(' ')[0]
if type_upper in converters:
if type_upper in _native_converters or parse_decltypes:
converter = converters[type_upper]
if converter:
if type_upper not in _native_converters:
converter = functools.partial(_decode_base64_converter, converter)
elif not type_upper or _text_affinity_re.search(type_upper):
# Python's sqlite3 module has a text_factory attribute which
# returns unicode by default.
pass
else:
converter = _conditional_string_decode_base64
return converter
def _adapt_from_python(value):
if not isinstance(value, basestring):
adapter_key = (type(value), sqlite3.PrepareProtocol)
adapter = adapters.get(adapter_key)
try:
if adapter is None:
# Fall back to _default_adapters, so that ObjectAdaptationTests
# teardown will correctly restore the default state.
adapter = _default_adapters[adapter_key]
except KeyError as e:
# No adapter registered. Let the object adapt itself via PEP-246.
# It has been rejected by the BDFL, but is still implemented
# on stdlib sqlite3 module even on Python 3 !!
if hasattr(value, '__adapt__'):
adapted = value.__adapt__(sqlite3.PrepareProtocol)
elif hasattr(value, '__conform__'):
adapted = value.__conform__(sqlite3.PrepareProtocol)
else:
raise InterfaceError(e)
else:
adapted = adapter(value)
else:
adapted = value
    # The adapter could have returned a string
if isinstance(adapted, (bytes, unicode)):
adapted = _escape_string(adapted)
elif adapted is None:
adapted = 'NULL'
else:
adapted = str(adapted)
return adapted
def _column_stripper(column_name, parse_colnames=False):
return column_name.partition(' ')[0] if parse_colnames else column_name
def _decode_base64_converter(converter, value):
if value is not None:
if not isinstance(value, bytes):
value = value.encode('utf-8')
value = converter(codecs.decode(value, 'base64'))
return value
def _conditional_string_decode_base64(value):
if isinstance(value, basestring):
if not isinstance(value, bytes):
value = value.encode('utf-8')
value = codecs.decode(value, 'base64')
return value | AIOrqlite | /AIOrqlite-0.2.3-py3-none-any.whl/aiorqlite/extensions.py | extensions.py |
from __future__ import unicode_literals
import codecs
import logging
import aiohttp
import asyncio
try:
from urllib.parse import urlparse
except ImportError:
# pylint: disable=import-error
from urlparse import urlparse
from .constants import (
UNLIMITED_REDIRECTS,
)
from .cursors import Cursor
from ._ephemeral import EphemeralRqlited as _EphemeralRqlited
from .extensions import PARSE_DECLTYPES, PARSE_COLNAMES
class Connection(object):
from .exceptions import (
Warning,
Error,
InterfaceError,
DatabaseError,
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
)
def __init__(self, host='localhost', port=None,
user=None, password=None, connect_timeout=None,
detect_types=0, max_redirects=UNLIMITED_REDIRECTS):
self.messages = []
self.host = host
self.port = port
if not self.port:
if self.host.count(':') == 1:
split = self.host.split(':')
self.host = split[0]
self.port = int(split[1])
if not self.port:
self.port = 4001
self._headers = {}
if not (user is None or password is None):
self._headers['Authorization'] = 'Basic ' + \
codecs.encode('{}:{}'.format(user, password).encode('utf-8'),
'base64').decode('utf-8').rstrip('\n')
self.connect_timeout = connect_timeout
self.max_redirects = max_redirects
self.detect_types = detect_types
self.parse_decltypes = detect_types & PARSE_DECLTYPES
self.parse_colnames = detect_types & PARSE_COLNAMES
self._ephemeral = None
if self.host == ':memory:':
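            # ':memory:' mimics sqlite3's in-memory database: spawn a
            # throwaway local rqlited process instead of connecting out.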
self._ephemeral = _EphemeralRqlited()
self._enter = False
async def __aenter__(self):
if not self._enter:
self._connection = await self._init_connection()
if self._ephemeral:
self._ephemeral = await self._ephemeral.__aenter__()
self.host, self.port = self._ephemeral.http
self._enter = True
return self
async def __aexit__(self, exc_type, exc, tb):
self._enter = False
await self.close()
    async def _init_connection(self):
        # aiohttp.ClientTimeout instances are immutable (frozen), so build
        # the timeout with the desired total up front instead of assigning
        # to it afterwards.
        total = float(self.connect_timeout) if self.connect_timeout else None
        timeout = aiohttp.ClientTimeout(total=total, connect=None,
                                        sock_connect=None, sock_read=None)
        return aiohttp.ClientSession(timeout=timeout)
async def _retry_request(self, method, uri, body=None, headers={}):
tries = 10
while tries:
tries -= 1
try:
resp = await self._connection.request(method, 'http://%s:%s%s' % (self.host, self.port, uri), data=body,
headers=dict(self._headers, **headers))
return resp
except Exception:
if not tries:
raise
if self._ephemeral:
await asyncio.sleep(0.5) # allow delay for server to start
await self._connection.close()
self._connection = await self._init_connection()
async def _fetch_response(self, method, uri, body=None, headers={}):
"""
Fetch a response, handling redirection.
"""
response = await self._retry_request(method, uri, body=body, headers=headers)
redirects = 0
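        # rqlite answers with a 301 pointing at the cluster leader; follow the
        # redirect and re-pin the connection if it lands on a different node.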
while response.status == 301 and \
response.headers.get('Location') is not None and \
(self.max_redirects == UNLIMITED_REDIRECTS or redirects < self.max_redirects):
redirects += 1
uri = response.headers.get('Location')
location = urlparse(uri)
logging.getLogger(__name__).debug("status: %s reason: '%s' location: '%s'",
response.status, response.reason, uri)
if self.host != location.hostname or self.port != location.port:
await self._connection.close()
self.host = location.hostname
self.port = location.port
self._connection = await self._init_connection()
response = await self._retry_request(method, uri, body=body, headers=headers)
return response
async def close(self):
"""Close the connection now (rather than whenever .__del__() is
called).
The connection will be unusable from this point forward; an
Error (or subclass) exception will be raised if any operation
is attempted with the connection. The same applies to all
cursor objects trying to use the connection. Note that closing
a connection without committing the changes first will cause an
implicit rollback to be performed."""
await self._connection.close()
if self._ephemeral is not None:
await self._ephemeral.__aexit__(None, None, None)
self._ephemeral = None
def __del__(self):
""" cannot asynchronously delete """
pass
async def commit(self):
"""Database modules that do not support transactions should
implement this method with void functionality."""
pass
async def rollback(self):
"""This method is optional since not all databases provide
transaction support. """
pass
def cursor(self, factory=Cursor):
"""Return a new Cursor Object using the connection."""
return factory(self)
async def execute(self, *args, **kwargs):
curs = self.cursor()
c = await curs.__aenter__()
await c.execute(*args, **kwargs)
return curs | AIOrqlite | /AIOrqlite-0.2.3-py3-none-any.whl/aiorqlite/connections.py | connections.py |
import contextlib
import errno
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import asyncio
class EphemeralRqlited(object):
def __init__(self):
self.host = None
self.http = None
self.raft = None
self._tempdir = None
self._proc = None
@staticmethod
async def _unused_ports(host, count):
sockets = []
ports = []
try:
sockets.extend(
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for i in range(count))
for s in sockets:
s.bind((host, 0))
ports.append(s.getsockname()[-1])
finally:
while sockets:
sockets.pop().close()
return ports
@staticmethod
async def _test_port(host, port, timeout=None):
try:
with contextlib.closing(
socket.create_connection((host, port), timeout=timeout)):
return True
except socket.error:
return False
async def _start(self):
self._tempdir = tempfile.mkdtemp()
self.host = 'localhost'
# Allocation of unused ports is racy, so retry
# until ports have been successfully acquired.
while self._proc is None:
ports = await self._unused_ports(self.host, 2)
http_port, raft_port = ports
self.http = (self.host, http_port)
self.raft = (self.host, raft_port)
with open(os.devnull, mode='wb', buffering=0) as devnull:
filename = 'rqlited'
try:
self._proc = subprocess.Popen([filename,
'-http-addr', '{}:{}'.format(*self.http),
'-raft-addr', '{}:{}'.format(*self.raft), self._tempdir],
stdout=devnull, stderr=devnull)
except EnvironmentError as e:
if e.errno == errno.ENOENT and sys.version_info.major < 3:
# Add filename to clarify exception message.
e.filename = filename
raise
                # Wait until the HTTP port answers, or stop waiting if the
                # process dies first (the outer loop then retries with
                # fresh ports).
                while not await self._test_port(*self.http) and self._proc.poll() is None:
                    await asyncio.sleep(0.5)
if self._proc.poll() is not None:
self._proc = None
async def __aenter__(self):
await self._start()
return self
async def __aexit__(self, exc_type, exc_value, exc_traceback):
if self._tempdir is not None:
shutil.rmtree(self._tempdir)
self._tempdir = None
if self._proc is not None:
self._proc.terminate()
self._proc.wait()
self._proc = None
return False | AIOrqlite | /AIOrqlite-0.2.3-py3-none-any.whl/aiorqlite/_ephemeral.py | _ephemeral.py |
import requests
from random import choice
import re
radioURL = "https://www.aiowiki.com/audioplayer/data/radio.php"
freeURL = "https://www.aiowiki.com/audioplayer/data/free.php"
podcastURL = "https://www.aiowiki.com/audioplayer/data/podcast.php"
months = {"January": "01", "February": "02", "March": "03", "April": "04", "May": "05", "June": "06", "July": "07",
"August": "08", "September": "09", "October": "10", "November": "11", "December": "12"}
class InvalidDateException(Exception):
pass
def transformDate(date):
"""Used by dateValue"""
global months
parts = date.split(" ")
if parts[0] in months.keys():
month = months[parts[0]]
else:
        raise InvalidDateException(parts[0] + " is not a valid month")
year = parts[2]
day = parts[1].strip(",")
return year + month + day.zfill(2)
def dateValue(date):
"""Changes the date from aiowiki into the format used in backend URLs."""
if ',' in date:
date = transformDate(date)
return int(date)
def makeRadioURL(date):
"""Gets the URL for an episode from its date."""
urlbase = "https://media.focusonthefamily.com/fotf/mp3/aio/"
urltip = "aio_{0}.mp3".format(date)
# Sometimes the URLS contain aio and sometimes aiow. I don't see a pattern so I try both.
if requests.get(urlbase + urltip, timeout=2, stream=True).status_code == 200:
return urlbase + urltip
else:
return urlbase + urltip.replace("aio", "aiow")
def getRadioEpisodes() -> list:
"""Returns radio episodes as a list of dicts, each with an additional parameter, 'url', which is a link to the audio on fotfproxy.tk"""
global radioURL
e = requests.get(radioURL, timeout=2).json()['Episodes']
for i in e:
e[e.index(i)]['url'] = makeRadioURL(transformDate(e[e.index(i)]['Date']))
e = sorted(e, key=lambda x: dateValue(x['Date']), reverse=True)
for episode in e:
e[e.index(episode)]['Summary'] = stringChoice(episode['Summary'])
return e
def getFreeEpisodes() -> list:
"""Returns free episodes as a list of dicts, with an additional parameter, 'url', which links to the episode as an audio file. """
global freeURL
e = requests.get(freeURL, timeout=2).json()['Episodes']
for episode in e:
e[e.index(episode)]['Summary'] = stringChoice(episode['Summary'])
e[e.index(episode)]['url'] = proxyURL(episode['url'])
return e
def stringChoice(string):
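    """Resolve '[[a|b|c]]' placeholders in a string by picking one of the
    options at random."""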
    choiceRegex = r"\[\[.*?\]\]"
choices = re.findall(choiceRegex, string)
for c in choices:
string = string.replace(c, choice(c.strip("[").strip("]").split("|")))
return string
def getRadioEpisodeByName(name):
"""Returns a dict of info about the episode by its name. The name does not have to be exact."""
candidates = list(filter(lambda x: fuzzyMatch(x['Name'], name), getRadioEpisodes()))
return candidates[0] if len(candidates) > 0 else None
def getFreeEpisodeByName(name):
"""Returns a dict of info about the episode by its name. The name does not have to be exact."""
candidates = list(filter(lambda x: fuzzyMatch(x['Name'], name), getFreeEpisodes()))
return candidates[0] if len(candidates) > 0 else None
def getRadioEpisodeByNumber(episodeNumber):
"""Returns a dict of info about the episode by its number. The name does not have to be exact."""
candidates = list(filter(lambda x: x['Number'] == str(episodeNumber).zfill(3), getRadioEpisodes()))
return candidates[0] if len(candidates) > 0 else None
def getFreeEpisodeByNumber(episodeNumber):
"""Returns a dict of info about the episode by its number. The name does not have to be exact."""
candidates = list(filter(lambda x: x['Number'] == str(episodeNumber).zfill(3), getFreeEpisodes()))
return candidates[0] if len(candidates) > 0 else None
def getEpisodeByUrl(url: str):
"""Gets a dict of info about the episode linked to by the url."""
url = proxyURL(url)
candidates = list(filter(lambda x: x['url'] == url, getRadioEpisodes()))
if len(candidates) < 1:
candidates = list(filter(lambda x: x['url'] == url, getFreeEpisodes()))
return candidates[0] if len(candidates) > 0 else None
def fuzzyMatch(string1, string2):
"""Compare the English character content of two strings."""
replacements = {"1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six", "7": "seven",
"8": "eight", "9": "nine", "gonna": "going to"}
whiteout = '.,"\'!?/$()'
string1 = string1.strip().lower()
string2 = string2.strip().lower()
for num, word in replacements.items():
string1 = string1.replace(num, word)
string2 = string2.replace(num, word)
for char in whiteout:
string1 = string1.replace(char, "")
string2 = string2.replace(char, "")
return string1 == string2
def proxyURL(url: str):
"""Redirects an AIO media link to an https, proxied link to the same file."""
# Proxying is no longer needed as media.focusonthefamily.com supports HTTPS
return url.replace('http://', 'https://')
# ------Tests------
# print(stringChoice("blah blah blah [[me|you]] candle brick sandwich. Summary information [[this|that]][[who|what]]in the world."))
# print(list(map(lambda x:x['URL'],getRadioEpisodes()['Episodes'])))
# print(getFreeEpisodes())
# print(getFreeEpisodeByName("Youre Not going to believe this!!!"))
# print(getRadioEpisodes())
# print(getRadioEpisodeByNumber(522))
# print(getRadioEpisodeByName("NOTAREALEPISODE"))
# print(getFreeEpisodeByName("happy hunting"))
# print(proxyURL("http://media.focusonthefamily.com/aio/mp3/aiopodcast155.mp3"))
# print(getEpisodeByUrl("http://media.focusonthefamily.com/fotf/mp3/aio/aio_20180102.mp3"))
# print(getEpisodeByUrl("https://fotfproxy.tk/fotf/mp3/aio/aio_20180102.mp3"))
# print(getEpisodeByUrl("https://fotfproxy.tk/aio/mp3/aiopodcast155.mp3")) | AIOwiki | /AIOwiki-1.3.tar.gz/AIOwiki-1.3/AIO/AIO.py | AIO.py |
try:
from utils import ifelse
except Exception:
from ..utils import ifelse
import os
os.environ['TF_KERAS'] = '1'
import pandas as pd
from sklearn.model_selection import train_test_split
# from tql.algo_dl.keras.utils import _DataGenerator as DataGenerator
from tql.algo_dl.keras.utils import DataIter
from keras_bert import load_trained_model_from_checkpoint, Tokenizer, load_vocabulary
from tensorflow.python.keras.layers import *
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
bert_dir = ifelse('/Users/yuanjie/Desktop/Data/chinese_L-12_H-768_A-12', '/fds/data/wv/chinese_L-12_H-768_A-12')
data_path = ifelse('./sentiment.tsv.zip', '/fds/data/sentiment.tsv.zip')
config_path = bert_dir + '/bert_config.json'
checkpoint_path = bert_dir + '/bert_model.ckpt'
dict_path = bert_dir + '/vocab.txt'
token_dict = load_vocabulary(dict_path)
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
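# Unfreeze every layer so the whole BERT encoder is fine-tuned end to end.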
for l in bert_model.layers:
l.trainable = True
# Override the tokenizer so unmatched characters don't break encoding
class OurTokenizer(Tokenizer):
def _tokenize(self, text):
R = []
for c in text:
if c in self._token_dict:
R.append(c)
elif self._is_space(c):
                R.append('[unused1]')  # map spaces to the untrained [unused1] token
else:
                R.append('[UNK]')  # everything else becomes [UNK]
return R
tokenizer = OurTokenizer(token_dict)
x1_in = Input(shape=(None,))
x2_in = Input(shape=(None,))
x = bert_model([x1_in, x2_in])
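# Take the [CLS] position's output vector as the sentence representation.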
x = Lambda(lambda x: x[:, 0])(x)
p = Dense(1, activation='sigmoid')(x)
model = Model([x1_in, x2_in], p)
model.compile(
loss='binary_crossentropy',
    optimizer=Adam(1e-5),  # use a sufficiently small learning rate
metrics=['accuracy']
)
model.summary()
######################################################
def mapper(X, y):
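    # tokenizer.encode yields (token_ids, segment_ids); zip groups them into
    # two batches, and each batch is padded/truncated to length 256.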
X = list(map(lambda x: pad_sequences(x, 256), zip(*map(tokenizer.encode, X))))
return X, y
######################################################
df = pd.read_csv(data_path, sep='\t')
X = df.text.astype(str)
y = df.label.values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
# dg_train = DataGenerator(X_train, y_train, 32, mapper)
# dg_valid = DataGenerator(X_test, y_test, 32, mapper)
dg_train = DataIter(X_train, y_train, 32, mapper)
dg_valid = DataIter(X_test, y_test, 32, mapper)
model.fit_generator(dg_train,
epochs=10,
validation_data=dg_valid) | AIPipeline | /AIPipeline-2019.10.18.14.25.12-py3-none-any.whl/aipipeline/trainer/bert.py | bert.py |
import re
import requests
import pandas as pd
from traceback import format_exc
import jieba.analyse as ja
from tqdm import tqdm
from datetime import datetime, timedelta
import time
class DocID(object):
def __init__(self, debug=False, delta=2, duplicate=True):
self.debug = debug
endTime = datetime.today()
startTime = endTime - timedelta(days=delta)
self.duplicate = duplicate
self.url_receive = f'http://c4.admin.browser.miui.srv/panel/api/v1/articleapi/getDocId?cpStatus=1&endTime={str(endTime)[:19]}&startTime={str(startTime)[:19]}'
self.url_send = 'http://c3.admin.browser.miui.srv/panel/api/v1/articleapi/thridV2'
def receive(self):
info_list = []
        for doc_id in tqdm(self.yidian_push_ids, "Receive"):
            try:
                item = self._get_info_yidian(doc_id)
                if item:
                    info_list.append(item)
            except Exception:
                print(f"content-not-found: {doc_id}")
                continue
return pd.DataFrame(info_list).drop_duplicates(['title'])
def send(self, userPackage, docid_list, topk=3):
"""发送topk"""
docids = []
for docid in tqdm(docid_list, 'Send'):
item = self._get_info(f"http://content.pt.xiaomi.srv/api/v1/contents/{docid}")['item']
for k in ['keywords', 'extKeywords', 'userTags', 'titleTags']:
if k in item:
keywords = ','.join(item[k])
break
else:
keywords = ''
            title = item['title'].strip()  # may be manually shortened
keywords = keywords if keywords else ','.join(ja.tfidf(item['title'], 3, allowPOS=['n', 'vn']))
if item.get('summary'):
# subTitle = self._get_newsSummary(item['title'], item['summary'], 128)
# print(subTitle)
# subTitle = subTitle if subTitle else item['summary'][:128]
subTitle = item['summary'][:128]
else:
subTitle = item['title']
pay_load = {"cpStatus": 18,
"docIds": "",
"duplicate": self.duplicate,
"article": [{"docId": docid, "userCategory": userPackage, "subTitle": subTitle, "title": title,
"keywords": keywords}]}
if self.debug:
print(pay_load)
else:
try:
if self.duplicate and len(docids) < topk:
r = requests.post(self.url_send, json=pay_load, timeout=10)
# print(r.json(encoding="utf8"))
docids.append(docid)
time.sleep(0.5)
else:
break
except Exception as e:
print(format_exc().strip())
print(f"\nPush: {len(docids)} articles")
return docids
@property
def yidian_push_ids(self):
try:
            return set(self._get_info(self.url_receive, 'POST')['data'])  # deduplicate
except Exception as e:
print(format_exc().strip())
def _get_info_yidian(self, docid):
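        # Extract the source article id from a URL like '.../article/<id>?...'.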
reg = re.compile(r'article/(.*)[?]')
url = 'http://content.pt.xiaomi.srv/api/v1/contents/'
print(url + docid)
info = self._get_info(url + docid)
if info['item'] is not None and 'push' in info['item']['cpApi']:
            docid = 'yidian_' + reg.findall(info['item']['url'])[0]  # build the yidian docid, then refresh info
info = self._get_info(url + docid)
if info is not None and info['success']:
return info['item']
def _get_info(self, url, method='GET'):
try:
r = requests.request(method, url, timeout=10)
if r.json(encoding="utf8")['success']:
return r.json(encoding="utf8")
except Exception as e:
print(format_exc().strip())
def _get_newsSummary(self, title, text, maxlen=128):
try:
url = 'http://web.algo.browser.miui.srv/nlp/bd'
json = {'method': 'newsSummary', 'text': text, 'title': title, 'maxlen': maxlen}
r = requests.post(url, json=json, timeout=10).json()
print(r)
return r['Score']['summary']
except Exception as e:
print(format_exc().strip())
if __name__ == '__main__':
rs = DocID(False, duplicate=True)
# print(rs._get_info_yidian('caf464311a06bd4f8ad9dee2b2a9caa2'))
# print(rs.receive())
rs.send('xx', ['yidian_V_04Ib5xbA'] * 5)
# print(rs._get_newsSummary('王者荣耀', '王者荣耀')) | AIPipeline | /AIPipeline-2019.10.18.14.25.12-py3-none-any.whl/aipipeline/push/utils/DocID.py | DocID.py |