file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
athena_cli.py | csv.QUOTE_ALL)
if self.format == 'CSV_HEADER':
csv_writer.writerow(headers)
csv_writer.writerows([[text.encode("utf-8") for text in row] for row in self.athena.yield_rows(results, headers)])
elif self.format == 'TSV':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], tablefmt='tsv'))
elif self.format == 'TSV_HEADER':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='tsv'))
elif self.format == 'VERTICAL':
for num, row in enumerate(self.athena.yield_rows(results, headers)):
print('--[RECORD {}]--'.format(num+1))
print(tabulate(zip(*[headers, row]), tablefmt='presto'))
else: # ALIGNED
print(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto'))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
try:
del cmd.Cmd.do_show # "show" is an Athena command
except AttributeError:
# "show" was removed from Cmd2 0.8.0
pass
class AthenaShell(cmd.Cmd, object):
multilineCommands = ['WITH', 'SELECT', 'ALTER', 'CREATE', 'DESCRIBE', 'DROP', 'MSCK', 'SHOW', 'USE', 'VALUES']
allow_cli_args = False
def __init__(self, athena, db=None):
cmd.Cmd.__init__(self)
self.athena = athena
self.dbname = db
self.execution_id = None
self.row_count = 0
self.set_prompt()
self.pager = os.environ.get('ATHENA_CLI_PAGER', LESS).split(' ')
self.hist_file = os.path.join(os.path.expanduser("~"), ".athena_history")
self.init_history()
def set_prompt(self):
self.prompt = 'athena:%s> ' % self.dbname if self.dbname else 'athena> '
def cmdloop_with_cancel(self, intro=None):
try:
self.cmdloop(intro)
except KeyboardInterrupt:
if self.execution_id:
self.athena.stop_query_execution(self.execution_id)
print('\n\n%s' % self.athena.console_link(self.execution_id))
print('\nQuery aborted by user')
else:
print('\r')
self.cmdloop_with_cancel(intro)
def preloop(self):
if os.path.exists(self.hist_file):
readline.read_history_file(self.hist_file)
def postloop(self):
self.save_history()
def init_history(self):
try:
readline.read_history_file(self.hist_file)
readline.set_history_length(HISTORY_FILE_SIZE)
readline.write_history_file(self.hist_file)
except IOError:
readline.write_history_file(self.hist_file)
atexit.register(self.save_history)
def save_history(self):
try:
readline.write_history_file(self.hist_file)
except IOError:
pass
def | (self, arg):
help_output = """
Supported commands:
QUIT
SELECT
ALTER DATABASE <schema>
ALTER TABLE <table>
CREATE DATABASE <schema>
CREATE TABLE <table>
DESCRIBE <table>
DROP DATABASE <schema>
DROP TABLE <table>
MSCK REPAIR TABLE <table>
SHOW COLUMNS FROM <table>
SHOW CREATE TABLE <table>
SHOW DATABASES [LIKE <pattern>]
SHOW PARTITIONS <table>
SHOW TABLES [IN <schema>] [<pattern>]
SHOW TBLPROPERTIES <table>
USE [<catalog>.]<schema>
VALUES row [, ...]
See http://docs.aws.amazon.com/athena/latest/ug/language-reference.html
"""
print(help_output)
def do_quit(self, arg):
print()
return -1
def do_EOF(self, arg):
return self.do_quit(arg)
def do_use(self, schema):
self.dbname = schema.rstrip(';')
self.set_prompt()
def do_set(self, arg):
try:
statement, param_name, val = arg.parsed.raw.split(None, 2)
val = val.strip()
param_name = param_name.strip().lower()
if param_name == 'debug':
self.athena.debug = cmd.cast(True, val)
except (ValueError, AttributeError):
self.do_show(arg)
super(AthenaShell, self).do_set(arg)
def default(self, line):
self.execution_id = self.athena.start_query_execution(self.dbname, line.full_parsed_statement())
if not self.execution_id:
return
while True:
stats = self.athena.get_query_execution(self.execution_id)
status = stats['QueryExecution']['Status']['State']
status_line = 'Query {0}, {1:9}'.format(self.execution_id, status)
sys.stdout.write('\r' + status_line)
sys.stdout.flush()
if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
break
time.sleep(0.2) # 200ms
sys.stdout.write('\r' + ' ' * len(status_line) + '\r') # delete query status line
sys.stdout.flush()
if status == 'SUCCEEDED':
results = self.athena.get_query_results(self.execution_id)
headers = [h['Name'] for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]
row_count = len(results['ResultSet']['Rows'])
if headers and len(results['ResultSet']['Rows']) and results['ResultSet']['Rows'][0]['Data'][0].get('VarCharValue', None) == headers[0]:
row_count -= 1 # don't count header
process = subprocess.Popen(self.pager, stdin=subprocess.PIPE)
process.stdin.write(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto').encode('utf-8'))
process.communicate()
print('(%s rows)\n' % row_count)
print('Query {0}, {1}'.format(self.execution_id, status))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
print(self.athena.console_link(self.execution_id))
submission_date = stats['QueryExecution']['Status']['SubmissionDateTime']
completion_date = stats['QueryExecution']['Status']['CompletionDateTime']
execution_time = stats['QueryExecution']['Statistics']['EngineExecutionTimeInMillis']
data_scanned = stats['QueryExecution']['Statistics']['DataScannedInBytes']
query_cost = data_scanned / 1000000000000.0 * 5.0
print('Time: {}, CPU Time: {}ms total, Data Scanned: {}, Cost: ${:,.2f}\n'.format(
str(completion_date - submission_date).split('.')[0],
execution_time,
human_readable(data_scanned),
query_cost
))
class Athena(object):
def __init__(self, profile, region=None, bucket=None, debug=False, encryption=False):
self.session = boto3.Session(profile_name=profile, region_name=region)
self.athena = self.session.client('athena')
self.region = region or os.environ.get('AWS_DEFAULT_REGION', None) or self.session.region_name
self.bucket = bucket or self.default_bucket
self.debug = debug
self.encryption = encryption
@property
def default_bucket(self):
account_id = self.session.client('sts').get_caller_identity().get('Account')
return 's3://{}-query-results-{}-{}'.format(self.session.profile_name or 'aws-athena', account_id, self.region)
def start_query_execution(self, db, query):
try:
if not db:
raise ValueError('Schema must be specified when session schema is not set')
result_configuration = {
'OutputLocation': self.bucket,
}
if self.encryption:
result_configuration['EncryptionConfiguration'] = {
'EncryptionOption': 'SSE_S3'
}
return self.athena.start_query_execution(
QueryString=query,
ClientRequestToken=str(uuid.uuid4()),
QueryExecutionContext={
'Database': db
},
ResultConfiguration=result_configuration
)['QueryExecutionId']
except (ClientError, ParamValidationError, ValueError) as e:
print(e)
return
def get_query_execution(self, execution_id):
try:
return self.athena.get_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
print(e)
def get_query_results(self, execution_id):
try:
results = None
paginator = self.athena.get_paginator('get_query_results')
page_iterator = paginator.paginate(
QueryExecutionId=execution_id
)
for page in page_iterator:
if results is None:
results = page
else:
results['ResultSet']['Rows'].extend(page['ResultSet']['Rows'])
except ClientError as e:
sys.exit(e)
if self.debug:
print(json.dumps(results, indent=2))
return results
def stop_query_execution(self, execution_id):
try:
return self.athena.stop_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
sys.exit(e)
@staticmethod
def yield_rows(results, headers):
for row in results['ResultSet']['Rows']:
# https | do_help | identifier_name |
athena_cli.py | csv.QUOTE_ALL)
if self.format == 'CSV_HEADER':
csv_writer.writerow(headers)
csv_writer.writerows([[text.encode("utf-8") for text in row] for row in self.athena.yield_rows(results, headers)])
elif self.format == 'TSV':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], tablefmt='tsv'))
elif self.format == 'TSV_HEADER':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='tsv'))
elif self.format == 'VERTICAL':
for num, row in enumerate(self.athena.yield_rows(results, headers)):
print('--[RECORD {}]--'.format(num+1))
print(tabulate(zip(*[headers, row]), tablefmt='presto'))
else: # ALIGNED
print(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto'))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
try:
del cmd.Cmd.do_show # "show" is an Athena command
except AttributeError:
# "show" was removed from Cmd2 0.8.0
pass
class AthenaShell(cmd.Cmd, object):
multilineCommands = ['WITH', 'SELECT', 'ALTER', 'CREATE', 'DESCRIBE', 'DROP', 'MSCK', 'SHOW', 'USE', 'VALUES']
allow_cli_args = False
def __init__(self, athena, db=None):
cmd.Cmd.__init__(self)
self.athena = athena
self.dbname = db
self.execution_id = None
self.row_count = 0
self.set_prompt()
self.pager = os.environ.get('ATHENA_CLI_PAGER', LESS).split(' ')
self.hist_file = os.path.join(os.path.expanduser("~"), ".athena_history")
self.init_history()
def set_prompt(self):
self.prompt = 'athena:%s> ' % self.dbname if self.dbname else 'athena> '
def cmdloop_with_cancel(self, intro=None):
try:
self.cmdloop(intro)
except KeyboardInterrupt:
if self.execution_id:
self.athena.stop_query_execution(self.execution_id)
print('\n\n%s' % self.athena.console_link(self.execution_id))
print('\nQuery aborted by user')
else:
print('\r')
self.cmdloop_with_cancel(intro)
def preloop(self):
if os.path.exists(self.hist_file):
readline.read_history_file(self.hist_file)
def postloop(self):
self.save_history()
def init_history(self):
try:
readline.read_history_file(self.hist_file)
readline.set_history_length(HISTORY_FILE_SIZE)
readline.write_history_file(self.hist_file)
except IOError:
readline.write_history_file(self.hist_file)
atexit.register(self.save_history)
def save_history(self):
try:
readline.write_history_file(self.hist_file)
except IOError:
pass
def do_help(self, arg):
|
See http://docs.aws.amazon.com/athena/latest/ug/language-reference.html
"""
print(help_output)
def do_quit(self, arg):
print()
return -1
def do_EOF(self, arg):
return self.do_quit(arg)
def do_use(self, schema):
self.dbname = schema.rstrip(';')
self.set_prompt()
def do_set(self, arg):
try:
statement, param_name, val = arg.parsed.raw.split(None, 2)
val = val.strip()
param_name = param_name.strip().lower()
if param_name == 'debug':
self.athena.debug = cmd.cast(True, val)
except (ValueError, AttributeError):
self.do_show(arg)
super(AthenaShell, self).do_set(arg)
def default(self, line):
self.execution_id = self.athena.start_query_execution(self.dbname, line.full_parsed_statement())
if not self.execution_id:
return
while True:
stats = self.athena.get_query_execution(self.execution_id)
status = stats['QueryExecution']['Status']['State']
status_line = 'Query {0}, {1:9}'.format(self.execution_id, status)
sys.stdout.write('\r' + status_line)
sys.stdout.flush()
if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
break
time.sleep(0.2) # 200ms
sys.stdout.write('\r' + ' ' * len(status_line) + '\r') # delete query status line
sys.stdout.flush()
if status == 'SUCCEEDED':
results = self.athena.get_query_results(self.execution_id)
headers = [h['Name'] for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]
row_count = len(results['ResultSet']['Rows'])
if headers and len(results['ResultSet']['Rows']) and results['ResultSet']['Rows'][0]['Data'][0].get('VarCharValue', None) == headers[0]:
row_count -= 1 # don't count header
process = subprocess.Popen(self.pager, stdin=subprocess.PIPE)
process.stdin.write(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto').encode('utf-8'))
process.communicate()
print('(%s rows)\n' % row_count)
print('Query {0}, {1}'.format(self.execution_id, status))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
print(self.athena.console_link(self.execution_id))
submission_date = stats['QueryExecution']['Status']['SubmissionDateTime']
completion_date = stats['QueryExecution']['Status']['CompletionDateTime']
execution_time = stats['QueryExecution']['Statistics']['EngineExecutionTimeInMillis']
data_scanned = stats['QueryExecution']['Statistics']['DataScannedInBytes']
query_cost = data_scanned / 1000000000000.0 * 5.0
print('Time: {}, CPU Time: {}ms total, Data Scanned: {}, Cost: ${:,.2f}\n'.format(
str(completion_date - submission_date).split('.')[0],
execution_time,
human_readable(data_scanned),
query_cost
))
class Athena(object):
def __init__(self, profile, region=None, bucket=None, debug=False, encryption=False):
self.session = boto3.Session(profile_name=profile, region_name=region)
self.athena = self.session.client('athena')
self.region = region or os.environ.get('AWS_DEFAULT_REGION', None) or self.session.region_name
self.bucket = bucket or self.default_bucket
self.debug = debug
self.encryption = encryption
@property
def default_bucket(self):
account_id = self.session.client('sts').get_caller_identity().get('Account')
return 's3://{}-query-results-{}-{}'.format(self.session.profile_name or 'aws-athena', account_id, self.region)
def start_query_execution(self, db, query):
try:
if not db:
raise ValueError('Schema must be specified when session schema is not set')
result_configuration = {
'OutputLocation': self.bucket,
}
if self.encryption:
result_configuration['EncryptionConfiguration'] = {
'EncryptionOption': 'SSE_S3'
}
return self.athena.start_query_execution(
QueryString=query,
ClientRequestToken=str(uuid.uuid4()),
QueryExecutionContext={
'Database': db
},
ResultConfiguration=result_configuration
)['QueryExecutionId']
except (ClientError, ParamValidationError, ValueError) as e:
print(e)
return
def get_query_execution(self, execution_id):
try:
return self.athena.get_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
print(e)
def get_query_results(self, execution_id):
try:
results = None
paginator = self.athena.get_paginator('get_query_results')
page_iterator = paginator.paginate(
QueryExecutionId=execution_id
)
for page in page_iterator:
if results is None:
results = page
else:
results['ResultSet']['Rows'].extend(page['ResultSet']['Rows'])
except ClientError as e:
sys.exit(e)
if self.debug:
print(json.dumps(results, indent=2))
return results
def stop_query_execution(self, execution_id):
try:
return self.athena.stop_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
sys.exit(e)
@staticmethod
def yield_rows(results, headers):
for row in results['ResultSet']['Rows']:
# https | help_output = """
Supported commands:
QUIT
SELECT
ALTER DATABASE <schema>
ALTER TABLE <table>
CREATE DATABASE <schema>
CREATE TABLE <table>
DESCRIBE <table>
DROP DATABASE <schema>
DROP TABLE <table>
MSCK REPAIR TABLE <table>
SHOW COLUMNS FROM <table>
SHOW CREATE TABLE <table>
SHOW DATABASES [LIKE <pattern>]
SHOW PARTITIONS <table>
SHOW TABLES [IN <schema>] [<pattern>]
SHOW TBLPROPERTIES <table>
USE [<catalog>.]<schema>
VALUES row [, ...] | identifier_body |
athena_cli.py | csv.QUOTE_ALL)
if self.format == 'CSV_HEADER':
csv_writer.writerow(headers)
csv_writer.writerows([[text.encode("utf-8") for text in row] for row in self.athena.yield_rows(results, headers)])
elif self.format == 'TSV':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], tablefmt='tsv'))
elif self.format == 'TSV_HEADER':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='tsv'))
elif self.format == 'VERTICAL':
|
else: # ALIGNED
print(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto'))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
try:
del cmd.Cmd.do_show # "show" is an Athena command
except AttributeError:
# "show" was removed from Cmd2 0.8.0
pass
class AthenaShell(cmd.Cmd, object):
multilineCommands = ['WITH', 'SELECT', 'ALTER', 'CREATE', 'DESCRIBE', 'DROP', 'MSCK', 'SHOW', 'USE', 'VALUES']
allow_cli_args = False
def __init__(self, athena, db=None):
cmd.Cmd.__init__(self)
self.athena = athena
self.dbname = db
self.execution_id = None
self.row_count = 0
self.set_prompt()
self.pager = os.environ.get('ATHENA_CLI_PAGER', LESS).split(' ')
self.hist_file = os.path.join(os.path.expanduser("~"), ".athena_history")
self.init_history()
def set_prompt(self):
self.prompt = 'athena:%s> ' % self.dbname if self.dbname else 'athena> '
def cmdloop_with_cancel(self, intro=None):
try:
self.cmdloop(intro)
except KeyboardInterrupt:
if self.execution_id:
self.athena.stop_query_execution(self.execution_id)
print('\n\n%s' % self.athena.console_link(self.execution_id))
print('\nQuery aborted by user')
else:
print('\r')
self.cmdloop_with_cancel(intro)
def preloop(self):
if os.path.exists(self.hist_file):
readline.read_history_file(self.hist_file)
def postloop(self):
self.save_history()
def init_history(self):
try:
readline.read_history_file(self.hist_file)
readline.set_history_length(HISTORY_FILE_SIZE)
readline.write_history_file(self.hist_file)
except IOError:
readline.write_history_file(self.hist_file)
atexit.register(self.save_history)
def save_history(self):
try:
readline.write_history_file(self.hist_file)
except IOError:
pass
def do_help(self, arg):
help_output = """
Supported commands:
QUIT
SELECT
ALTER DATABASE <schema>
ALTER TABLE <table>
CREATE DATABASE <schema>
CREATE TABLE <table>
DESCRIBE <table>
DROP DATABASE <schema>
DROP TABLE <table>
MSCK REPAIR TABLE <table>
SHOW COLUMNS FROM <table>
SHOW CREATE TABLE <table>
SHOW DATABASES [LIKE <pattern>]
SHOW PARTITIONS <table>
SHOW TABLES [IN <schema>] [<pattern>]
SHOW TBLPROPERTIES <table>
USE [<catalog>.]<schema>
VALUES row [, ...]
See http://docs.aws.amazon.com/athena/latest/ug/language-reference.html
"""
print(help_output)
def do_quit(self, arg):
print()
return -1
def do_EOF(self, arg):
return self.do_quit(arg)
def do_use(self, schema):
self.dbname = schema.rstrip(';')
self.set_prompt()
def do_set(self, arg):
try:
statement, param_name, val = arg.parsed.raw.split(None, 2)
val = val.strip()
param_name = param_name.strip().lower()
if param_name == 'debug':
self.athena.debug = cmd.cast(True, val)
except (ValueError, AttributeError):
self.do_show(arg)
super(AthenaShell, self).do_set(arg)
def default(self, line):
self.execution_id = self.athena.start_query_execution(self.dbname, line.full_parsed_statement())
if not self.execution_id:
return
while True:
stats = self.athena.get_query_execution(self.execution_id)
status = stats['QueryExecution']['Status']['State']
status_line = 'Query {0}, {1:9}'.format(self.execution_id, status)
sys.stdout.write('\r' + status_line)
sys.stdout.flush()
if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
break
time.sleep(0.2) # 200ms
sys.stdout.write('\r' + ' ' * len(status_line) + '\r') # delete query status line
sys.stdout.flush()
if status == 'SUCCEEDED':
results = self.athena.get_query_results(self.execution_id)
headers = [h['Name'] for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]
row_count = len(results['ResultSet']['Rows'])
if headers and len(results['ResultSet']['Rows']) and results['ResultSet']['Rows'][0]['Data'][0].get('VarCharValue', None) == headers[0]:
row_count -= 1 # don't count header
process = subprocess.Popen(self.pager, stdin=subprocess.PIPE)
process.stdin.write(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto').encode('utf-8'))
process.communicate()
print('(%s rows)\n' % row_count)
print('Query {0}, {1}'.format(self.execution_id, status))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
print(self.athena.console_link(self.execution_id))
submission_date = stats['QueryExecution']['Status']['SubmissionDateTime']
completion_date = stats['QueryExecution']['Status']['CompletionDateTime']
execution_time = stats['QueryExecution']['Statistics']['EngineExecutionTimeInMillis']
data_scanned = stats['QueryExecution']['Statistics']['DataScannedInBytes']
query_cost = data_scanned / 1000000000000.0 * 5.0
print('Time: {}, CPU Time: {}ms total, Data Scanned: {}, Cost: ${:,.2f}\n'.format(
str(completion_date - submission_date).split('.')[0],
execution_time,
human_readable(data_scanned),
query_cost
))
class Athena(object):
def __init__(self, profile, region=None, bucket=None, debug=False, encryption=False):
self.session = boto3.Session(profile_name=profile, region_name=region)
self.athena = self.session.client('athena')
self.region = region or os.environ.get('AWS_DEFAULT_REGION', None) or self.session.region_name
self.bucket = bucket or self.default_bucket
self.debug = debug
self.encryption = encryption
@property
def default_bucket(self):
account_id = self.session.client('sts').get_caller_identity().get('Account')
return 's3://{}-query-results-{}-{}'.format(self.session.profile_name or 'aws-athena', account_id, self.region)
def start_query_execution(self, db, query):
try:
if not db:
raise ValueError('Schema must be specified when session schema is not set')
result_configuration = {
'OutputLocation': self.bucket,
}
if self.encryption:
result_configuration['EncryptionConfiguration'] = {
'EncryptionOption': 'SSE_S3'
}
return self.athena.start_query_execution(
QueryString=query,
ClientRequestToken=str(uuid.uuid4()),
QueryExecutionContext={
'Database': db
},
ResultConfiguration=result_configuration
)['QueryExecutionId']
except (ClientError, ParamValidationError, ValueError) as e:
print(e)
return
def get_query_execution(self, execution_id):
try:
return self.athena.get_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
print(e)
def get_query_results(self, execution_id):
try:
results = None
paginator = self.athena.get_paginator('get_query_results')
page_iterator = paginator.paginate(
QueryExecutionId=execution_id
)
for page in page_iterator:
if results is None:
results = page
else:
results['ResultSet']['Rows'].extend(page['ResultSet']['Rows'])
except ClientError as e:
sys.exit(e)
if self.debug:
print(json.dumps(results, indent=2))
return results
def stop_query_execution(self, execution_id):
try:
return self.athena.stop_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
sys.exit(e)
@staticmethod
def yield_rows(results, headers):
for row in results['ResultSet']['Rows']:
# https | for num, row in enumerate(self.athena.yield_rows(results, headers)):
print('--[RECORD {}]--'.format(num+1))
print(tabulate(zip(*[headers, row]), tablefmt='presto')) | conditional_block |
cfgparse.go | Type(fileType string) bool {
for _, value := range allowedTypes {
if value == fileType {
return true
}
}
return false
}
func getFileType(filename string) (string, error) {
fileType := filepath.Ext(filename)
if !isValidType(fileType) {
errMessage := "File type not supported. Supported types (" + strings.Join(allowedTypes, " ") + ")"
err := errors.New(errMessage)
return fileType, err
}
return fileType, nil
}
func (c *CfgParser) setDelimitor() {
switch c.fileType {
case ".ini":
c.delimeter = "="
break
case ".cfg":
c.delimeter = ":"
break
default:
c.delimeter = ":"
}
}
func (c *CfgParser) ReadFile(fileName string) error |
func getKeyValuefromSectionValue(sectionValue string, sep string, lineNo uint) (string, string) {
defer func() {
err := recover()
if err != nil {
errMessage := fmt.Sprintf("Config file format error at line no %d. Please format it correctly", lineNo)
panic(errMessage)
}
}()
keyValues := strings.Split(sectionValue, sep)
key := keyValues[0]
value := keyValues[1]
return key, value
}
func (c *CfgParser) Parse(cfgFile *os.File) {
reader := bufio.NewReader(cfgFile)
var lineNo uint
var curSection section
var filePos int64
var numOfBytes int
for {
buff, _, err := reader.ReadLine()
if err != nil {
break
}
if len(buff) == 0 {
filePos++
continue
}
numOfBytes = len(buff)
filePos = filePos + int64(numOfBytes) + 1
line := strings.TrimFunc(string(buff), unicode.IsSpace)
lineNo++
if strings.HasPrefix(line, "#") || line == "" {
continue
}
if isSection(line) {
sectionHeader := sectionRegexp.FindStringSubmatch(line)[1]
curSection = section{}
if c.isSectionAlreadyExists(sectionHeader) {
errMessage := fmt.Sprintf("Parsing Error: Duplicate section %s occured at line %d",sectionHeader, lineNo)
panic(errMessage)
}
curSection.name = sectionHeader
curSection.items = make(map[string]string)
curSection.filePosition = filePos
if c.sections == nil {
c.sections = make(map[string]section)
}
c.sections[curSection.name] = curSection
} else if isKeyValue(line) {
sectionValue := keyValueRegexp.FindStringSubmatch(line)[0]
key, value := getKeyValuefromSectionValue(sectionValue, c.delimeter, lineNo)
pos := strings.Index(";", value) // Checking for comments
if pos > -1 {
if v := value[pos-1]; unicode.IsSpace(rune(v)) {
value = value[:pos-1]
}
}
curSection.items[key] = value
}
}
}
func (c *CfgParser) GetAllSections() []string {
sections := []string{}
for section := range c.sections {
sections = append(sections, section)
}
return sections
}
func (c *CfgParser) Items(section string) map[string]string {
sectionValue, ok := c.sections[section]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", section)
panic(errMessage)
}
return sectionValue.items
}
func (c *CfgParser) Get(sectionName string, key string) string {
sectionValue, ok := c.sections[sectionName]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", sectionName)
panic(errMessage)
}
value, ok := sectionValue.items[key]
if !ok {
errMessage := fmt.Sprintf("No such key %s exists in section %s", key, sectionName)
panic(errMessage)
}
return c.interpolate(sectionName, key, value)
}
func (c *CfgParser) GetBool(section string, key string) (bool, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseBool(value); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type bool", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) GetInt(section string, key string) (int64, error) {
value := c.Get(section, key)
if resValue, err := strconv.Atoi(value); err != nil {
return int64(resValue), nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type int64", value)
err := errors.New(ErrMessage)
return int64(resValue), err
}
}
func (c *CfgParser) GetFloat(section string, key string) (float64, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseFloat(value, 64); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type float64", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) AddSection(sectionName string) error {
newSection := section{}
if c.isSectionAlreadyExists(sectionName) {
errMessage := fmt.Sprintf("Cannot add section %s already exits", sectionName)
err := errors.New(errMessage)
return err
}
c.mutex.Lock()
newSection.name = sectionName
newSection.items = make(map[string]string)
if c.sections == nil {
c.sections = make(map[string]section)
}
f, err := os.OpenFile(c.fileName, os.O_APPEND|os.O_WRONLY, 0644)
defer f.Close()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
writer := bufio.NewWriter(f)
//TODO: add two new lines only if last char in file is not '\n'
buff := "\n\n[" + sectionName + "]\n"
fileStat, err := f.Stat()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
filePosition := fileStat.Size()
newSection.filePosition = filePosition + int64(len(buff))
c.sections[newSection.name] = newSection
_, writerErr := writer.WriteString(buff)
if writerErr != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while writing into file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
err = writer.Flush()
if err != nil {
return err
}
c.mutex.Unlock()
return nil
}
// TODO: Apply locks while writing also load data in memory after writing into file, along with updating the file positions
// TODO: find the best method to update filepositions ( reload the entire file , change all file positions greater than the current writing section
func (c *CfgParser) Set(sectionName string, key string, value string) {
if !c.isSectionAlreadyExists(sectionName) {
err := c.AddSection(sectionName)
if err != nil {
panic("Error adding section name")
}
}
filePos, err := c.getSectionPos(sectionName)
fReader, err := os.OpenFile(c.fileName, os.O_RDONLY, 0644)
if err != nil {
panic("Error accessing the config file")
}
defer fReader.Close()
fileStat, err := fReader.Stat()
if err != nil {
panic("Error accessing the config file")
}
fileSize := fileStat.Size()
sectionPositon, err := fReader.Seek(int64(filePos), 0)
if err != nil {
panic("Error accessing the config file")
}
extraFileSize := fileSize - sectionPositon + 1
buffBytes := make([]byte, extraFileSize)
_ , err = fReader.ReadAt(buffBytes, sectionPositon)
var remainingSlice string
if err != io.EOF {
errMessage := fmt.Sprintf("Error Reading the config file %v", err)
panic(errMessage)
}
if len(buffBytes) == 0 {
remainingSlice = ""
} else {
remainingSlice = string(buffBytes)[:len(buffBytes)-1]
}
keyValueToWrite := key + c.delimeter + value
dataToWrite := keyValueToWrite + "\n" + remainingSlice
bytesToWrite := [] | {
if len(fileName) == 0 {
err := errors.New("file name cannot be empty")
return err
}
fileType, err := getFileType(fileName)
c.fileName = fileName
if err != nil {
return err
}
c.fileType = fileType
c.setDelimitor()
cfgFile, err := os.Open(fileName)
defer cfgFile.Close()
if err != nil {
return err
}
c.Parse(cfgFile)
return nil
} | identifier_body |
cfgparse.go | }
numOfBytes = len(buff)
filePos = filePos + int64(numOfBytes) + 1
line := strings.TrimFunc(string(buff), unicode.IsSpace)
lineNo++
if strings.HasPrefix(line, "#") || line == "" {
continue
}
if isSection(line) {
sectionHeader := sectionRegexp.FindStringSubmatch(line)[1]
curSection = section{}
if c.isSectionAlreadyExists(sectionHeader) {
errMessage := fmt.Sprintf("Parsing Error: Duplicate section %s occured at line %d",sectionHeader, lineNo)
panic(errMessage)
}
curSection.name = sectionHeader
curSection.items = make(map[string]string)
curSection.filePosition = filePos
if c.sections == nil {
c.sections = make(map[string]section)
}
c.sections[curSection.name] = curSection
} else if isKeyValue(line) {
sectionValue := keyValueRegexp.FindStringSubmatch(line)[0]
key, value := getKeyValuefromSectionValue(sectionValue, c.delimeter, lineNo)
pos := strings.Index(";", value) // Checking for comments
if pos > -1 {
if v := value[pos-1]; unicode.IsSpace(rune(v)) {
value = value[:pos-1]
}
}
curSection.items[key] = value
}
}
}
func (c *CfgParser) GetAllSections() []string {
sections := []string{}
for section := range c.sections {
sections = append(sections, section)
}
return sections
}
func (c *CfgParser) Items(section string) map[string]string {
sectionValue, ok := c.sections[section]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", section)
panic(errMessage)
}
return sectionValue.items
}
func (c *CfgParser) Get(sectionName string, key string) string {
sectionValue, ok := c.sections[sectionName]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", sectionName)
panic(errMessage)
}
value, ok := sectionValue.items[key]
if !ok {
errMessage := fmt.Sprintf("No such key %s exists in section %s", key, sectionName)
panic(errMessage)
}
return c.interpolate(sectionName, key, value)
}
func (c *CfgParser) GetBool(section string, key string) (bool, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseBool(value); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type bool", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) GetInt(section string, key string) (int64, error) {
value := c.Get(section, key)
if resValue, err := strconv.Atoi(value); err != nil {
return int64(resValue), nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type int64", value)
err := errors.New(ErrMessage)
return int64(resValue), err
}
}
func (c *CfgParser) GetFloat(section string, key string) (float64, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseFloat(value, 64); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type float64", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) AddSection(sectionName string) error {
newSection := section{}
if c.isSectionAlreadyExists(sectionName) {
errMessage := fmt.Sprintf("Cannot add section %s already exits", sectionName)
err := errors.New(errMessage)
return err
}
c.mutex.Lock()
newSection.name = sectionName
newSection.items = make(map[string]string)
if c.sections == nil {
c.sections = make(map[string]section)
}
f, err := os.OpenFile(c.fileName, os.O_APPEND|os.O_WRONLY, 0644)
defer f.Close()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
writer := bufio.NewWriter(f)
//TODO: add two new lines only if last char in file is not '\n'
buff := "\n\n[" + sectionName + "]\n"
fileStat, err := f.Stat()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
filePosition := fileStat.Size()
newSection.filePosition = filePosition + int64(len(buff))
c.sections[newSection.name] = newSection
_, writerErr := writer.WriteString(buff)
if writerErr != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while writing into file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
err = writer.Flush()
if err != nil {
return err
}
c.mutex.Unlock()
return nil
}
// TODO: Apply locks while writing also load data in memory after writing into file, along with updating the file positions
// TODO: find the best method to update filepositions ( reload the entire file , change all file positions greater than the current writing section
func (c *CfgParser) Set(sectionName string, key string, value string) {
if !c.isSectionAlreadyExists(sectionName) {
err := c.AddSection(sectionName)
if err != nil {
panic("Error adding section name")
}
}
filePos, err := c.getSectionPos(sectionName)
fReader, err := os.OpenFile(c.fileName, os.O_RDONLY, 0644)
if err != nil {
panic("Error accessing the config file")
}
defer fReader.Close()
fileStat, err := fReader.Stat()
if err != nil {
panic("Error accessing the config file")
}
fileSize := fileStat.Size()
sectionPositon, err := fReader.Seek(int64(filePos), 0)
if err != nil {
panic("Error accessing the config file")
}
extraFileSize := fileSize - sectionPositon + 1
buffBytes := make([]byte, extraFileSize)
_ , err = fReader.ReadAt(buffBytes, sectionPositon)
var remainingSlice string
if err != io.EOF {
errMessage := fmt.Sprintf("Error Reading the config file %v", err)
panic(errMessage)
}
if len(buffBytes) == 0 {
remainingSlice = ""
} else {
remainingSlice = string(buffBytes)[:len(buffBytes)-1]
}
keyValueToWrite := key + c.delimeter + value
dataToWrite := keyValueToWrite + "\n" + remainingSlice
bytesToWrite := []byte(dataToWrite)
c.mutex.Lock()
fWriter, err := os.OpenFile(c.fileName, os.O_WRONLY, 0644)
if err != nil {
panic("Error accessing the config file")
}
bytesAdded , wErr := fWriter.WriteAt(bytesToWrite, sectionPositon)
if wErr != nil {
errMsg := fmt.Sprintf("Error Writing to config file %v", wErr)
panic(errMsg)
}
c.sections[sectionName].items[key] = value
fWriter.Close()
noOfExtraBytes := bytesAdded - len(remainingSlice)
c.reOrderFilePositions(sectionPositon, noOfExtraBytes)
c.mutex.Unlock()
}
func (c *CfgParser) reOrderFilePositions(sectionPosition int64, bytesAdded int) {
for sec, secObj := range c.sections {
if secObj.filePosition > sectionPosition {
secObj.filePosition = c.sections[sec].filePosition + int64(bytesAdded)
c.sections[sec] = secObj
}
}
}
func (c *CfgParser) interpolate(sectionName string, key string, value string) string {
for depth := 0; depth < MaxDepth; depth++ {
if strings.Contains(value,"%(") {
value = interpolateRegexp.ReplaceAllStringFunc(value, func(m string) string {
match := interpolateRegexp.FindAllStringSubmatch(m, 1)[0][1]
replacement := c.Get(sectionName, match)
return replacement
})
}
}
return value
}
func (c *CfgParser) getSectionPos(sectionName string) (int64, error){
for sec, _ := range c.sections {
if sec == sectionName {
return c.sections[sectionName].filePosition, nil
}
}
return 0, errors.New("No section exists")
}
func isSection(line string) bool {
match := sectionRegexp.MatchString(line)
return match
}
func (c *CfgParser) isSectionAlreadyExists(sectionName string) bool {
for section, _ := range c.sections {
if section == sectionName {
return true
}
}
return false
}
func | isKeyValue | identifier_name |
|
cfgparse.go | int64
var numOfBytes int
for {
buff, _, err := reader.ReadLine()
if err != nil {
break
}
if len(buff) == 0 {
filePos++
continue
}
numOfBytes = len(buff)
filePos = filePos + int64(numOfBytes) + 1
line := strings.TrimFunc(string(buff), unicode.IsSpace)
lineNo++
if strings.HasPrefix(line, "#") || line == "" {
continue
}
if isSection(line) {
sectionHeader := sectionRegexp.FindStringSubmatch(line)[1]
curSection = section{}
if c.isSectionAlreadyExists(sectionHeader) {
errMessage := fmt.Sprintf("Parsing Error: Duplicate section %s occured at line %d",sectionHeader, lineNo)
panic(errMessage)
}
curSection.name = sectionHeader
curSection.items = make(map[string]string)
curSection.filePosition = filePos
if c.sections == nil {
c.sections = make(map[string]section)
}
c.sections[curSection.name] = curSection
} else if isKeyValue(line) {
sectionValue := keyValueRegexp.FindStringSubmatch(line)[0]
key, value := getKeyValuefromSectionValue(sectionValue, c.delimeter, lineNo)
pos := strings.Index(";", value) // Checking for comments
if pos > -1 {
if v := value[pos-1]; unicode.IsSpace(rune(v)) {
value = value[:pos-1]
}
}
curSection.items[key] = value
}
}
}
func (c *CfgParser) GetAllSections() []string {
sections := []string{}
for section := range c.sections {
sections = append(sections, section)
}
return sections
}
func (c *CfgParser) Items(section string) map[string]string {
sectionValue, ok := c.sections[section]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", section)
panic(errMessage)
}
return sectionValue.items
}
func (c *CfgParser) Get(sectionName string, key string) string {
sectionValue, ok := c.sections[sectionName]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", sectionName)
panic(errMessage)
}
value, ok := sectionValue.items[key]
if !ok {
errMessage := fmt.Sprintf("No such key %s exists in section %s", key, sectionName)
panic(errMessage)
}
return c.interpolate(sectionName, key, value)
}
func (c *CfgParser) GetBool(section string, key string) (bool, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseBool(value); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type bool", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) GetInt(section string, key string) (int64, error) {
value := c.Get(section, key)
if resValue, err := strconv.Atoi(value); err != nil {
return int64(resValue), nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type int64", value)
err := errors.New(ErrMessage)
return int64(resValue), err
}
}
func (c *CfgParser) GetFloat(section string, key string) (float64, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseFloat(value, 64); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type float64", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) AddSection(sectionName string) error {
newSection := section{}
if c.isSectionAlreadyExists(sectionName) {
errMessage := fmt.Sprintf("Cannot add section %s already exits", sectionName)
err := errors.New(errMessage)
return err
}
c.mutex.Lock()
newSection.name = sectionName
newSection.items = make(map[string]string)
if c.sections == nil {
c.sections = make(map[string]section)
}
f, err := os.OpenFile(c.fileName, os.O_APPEND|os.O_WRONLY, 0644)
defer f.Close()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
writer := bufio.NewWriter(f)
//TODO: add two new lines only if last char in file is not '\n'
buff := "\n\n[" + sectionName + "]\n"
fileStat, err := f.Stat()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
filePosition := fileStat.Size()
newSection.filePosition = filePosition + int64(len(buff))
c.sections[newSection.name] = newSection
_, writerErr := writer.WriteString(buff)
if writerErr != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while writing into file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
err = writer.Flush()
if err != nil {
return err
}
c.mutex.Unlock()
return nil
}
// TODO: Apply locks while writing also load data in memory after writing into file, along with updating the file positions
// TODO: find the best method to update filepositions ( reload the entire file , change all file positions greater than the current writing section
func (c *CfgParser) Set(sectionName string, key string, value string) {
if !c.isSectionAlreadyExists(sectionName) {
err := c.AddSection(sectionName)
if err != nil {
panic("Error adding section name")
}
}
filePos, err := c.getSectionPos(sectionName)
fReader, err := os.OpenFile(c.fileName, os.O_RDONLY, 0644)
if err != nil {
panic("Error accessing the config file")
}
defer fReader.Close()
fileStat, err := fReader.Stat()
if err != nil {
panic("Error accessing the config file")
}
fileSize := fileStat.Size()
sectionPositon, err := fReader.Seek(int64(filePos), 0)
if err != nil {
panic("Error accessing the config file")
}
extraFileSize := fileSize - sectionPositon + 1
buffBytes := make([]byte, extraFileSize)
_ , err = fReader.ReadAt(buffBytes, sectionPositon)
var remainingSlice string
if err != io.EOF {
errMessage := fmt.Sprintf("Error Reading the config file %v", err)
panic(errMessage)
}
if len(buffBytes) == 0 {
remainingSlice = ""
} else {
remainingSlice = string(buffBytes)[:len(buffBytes)-1]
}
keyValueToWrite := key + c.delimeter + value
dataToWrite := keyValueToWrite + "\n" + remainingSlice
bytesToWrite := []byte(dataToWrite)
c.mutex.Lock()
fWriter, err := os.OpenFile(c.fileName, os.O_WRONLY, 0644)
if err != nil {
panic("Error accessing the config file")
}
bytesAdded , wErr := fWriter.WriteAt(bytesToWrite, sectionPositon)
if wErr != nil {
errMsg := fmt.Sprintf("Error Writing to config file %v", wErr)
panic(errMsg)
}
c.sections[sectionName].items[key] = value
fWriter.Close()
noOfExtraBytes := bytesAdded - len(remainingSlice)
c.reOrderFilePositions(sectionPositon, noOfExtraBytes)
c.mutex.Unlock()
}
func (c *CfgParser) reOrderFilePositions(sectionPosition int64, bytesAdded int) {
for sec, secObj := range c.sections {
if secObj.filePosition > sectionPosition {
secObj.filePosition = c.sections[sec].filePosition + int64(bytesAdded)
c.sections[sec] = secObj
}
}
}
func (c *CfgParser) interpolate(sectionName string, key string, value string) string {
for depth := 0; depth < MaxDepth; depth++ {
if strings.Contains(value,"%(") {
value = interpolateRegexp.ReplaceAllStringFunc(value, func(m string) string {
match := interpolateRegexp.FindAllStringSubmatch(m, 1)[0][1]
replacement := c.Get(sectionName, match)
return replacement
})
}
}
return value
}
func (c *CfgParser) getSectionPos(sectionName string) (int64, error){
for sec, _ := range c.sections {
if sec == sectionName {
return c.sections[sectionName].filePosition, nil
}
}
return 0, errors.New("No section exists")
}
func isSection(line string) bool {
match := sectionRegexp.MatchString(line) | return match
} | random_line_split |
|
cfgparse.go | (c *CfgParser) Parse(cfgFile *os.File) {
reader := bufio.NewReader(cfgFile)
var lineNo uint
var curSection section
var filePos int64
var numOfBytes int
for {
buff, _, err := reader.ReadLine()
if err != nil {
break
}
if len(buff) == 0 {
filePos++
continue
}
numOfBytes = len(buff)
filePos = filePos + int64(numOfBytes) + 1
line := strings.TrimFunc(string(buff), unicode.IsSpace)
lineNo++
if strings.HasPrefix(line, "#") || line == "" {
continue
}
if isSection(line) {
sectionHeader := sectionRegexp.FindStringSubmatch(line)[1]
curSection = section{}
if c.isSectionAlreadyExists(sectionHeader) {
errMessage := fmt.Sprintf("Parsing Error: Duplicate section %s occured at line %d",sectionHeader, lineNo)
panic(errMessage)
}
curSection.name = sectionHeader
curSection.items = make(map[string]string)
curSection.filePosition = filePos
if c.sections == nil {
c.sections = make(map[string]section)
}
c.sections[curSection.name] = curSection
} else if isKeyValue(line) {
sectionValue := keyValueRegexp.FindStringSubmatch(line)[0]
key, value := getKeyValuefromSectionValue(sectionValue, c.delimeter, lineNo)
pos := strings.Index(";", value) // Checking for comments
if pos > -1 {
if v := value[pos-1]; unicode.IsSpace(rune(v)) {
value = value[:pos-1]
}
}
curSection.items[key] = value
}
}
}
func (c *CfgParser) GetAllSections() []string {
sections := []string{}
for section := range c.sections {
sections = append(sections, section)
}
return sections
}
func (c *CfgParser) Items(section string) map[string]string {
sectionValue, ok := c.sections[section]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", section)
panic(errMessage)
}
return sectionValue.items
}
func (c *CfgParser) Get(sectionName string, key string) string {
sectionValue, ok := c.sections[sectionName]
if !ok {
errMessage := fmt.Sprintf("No such section %s exists", sectionName)
panic(errMessage)
}
value, ok := sectionValue.items[key]
if !ok {
errMessage := fmt.Sprintf("No such key %s exists in section %s", key, sectionName)
panic(errMessage)
}
return c.interpolate(sectionName, key, value)
}
func (c *CfgParser) GetBool(section string, key string) (bool, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseBool(value); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type bool", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) GetInt(section string, key string) (int64, error) {
value := c.Get(section, key)
if resValue, err := strconv.Atoi(value); err != nil {
return int64(resValue), nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type int64", value)
err := errors.New(ErrMessage)
return int64(resValue), err
}
}
func (c *CfgParser) GetFloat(section string, key string) (float64, error) {
value := c.Get(section, key)
if resValue, err := strconv.ParseFloat(value, 64); err != nil {
return resValue, nil
} else {
ErrMessage := fmt.Sprintf("Cannot convert %s to type float64", value)
err := errors.New(ErrMessage)
return resValue, err
}
}
func (c *CfgParser) AddSection(sectionName string) error {
newSection := section{}
if c.isSectionAlreadyExists(sectionName) {
errMessage := fmt.Sprintf("Cannot add section %s already exits", sectionName)
err := errors.New(errMessage)
return err
}
c.mutex.Lock()
newSection.name = sectionName
newSection.items = make(map[string]string)
if c.sections == nil {
c.sections = make(map[string]section)
}
f, err := os.OpenFile(c.fileName, os.O_APPEND|os.O_WRONLY, 0644)
defer f.Close()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
writer := bufio.NewWriter(f)
//TODO: add two new lines only if last char in file is not '\n'
buff := "\n\n[" + sectionName + "]\n"
fileStat, err := f.Stat()
if err != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while opening file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
filePosition := fileStat.Size()
newSection.filePosition = filePosition + int64(len(buff))
c.sections[newSection.name] = newSection
_, writerErr := writer.WriteString(buff)
if writerErr != nil {
errMesssage := fmt.Sprintf("Somthing went wrong while writing into file %s. Check if is opened in other places", c.fileName)
err = errors.New(errMesssage)
return err
}
err = writer.Flush()
if err != nil {
return err
}
c.mutex.Unlock()
return nil
}
// TODO: Apply locks while writing also load data in memory after writing into file, along with updating the file positions
// TODO: find the best method to update filepositions ( reload the entire file , change all file positions greater than the current writing section
func (c *CfgParser) Set(sectionName string, key string, value string) {
if !c.isSectionAlreadyExists(sectionName) {
err := c.AddSection(sectionName)
if err != nil {
panic("Error adding section name")
}
}
filePos, err := c.getSectionPos(sectionName)
fReader, err := os.OpenFile(c.fileName, os.O_RDONLY, 0644)
if err != nil {
panic("Error accessing the config file")
}
defer fReader.Close()
fileStat, err := fReader.Stat()
if err != nil {
panic("Error accessing the config file")
}
fileSize := fileStat.Size()
sectionPositon, err := fReader.Seek(int64(filePos), 0)
if err != nil {
panic("Error accessing the config file")
}
extraFileSize := fileSize - sectionPositon + 1
buffBytes := make([]byte, extraFileSize)
_ , err = fReader.ReadAt(buffBytes, sectionPositon)
var remainingSlice string
if err != io.EOF {
errMessage := fmt.Sprintf("Error Reading the config file %v", err)
panic(errMessage)
}
if len(buffBytes) == 0 {
remainingSlice = ""
} else {
remainingSlice = string(buffBytes)[:len(buffBytes)-1]
}
keyValueToWrite := key + c.delimeter + value
dataToWrite := keyValueToWrite + "\n" + remainingSlice
bytesToWrite := []byte(dataToWrite)
c.mutex.Lock()
fWriter, err := os.OpenFile(c.fileName, os.O_WRONLY, 0644)
if err != nil {
panic("Error accessing the config file")
}
bytesAdded , wErr := fWriter.WriteAt(bytesToWrite, sectionPositon)
if wErr != nil {
errMsg := fmt.Sprintf("Error Writing to config file %v", wErr)
panic(errMsg)
}
c.sections[sectionName].items[key] = value
fWriter.Close()
noOfExtraBytes := bytesAdded - len(remainingSlice)
c.reOrderFilePositions(sectionPositon, noOfExtraBytes)
c.mutex.Unlock()
}
func (c *CfgParser) reOrderFilePositions(sectionPosition int64, bytesAdded int) {
for sec, secObj := range c.sections {
if secObj.filePosition > sectionPosition {
secObj.filePosition = c.sections[sec].filePosition + int64(bytesAdded)
c.sections[sec] = secObj
}
}
}
func (c *CfgParser) interpolate(sectionName string, key string, value string) string {
for depth := 0; depth < MaxDepth; depth++ {
if strings.Contains(value,"%(") {
value = interpolateRegexp.ReplaceAllStringFunc(value, func(m string) string {
match := interpolateRegexp.FindAllStringSubmatch(m, 1)[0][1]
replacement := c.Get(sectionName, match)
return replacement
})
}
}
return value
}
func (c *CfgParser) getSectionPos(sectionName string) (int64, error){
for sec, _ := range c.sections {
if sec == sectionName | {
return c.sections[sectionName].filePosition, nil
} | conditional_block |
|
partition_hash_test.go | BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3) + col1 % (7*24));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY LINEAR HASH( YEAR(col3)) PARTITIONS 6;",
"create table t2 (a date, b datetime) partition by hash (EXTRACT(YEAR_MONTH FROM a)) partitions 7",
"create table t3 (a int, b int) partition by hash(ceiling(a-b)) partitions 10",
"create table t4 (a int, b int) partition by hash(floor(a-b)) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(store_id)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col1)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col3)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1+10)
PARTITIONS 4;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY LINEAR HASH( YEAR(hired) )
PARTITIONS 4;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
}
}
func TestHashPartition2(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
"CREATE TABLE t2 (col1 INT, col2 CHAR(5)) " +
"PARTITION BY HASH(col1) PARTITIONS 1 " +
"( PARTITION p0 " +
"ENGINE = 'engine_name' " +
"COMMENT = 'p0_comment' " +
"DATA DIRECTORY = 'data_dir' " +
"INDEX DIRECTORY = 'data_dir' " +
"MAX_ROWS = 100 " +
"MIN_ROWS = 100 " +
"TABLESPACE = space " +
"(SUBPARTITION sub_name) " +
");",
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
}
}
func TestHashPartitionError(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
// In MySQL, RANGE, LIST, and HASH partitions require that the partitioning key must be of type INT or be returned through an expression.
// For the following Partition table test case, in matrixone, when the parameter of ceil function is of decimal type and the return value type is of decimal type,
// it cannot be used as the partition expression type, but in MySQL, when the parameter of ceil function is of decimal type and the return
// value is of int type, it can be used as the partition expression type
"create table p_hash_table_08(col1 tinyint,col2 varchar(30),col3 decimal(6,3))partition by hash(ceil(col3)) partitions 2;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col2);",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(12);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3)) PARTITIONS 4 SUBPARTITION BY KEY(col1);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY HASH( YEAR(col3) ) PARTITIONS;",
"create table t3 (a int, b int) partition by hash(ceiling(a-b) + 23.5) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(4)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
UNIQUE KEY (col1),
UNIQUE KEY (col3)
)
PARTITION BY HASH(col1+col3)
PARTITIONS 4;`,
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(year(col3))
partitions 8;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) PARTITION BY HASH(store_id) PARTITIONS 102400000000;`,
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(col4)
partitions 8;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
_, err := buildSingleStmt(mock, t, sql)
t.Log(sql)
require.NotNil(t, err)
t.Log(err)
if err == nil {
t.Fatalf("%+v", err)
}
}
}
func Test_hash_buildPartitionDefs(t *testing.T) | {
type kase struct {
sql string
def *plan.PartitionByDef
wantErr bool
}
kases := []kase{
{
sql: "create table a(col1 int) partition by hash(col1) (partition x1, partition x2);",
def: &plan.PartitionByDef{
PartitionNum: 2,
},
wantErr: false,
},
{
sql: "create table a(col1 int) partition by hash(col1) (partition x1, partition x2);",
def: &plan.PartitionByDef{
PartitionNum: 1,
}, | identifier_body |
|
partition_hash_test.go | .Fatalf("%+v", err)
}
outPutPlan(logicPlan, true, t)
}
// -----------------------Hash Partition-------------------------------------
func TestHashPartition(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1) PARTITIONS 4;",
//"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3) + col1 % (7*24));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY LINEAR HASH( YEAR(col3)) PARTITIONS 6;",
"create table t2 (a date, b datetime) partition by hash (EXTRACT(YEAR_MONTH FROM a)) partitions 7",
"create table t3 (a int, b int) partition by hash(ceiling(a-b)) partitions 10",
"create table t4 (a int, b int) partition by hash(floor(a-b)) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(store_id)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col1)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col3)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1+10)
PARTITIONS 4;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY LINEAR HASH( YEAR(hired) )
PARTITIONS 4;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
}
}
func | (t *testing.T) {
// HASH(expr) Partition
sqls := []string{
"CREATE TABLE t2 (col1 INT, col2 CHAR(5)) " +
"PARTITION BY HASH(col1) PARTITIONS 1 " +
"( PARTITION p0 " +
"ENGINE = 'engine_name' " +
"COMMENT = 'p0_comment' " +
"DATA DIRECTORY = 'data_dir' " +
"INDEX DIRECTORY = 'data_dir' " +
"MAX_ROWS = 100 " +
"MIN_ROWS = 100 " +
"TABLESPACE = space " +
"(SUBPARTITION sub_name) " +
");",
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
}
}
func TestHashPartitionError(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
// In MySQL, RANGE, LIST, and HASH partitions require that the partitioning key must be of type INT or be returned through an expression.
// For the following Partition table test case, in matrixone, when the parameter of ceil function is of decimal type and the return value type is of decimal type,
// it cannot be used as the partition expression type, but in MySQL, when the parameter of ceil function is of decimal type and the return
// value is of int type, it can be used as the partition expression type
"create table p_hash_table_08(col1 tinyint,col2 varchar(30),col3 decimal(6,3))partition by hash(ceil(col3)) partitions 2;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col2);",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(12);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3)) PARTITIONS 4 SUBPARTITION BY KEY(col1);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY HASH( YEAR(col3) ) PARTITIONS;",
"create table t3 (a int, b int) partition by hash(ceiling(a-b) + 23.5) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(4)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
UNIQUE KEY (col1),
UNIQUE KEY (col3)
)
PARTITION BY HASH(col1+col3)
PARTITIONS 4;`,
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(year(col3))
partitions 8;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) PARTITION BY HASH(store_id) PARTITIONS 102400000000;`,
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(col4)
partitions 8;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
_, err := buildSingleStmt(mock, t, sql)
t.Log(sql)
require.NotNil(t, err)
t.Log(err)
if err == nil {
t.Fatalf("%+v", err)
}
}
}
func Test_hash_buildPartitionDefs(t *testing.T) {
type kase struct {
sql string
def *plan.PartitionByDef | TestHashPartition2 | identifier_name |
partition_hash_test.go | .Fatalf("%+v", err)
}
outPutPlan(logicPlan, true, t)
}
// -----------------------Hash Partition-------------------------------------
func TestHashPartition(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1) PARTITIONS 4;",
//"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3) + col1 % (7*24));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY LINEAR HASH( YEAR(col3)) PARTITIONS 6;",
"create table t2 (a date, b datetime) partition by hash (EXTRACT(YEAR_MONTH FROM a)) partitions 7",
"create table t3 (a int, b int) partition by hash(ceiling(a-b)) partitions 10",
"create table t4 (a int, b int) partition by hash(floor(a-b)) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(store_id)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col1)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col3)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1+10)
PARTITIONS 4;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY LINEAR HASH( YEAR(hired) )
PARTITIONS 4;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
}
}
func TestHashPartition2(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
"CREATE TABLE t2 (col1 INT, col2 CHAR(5)) " +
"PARTITION BY HASH(col1) PARTITIONS 1 " +
"( PARTITION p0 " +
"ENGINE = 'engine_name' " +
"COMMENT = 'p0_comment' " +
"DATA DIRECTORY = 'data_dir' " +
"INDEX DIRECTORY = 'data_dir' " +
"MAX_ROWS = 100 " +
"MIN_ROWS = 100 " +
"TABLESPACE = space " +
"(SUBPARTITION sub_name) " +
");",
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
}
}
func TestHashPartitionError(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
// In MySQL, RANGE, LIST, and HASH partitions require that the partitioning key must be of type INT or be returned through an expression.
// For the following Partition table test case, in matrixone, when the parameter of ceil function is of decimal type and the return value type is of decimal type,
// it cannot be used as the partition expression type, but in MySQL, when the parameter of ceil function is of decimal type and the return
// value is of int type, it can be used as the partition expression type
"create table p_hash_table_08(col1 tinyint,col2 varchar(30),col3 decimal(6,3))partition by hash(ceil(col3)) partitions 2;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col2);",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(12);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3)) PARTITIONS 4 SUBPARTITION BY KEY(col1);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY HASH( YEAR(col3) ) PARTITIONS;",
"create table t3 (a int, b int) partition by hash(ceiling(a-b) + 23.5) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(4)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
UNIQUE KEY (col1),
UNIQUE KEY (col3)
)
PARTITION BY HASH(col1+col3)
PARTITIONS 4;`,
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(year(col3))
partitions 8;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01', |
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(col4)
partitions 8;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
_, err := buildSingleStmt(mock, t, sql)
t.Log(sql)
require.NotNil(t, err)
t.Log(err)
if err == nil {
t.Fatalf("%+v", err)
}
}
}
func Test_hash_buildPartitionDefs(t *testing.T) {
type kase struct {
sql string
def *plan.PartitionByDef
| separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) PARTITION BY HASH(store_id) PARTITIONS 102400000000;`, | random_line_split |
partition_hash_test.go | .Fatalf("%+v", err)
}
outPutPlan(logicPlan, true, t)
}
// -----------------------Hash Partition-------------------------------------
func TestHashPartition(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1) PARTITIONS 4;",
//"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3) + col1 % (7*24));",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY LINEAR HASH( YEAR(col3)) PARTITIONS 6;",
"create table t2 (a date, b datetime) partition by hash (EXTRACT(YEAR_MONTH FROM a)) partitions 7",
"create table t3 (a int, b int) partition by hash(ceiling(a-b)) partitions 10",
"create table t4 (a int, b int) partition by hash(floor(a-b)) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(store_id)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col1)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col3)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1+10)
PARTITIONS 4;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY LINEAR HASH( YEAR(hired) )
PARTITIONS 4;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls |
}
func TestHashPartition2(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
"CREATE TABLE t2 (col1 INT, col2 CHAR(5)) " +
"PARTITION BY HASH(col1) PARTITIONS 1 " +
"( PARTITION p0 " +
"ENGINE = 'engine_name' " +
"COMMENT = 'p0_comment' " +
"DATA DIRECTORY = 'data_dir' " +
"INDEX DIRECTORY = 'data_dir' " +
"MAX_ROWS = 100 " +
"MIN_ROWS = 100 " +
"TABLESPACE = space " +
"(SUBPARTITION sub_name) " +
");",
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
}
}
func TestHashPartitionError(t *testing.T) {
// HASH(expr) Partition
sqls := []string{
// In MySQL, RANGE, LIST, and HASH partitions require that the partitioning key must be of type INT or be returned through an expression.
// For the following Partition table test case, in matrixone, when the parameter of ceil function is of decimal type and the return value type is of decimal type,
// it cannot be used as the partition expression type, but in MySQL, when the parameter of ceil function is of decimal type and the return
// value is of int type, it can be used as the partition expression type
"create table p_hash_table_08(col1 tinyint,col2 varchar(30),col3 decimal(6,3))partition by hash(ceil(col3)) partitions 2;",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col2);",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(col2) PARTITIONS 4;",
"CREATE TABLE t1 (col1 INT, col2 DECIMAL) PARTITION BY HASH(12);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH (YEAR(col3)) PARTITIONS 4 SUBPARTITION BY KEY(col1);",
"CREATE TABLE t1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY HASH( YEAR(col3) ) PARTITIONS;",
"create table t3 (a int, b int) partition by hash(ceiling(a-b) + 23.5) partitions 10",
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
PARTITION BY HASH(4)
PARTITIONS 4;`,
`CREATE TABLE t1 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1, col2)
)
PARTITION BY HASH(col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
PRIMARY KEY (col1)
)
PARTITION BY HASH(col1 + col3)
PARTITIONS 4;`,
`CREATE TABLE t2 (
col1 INT NOT NULL,
col2 DATE NOT NULL,
col3 INT NOT NULL,
col4 INT NOT NULL,
UNIQUE KEY (col1),
UNIQUE KEY (col3)
)
PARTITION BY HASH(col1+col3)
PARTITIONS 4;`,
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(year(col3))
partitions 8;`,
`CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) PARTITION BY HASH(store_id) PARTITIONS 102400000000;`,
`create table p_hash_table_03(
col1 bigint ,
col2 date default '1970-01-01',
col3 varchar(30)
)
partition by hash(col4)
partitions 8;`,
}
mock := NewMockOptimizer(false)
for _, sql := range sqls {
_, err := buildSingleStmt(mock, t, sql)
t.Log(sql)
require.NotNil(t, err)
t.Log(err)
if err == nil {
t.Fatalf("%+v", err)
}
}
}
func Test_hash_buildPartitionDefs(t *testing.T) {
type kase struct {
sql string
def *plan.PartitionBy | {
t.Log(sql)
_, err := buildSingleStmt(mock, t, sql)
require.Nil(t, err)
if err != nil {
t.Fatalf("%+v", err)
}
} | conditional_block |
register.py | f_fn = open("first_name", "r")
all_fn = []
for line in f_fn.readlines():
if len(line) > 2 and "-" not in line:
all_fn.append(line.strip())
f_fn.close()
f_ln = open("last_name", "r")
all_ln = []
for line in f_ln.readlines():
if len(line) > 2 and "-" not in line:
all_ln.append(line.strip())
f_ln.close()
def first_name(self):
first_name = random.sample(self.all_fn,1)[0]
return first_name
def last_name(self):
last_name = random.sample(self.all_ln,1)[0]
return last_name
def mail_account(self,first_name,last_name):
mail_name = first_name.lower() + last_name.lower() + ''.join(random.sample(string.digits, 4))
return mail_name
ra = random_account()
header_dict = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'}
def getRandomString():
return ''.join(random.sample(string.ascii_letters + string.digits, 8))
def smsLogin():
global token
# 登陆/获取TOKEN
username = 'qiao5174' # 账号
password = 'nhZEpdZT9eiQGuU' # 密码
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=login&username=' + \
username+'&password='+password
TOKEN1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if TOKEN1.split('|')[0] == 'success':
TOKEN = TOKEN1.split('|')[1]
print('TOKEN是'+TOKEN)
token = TOKEN
return True
else:
print('获取TOKEN错误,错误代码'+TOEKN1+'。代码释义:1001:参数token不能为空;1002:参数action不能为空;1003:参数action错误;1004:token失效;1005:用户名或密码错误;1006:用户名不能为空;1007:密码不能为空;1008:账户余额不足;1009:账户被禁用;1010:参数错误;1011:账户待审核;1012:登录数达到上限')
return False
def getPhNumber():
if token.strip():
global phoneNumber, isRelese
EXCLUDENO = '' # 排除号段170_171
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getmobile&token=' + \
token+'&itemid='+ITEMID+'&excludeno='+EXCLUDENO
MOBILE1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if MOBILE1.split('|')[0] == 'success':
MOBILE = MOBILE1.split('|')[1]
print('获取号码是:\n'+MOBILE)
phoneNumber = MOBILE
isRelese = False
return True
else:
print('获取TOKEN错误,错误代码'+MOBILE1)
return False
else:
print('获取手机号码失败,token为空重新获取')
smsLogin()
return False
def txt_wrap_by(start_str, end, html):
start = html.find(start_str)
if start >= 0:
start += len(start_str)
end = html.find(end, start)
if end >= 0:
return html[start:end].strip()
def getMsg():
if token.strip():
global smsCode
global isRelese
TOKEN = token # TOKEN
MOBILE = phoneNumber # 手机号码
WAIT = 100 # 接受短信时长60s
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getsms&token=' + \
TOKEN+'&itemid='+ITEMID+'&mobile='+MOBILE+'&release=1'
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME1 = time.time()
TIME2 = time.time()
ROUND = 1
while (TIME2-TIME1) < WAIT and not text1.split('|')[0] == "success":
time.sleep(5)
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME2 = time.time()
ROUND = ROUND+1
ROUND = str(ROUND)
if text1.split('|')[0] == "success":
text = text1.split('|')[1]
TIME = str(round(TIME2-TIME1, 1))
print('短信内容是'+text+'\n耗费时长'+TIME+'s,循环数是'+ROUND)
start = text.find('G-')
smsCode = text[(start+2):(start+8)]
isRelese = True
return True
else:
print('获取短信超时,错误代码是'+text1+',循环数是'+ROUND)
return False
else:
print('获取手机消息失败,token为空重新获取')
smsLogin()
return False
def releaseNumber():
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=release&token=' + \
token+'&itemid='+ITEMID+'&mobile='+phoneNumber
RELEASE = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if RELEASE == 'success':
print('号码成功释放:' + phoneNumber)
return True
print('号码释放失败:'+RELEASE)
return False
# 前台开启浏览器模式
def openChrome():
# 加启动配置
option = webdriver.ChromeOptions()
option.add_argument('disable-infobars')
# option.add_argument("--proxy-server=http://103.218.240.182:80")
driver = webdriver.Chrome(chrome_options=option)
# 打开chrome浏览器
# options = webdriver.ChromeOptions()
#
# ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Mobile/14A456 MicroMessenger/6.5.7 NetType/WIFI Language/zh_CN'
#
# options.add_argument('user-agent=' + ua)
#
# driver = webdriver.Chrome(options=options)
return driver
# 前台开启浏览器模式
def openEdge():
driver = webdriver.Edge()
return driver
def register(driver):
global reginfo, isRelese, isChrome
headingText = driver.find_element_by_id("headingText")
if headingText.text == "创建您的 Google 帐号":
# 找到输入框并输入查询内容
last_name = driver.find_element_by_id("lastName")
reginfo['lastName'] = ra.last_name()
last_name.clear()
last_name.send_keys(reginfo['lastName'])
firstName = driver.find_element_by_id("firstName")
reginfo['firstName'] = ra.first_name()
firstName.clear()
firstName.send_keys(reginfo['firstName'])
user_name = driver.find_element_by_id("username")
reginfo['username'] = ra.mail_account(reginfo['firstName'],reginfo['lastName'])
reginfo['email'] = reginfo['username'] + '@gmail.com'
user_name.clear()
user_name.send_keys(reginfo['username'])
passwd = driver.find_element_by_name("Passwd")
reginfo['password'] = getRandomString()
passwd.clear()
passwd.send_keys(reginfo['password'])
confirm_passwd = driver.find_element_by_name("ConfirmPasswd")
confirm_passwd.clear()
confirm_passwd.send_keys(reginfo['password'])
accountDetailsNext = driver.find_element_by_id("accountDetailsNext")
# 提交表单
accountDetailsNext.click()
elif headingText.text == "验证您的手机号码":
try:
code = driver.find_element_by_id("code")
except NoSuchElementException as e:
phoneNumberId = driver.find_element_by_id("phoneNumberId")
if not isRelese:
ret = releaseNumber()
if ret:
isRelese = True
# driver.quit()
# openbrowser()
# return
start_timer(driver)
return
ret = getPhNumber()
if not ret:
start_timer(driver)
return
phoneNumberId.clear()
reginfo['phoneNumber'] = phoneNumber
phoneNumberId.send_keys('+86 ' + phoneNumber)
gradsIdvPhoneNext = driver.find_element_by_id("gradsIdvPhoneNext")
gradsIdvPhoneNext.click()
else:
ret = getMsg()
if not ret:
start_timer(driver)
return
code.send_keys(smsCode)
reginfo['smsCode'] = smsCode
gradsIdvVerifyNext = driver.find_element_by_id(
"gradsIdvVerifyNext")
gradsIdvVerifyNext.click()
elif headingText.text == "欢迎使用 Google":
year = driver.find_element_by_id('year')
year | ccount():
| identifier_name |
|
register.py | _ln,1)[0]
return last_name
def mail_account(self,first_name,last_name):
mail_name = first_name.lower() + last_name.lower() + ''.join(random.sample(string.digits, 4))
return mail_name
ra = random_account()
header_dict = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'}
def getRandomString():
return ''.join(random.sample(string.ascii_letters + string.digits, 8))
def smsLogin():
global token
# 登陆/获取TOKEN
username = 'qiao5174' # 账号
password = 'nhZEpdZT9eiQGuU' # 密码
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=login&username=' + \
username+'&password='+password
TOKEN1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if TOKEN1.split('|')[0] == 'success':
TOKEN = TOKEN1.split('|')[1]
print('TOKEN是'+TOKEN)
token = TOKEN
return True
else:
print('获取TOKEN错误,错误代码'+TOEKN1+'。代码释义:1001:参数token不能为空;1002:参数action不能为空;1003:参数action错误;1004:token失效;1005:用户名或密码错误;1006:用户名不能为空;1007:密码不能为空;1008:账户余额不足;1009:账户被禁用;1010:参数错误;1011:账户待审核;1012:登录数达到上限')
return False
def getPhNumber():
if token.strip():
global phoneNumber, isRelese
EXCLUDENO = '' # 排除号段170_171
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getmobile&token=' + \
token+'&itemid='+ITEMID+'&excludeno='+EXCLUDENO
MOBILE1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if MOBILE1.split('|')[0] == 'success':
MOBILE = MOBILE1.split('|')[1]
print('获取号码是:\n'+MOBILE)
phoneNumber = MOBILE
isRelese = False
return True
else:
print('获取TOKEN错误,错误代码'+MOBILE1)
return False
else:
print('获取手机号码失败,token为空重新获取')
smsLogin()
return False
def txt_wrap_by(start_str, end, html):
start = html.find(start_str)
if start >= 0:
start += len(start_str)
end = html.find(end, start)
if end >= 0:
return html[start:end].strip()
def getMsg():
if token.strip():
global smsCode
global isRelese
TOKEN = token # TOKEN
MOBILE = phoneNumber # 手机号码
WAIT = 100 # 接受短信时长60s
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getsms&token=' + \
TOKEN+'&itemid='+ITEMID+'&mobile='+MOBILE+'&release=1'
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME1 = time.time()
TIME2 = time.time()
ROUND = 1
while (TIME2-TIME1) < WAIT and not text1.split('|')[0] == "success":
time.sleep(5)
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME2 = time.time()
ROUND = ROUND+1
ROUND = str(ROUND)
if text1.split('|')[0] == "success":
text = text1.split('|')[1]
TIME = str(round(TIME2-TIME1, 1))
print('短信内容是'+text+'\n耗费时长'+TIME+'s,循环数是'+ROUND)
start = text.find('G-')
smsCode = text[(start+2):(start+8)]
isRelese = True
return True
else:
print('获取短信超时,错误代码是'+text1+',循环数是'+ROUND)
return False
else:
print('获取手机消息失败,token为空重新获取')
smsLogin()
return False
def releaseNumber():
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=release&token=' + \
token+'&itemid='+ITEMID+'&mobile='+phoneNumber
RELEASE = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if RELEASE == 'success':
print('号码成功释放:' + phoneNumber)
return True
print('号码释放失败:'+RELEASE)
return False
# 前台开启浏览器模式
def openChrome():
# 加启动配置
option = webdriver.ChromeOptions()
option.add_argument('disable-infobars')
# option.add_argument("--proxy-server=http://103.218.240.182:80")
driver = webdriver.Chrome(chrome_options=option)
# 打开chrome浏览器
# options = webdriver.ChromeOptions()
#
# ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Mobile/14A456 MicroMessenger/6.5.7 NetType/WIFI Language/zh_CN'
#
# options.add_argument('user-agent=' + ua)
#
# driver = webdriver.Chrome(options=options)
return driver
# 前台开启浏览器模式
def openEdge():
driver = webdriver.Edge()
return driver
def register(driver):
global reginfo, isRelese, isChrome
headingText = driver.find_element_by_id("headingText")
if headingText.text == "创建您的 Google 帐号":
# 找到输入框并输入查询内容
last_name = driver.find_element_by_id("lastName")
reginfo['lastName'] = ra.last_name()
last_name.clear()
last_name.send_keys(reginfo['lastName'])
firstName = driver.find_element_by_id("firstName")
reginfo['firstName'] = ra.first_name()
firstName.clear()
firstName.send_keys(reginfo['firstName'])
user_name = driver.find_element_by_id("username")
reginfo['username'] = ra.mail_account(reginfo['firstName'],reginfo['lastName'])
reginfo['email'] = reginfo['username'] + '@gmail.com'
user_name.clear()
user_name.send_keys(reginfo['username'])
passwd = driver.find_element_by_name("Passwd")
reginfo['password'] = getRandomString()
passwd.clear()
passwd.send_keys(reginfo['password'])
confirm_passwd = driver.find_element_by_name("ConfirmPasswd")
confirm_passwd.clear()
confirm_passwd.send_keys(reginfo['password'])
accountDetailsNext = driver.find_element_by_id("accountDetailsNext")
# 提交表单
accountDetailsNext.click()
elif headingText.text == "验证您的手机号码":
try:
code = driver.find_element_by_id("code")
except NoSuchElementException as e:
phoneNumberId = driver.find_element_by_id("phoneNumberId")
if not isRelese:
ret = releaseNumber()
if ret:
isRelese = True
# driver.quit()
# openbrowser()
# return
start_timer(driver)
return
ret = getPhNumber()
if not ret:
start_timer(driver)
return
phoneNumberId.clear()
reginfo['phoneNumber'] = phoneNumber
phoneNumberId.send_keys('+86 ' + phoneNumber)
gradsIdvPhoneNext = driver.find_element_by_id("gradsIdvPhoneNext")
gradsIdvPhoneNext.click()
else:
ret = getMsg()
if not ret:
start_timer(driver)
return
code.send_keys(smsCode)
reginfo['smsCode'] = smsCode
gradsIdvVerifyNext = driver.find_element_by_id(
"gradsIdvVerifyNext")
gradsIdvVerifyNext.click()
elif headingText.text == "欢迎使用 Google":
year = driver.find_element_by_id('year')
year.send_keys('1988')
month = driver.find_element_by_id('month') | day = driver.find_element_by_id('day')
day.send_keys('1')
gender = driver.find_element_by_id('gender')
gender.send_keys('不愿透露')
personalDetailsNext = driver.find_element_by_id('personalDetailsNext')
personalDetailsNext.click()
elif headingText.text == "充分利用您的电话号码":
array = driver.find_elements_by_class_name('uBOgn')
for i in range(0, len(array)):
if array[i].text == '跳过':
array[i].click()
elif headingText.text | month.send_keys('1') | random_line_split |
register.py | ,1)[0]
return last_name
def mail_account(self,first_name,last_name):
mail_name = first_name.lower() + last_name.lower() + ''.join(random.sample(string.digits, 4))
return mail_name
ra = random_account()
header_dict = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'}
def getRandomString():
return ''.join(random.sample(string.ascii_letters + string.digits, 8))
def smsLogin():
global token
# 登陆/获取TOKEN
username = 'qiao5174' # 账号
password = 'nhZEpdZT9eiQGuU' # 密码
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=login&username=' + \
username+'&password='+password
TOKEN1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if TOKEN1.split('|')[0] == 'success':
TOKEN = TOKEN1.split('|')[1]
print('TOKEN是'+TOKEN)
token = TOKEN
return True
else:
print('获取TOKEN错误,错误代码'+TOE | ile&token=' + \
token+'&itemid='+ITEMID+'&excludeno='+EXCLUDENO
MOBILE1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if MOBILE1.split('|')[0] == 'success':
MOBILE = MOBILE1.split('|')[1]
print('获取号码是:\n'+MOBILE)
phoneNumber = MOBILE
isRelese = False
return True
else:
print('获取TOKEN错误,错误代码'+MOBILE1)
return False
else:
print('获取手机号码失败,token为空重新获取')
smsLogin()
return False
def txt_wrap_by(start_str, end, html):
start = html.find(start_str)
if start >= 0:
start += len(start_str)
end = html.find(end, start)
if end >= 0:
return html[start:end].strip()
def getMsg():
if token.strip():
global smsCode
global isRelese
TOKEN = token # TOKEN
MOBILE = phoneNumber # 手机号码
WAIT = 100 # 接受短信时长60s
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getsms&token=' + \
TOKEN+'&itemid='+ITEMID+'&mobile='+MOBILE+'&release=1'
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME1 = time.time()
TIME2 = time.time()
ROUND = 1
while (TIME2-TIME1) < WAIT and not text1.split('|')[0] == "success":
time.sleep(5)
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME2 = time.time()
ROUND = ROUND+1
ROUND = str(ROUND)
if text1.split('|')[0] == "success":
text = text1.split('|')[1]
TIME = str(round(TIME2-TIME1, 1))
print('短信内容是'+text+'\n耗费时长'+TIME+'s,循环数是'+ROUND)
start = text.find('G-')
smsCode = text[(start+2):(start+8)]
isRelese = True
return True
else:
print('获取短信超时,错误代码是'+text1+',循环数是'+ROUND)
return False
else:
print('获取手机消息失败,token为空重新获取')
smsLogin()
return False
def releaseNumber():
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=release&token=' + \
token+'&itemid='+ITEMID+'&mobile='+phoneNumber
RELEASE = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if RELEASE == 'success':
print('号码成功释放:' + phoneNumber)
return True
print('号码释放失败:'+RELEASE)
return False
# 前台开启浏览器模式
def openChrome():
# 加启动配置
option = webdriver.ChromeOptions()
option.add_argument('disable-infobars')
# option.add_argument("--proxy-server=http://103.218.240.182:80")
driver = webdriver.Chrome(chrome_options=option)
# 打开chrome浏览器
# options = webdriver.ChromeOptions()
#
# ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Mobile/14A456 MicroMessenger/6.5.7 NetType/WIFI Language/zh_CN'
#
# options.add_argument('user-agent=' + ua)
#
# driver = webdriver.Chrome(options=options)
return driver
# 前台开启浏览器模式
def openEdge():
driver = webdriver.Edge()
return driver
def register(driver):
global reginfo, isRelese, isChrome
headingText = driver.find_element_by_id("headingText")
if headingText.text == "创建您的 Google 帐号":
# 找到输入框并输入查询内容
last_name = driver.find_element_by_id("lastName")
reginfo['lastName'] = ra.last_name()
last_name.clear()
last_name.send_keys(reginfo['lastName'])
firstName = driver.find_element_by_id("firstName")
reginfo['firstName'] = ra.first_name()
firstName.clear()
firstName.send_keys(reginfo['firstName'])
user_name = driver.find_element_by_id("username")
reginfo['username'] = ra.mail_account(reginfo['firstName'],reginfo['lastName'])
reginfo['email'] = reginfo['username'] + '@gmail.com'
user_name.clear()
user_name.send_keys(reginfo['username'])
passwd = driver.find_element_by_name("Passwd")
reginfo['password'] = getRandomString()
passwd.clear()
passwd.send_keys(reginfo['password'])
confirm_passwd = driver.find_element_by_name("ConfirmPasswd")
confirm_passwd.clear()
confirm_passwd.send_keys(reginfo['password'])
accountDetailsNext = driver.find_element_by_id("accountDetailsNext")
# 提交表单
accountDetailsNext.click()
elif headingText.text == "验证您的手机号码":
try:
code = driver.find_element_by_id("code")
except NoSuchElementException as e:
phoneNumberId = driver.find_element_by_id("phoneNumberId")
if not isRelese:
ret = releaseNumber()
if ret:
isRelese = True
# driver.quit()
# openbrowser()
# return
start_timer(driver)
return
ret = getPhNumber()
if not ret:
start_timer(driver)
return
phoneNumberId.clear()
reginfo['phoneNumber'] = phoneNumber
phoneNumberId.send_keys('+86 ' + phoneNumber)
gradsIdvPhoneNext = driver.find_element_by_id("gradsIdvPhoneNext")
gradsIdvPhoneNext.click()
else:
ret = getMsg()
if not ret:
start_timer(driver)
return
code.send_keys(smsCode)
reginfo['smsCode'] = smsCode
gradsIdvVerifyNext = driver.find_element_by_id(
"gradsIdvVerifyNext")
gradsIdvVerifyNext.click()
elif headingText.text == "欢迎使用 Google":
year = driver.find_element_by_id('year')
year.send_keys('1988')
month = driver.find_element_by_id('month')
month.send_keys('1')
day = driver.find_element_by_id('day')
day.send_keys('1')
gender = driver.find_element_by_id('gender')
gender.send_keys('不愿透露')
personalDetailsNext = driver.find_element_by_id('personalDetailsNext')
personalDetailsNext.click()
elif headingText.text == "充分利用您的电话号码":
array = driver.find_elements_by_class_name('uBOgn')
for i in range(0, len(array)):
if array[i].text == '跳过':
array[i].click()
elif headingText.text | KN1+'。代码释义:1001:参数token不能为空;1002:参数action不能为空;1003:参数action错误;1004:token失效;1005:用户名或密码错误;1006:用户名不能为空;1007:密码不能为空;1008:账户余额不足;1009:账户被禁用;1010:参数错误;1011:账户待审核;1012:登录数达到上限')
return False
def getPhNumber():
if token.strip():
global phoneNumber, isRelese
EXCLUDENO = '' # 排除号段170_171
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getmob | conditional_block |
register.py | ,1)[0]
return last_name
def mail_account(self,first_name,last_name):
mail_name = first_name.lower() + last_name.lower() + ''.join(random.sample(string.digits, 4))
return mail_name
ra = random_account()
header_dict = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'}
def getRandomString():
return ''.join(random.sample(string.ascii_letters + string.digits, 8))
def smsLogin():
global token
# 登陆/获取TOKEN
username = 'qiao5174' # 账号
password = 'nhZEpdZT9eiQGuU' # 密码
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=login&username=' + \
username+'&password='+password
TOKEN1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if TOKEN1.split('|')[0] == 'success':
TOKEN = TOKEN1.split('|')[1]
print('TOKEN是'+TOKEN)
token = TOKEN
return True
else:
print('获取TOKEN错误,错误代码'+TOEKN1+'。代码释义:1001:参数token不能为空;1002:参数action不能为空;1003:参数action错误;1004:token失效;1005:用户名或密码错误;1006:用户名不能为空;1007:密码不能为空;1008:账户余额不足;1009:账户被禁用;1010:参数错误;1011:账户待审核;1012:登录数达到上限')
return False
def getPhNumber():
if token.strip():
global phoneNumber, isRelese
EXCLUDENO = '' # 排除号段170_171
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getmobile&token=' + \
token+'&itemid='+ITEMID+'&excludeno='+EXCLUDENO
MOBILE1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if MOBILE1.split('|')[0] == 'success':
MOBILE = MOBILE1.split('|')[1]
print('获取号码是:\n'+MOBILE)
phoneNumber = MOBILE
isRelese = False
return True
else:
print('获取TOKEN错误,错误代码'+MOBILE1)
return False
else:
print('获取手机号码失败,token为空重新获取')
smsLogin()
return False
def txt_wrap_by(start_str, end, html):
start = html.find(start_str)
if start >= 0:
start += len(start_str)
end = html.find(end, start)
if end >= 0:
return html[start:end].strip()
def getMsg():
if token.strip():
global smsCode
global isRelese
TOKEN = token # TOKEN
MOBILE = phoneNumber # 手机号码
WAIT = 100 # 接受短信时长60s
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=getsms&token=' + \
| isRelese = True
return True
else:
print('获取短信超时,错误代码是'+text1+',循环数是'+ROUND)
return False
else:
print('获取手机消息失败,token为空重新获取')
smsLogin()
return False
def releaseNumber():
url = 'http://api.fxhyd.cn/UserInterface.aspx?action=release&token=' + \
token+'&itemid='+ITEMID+'&mobile='+phoneNumber
RELEASE = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
if RELEASE == 'success':
print('号码成功释放:' + phoneNumber)
return
True
print('号码释放失败:'+RELEASE)
return False
# 前台开启浏览器模式
def openChrome():
# 加启动配置
option = webdriver.ChromeOptions()
option.add_argument('disable-infobars')
# option.add_argument("--proxy-server=http://103.218.240.182:80")
driver = webdriver.Chrome(chrome_options=option)
# 打开chrome浏览器
# options = webdriver.ChromeOptions()
#
# ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Mobile/14A456 MicroMessenger/6.5.7 NetType/WIFI Language/zh_CN'
#
# options.add_argument('user-agent=' + ua)
#
# driver = webdriver.Chrome(options=options)
return driver
# 前台开启浏览器模式
def openEdge():
driver = webdriver.Edge()
return driver
def register(driver):
global reginfo, isRelese, isChrome
headingText = driver.find_element_by_id("headingText")
if headingText.text == "创建您的 Google 帐号":
# 找到输入框并输入查询内容
last_name = driver.find_element_by_id("lastName")
reginfo['lastName'] = ra.last_name()
last_name.clear()
last_name.send_keys(reginfo['lastName'])
firstName = driver.find_element_by_id("firstName")
reginfo['firstName'] = ra.first_name()
firstName.clear()
firstName.send_keys(reginfo['firstName'])
user_name = driver.find_element_by_id("username")
reginfo['username'] = ra.mail_account(reginfo['firstName'],reginfo['lastName'])
reginfo['email'] = reginfo['username'] + '@gmail.com'
user_name.clear()
user_name.send_keys(reginfo['username'])
passwd = driver.find_element_by_name("Passwd")
reginfo['password'] = getRandomString()
passwd.clear()
passwd.send_keys(reginfo['password'])
confirm_passwd = driver.find_element_by_name("ConfirmPasswd")
confirm_passwd.clear()
confirm_passwd.send_keys(reginfo['password'])
accountDetailsNext = driver.find_element_by_id("accountDetailsNext")
# 提交表单
accountDetailsNext.click()
elif headingText.text == "验证您的手机号码":
try:
code = driver.find_element_by_id("code")
except NoSuchElementException as e:
phoneNumberId = driver.find_element_by_id("phoneNumberId")
if not isRelese:
ret = releaseNumber()
if ret:
isRelese = True
# driver.quit()
# openbrowser()
# return
start_timer(driver)
return
ret = getPhNumber()
if not ret:
start_timer(driver)
return
phoneNumberId.clear()
reginfo['phoneNumber'] = phoneNumber
phoneNumberId.send_keys('+86 ' + phoneNumber)
gradsIdvPhoneNext = driver.find_element_by_id("gradsIdvPhoneNext")
gradsIdvPhoneNext.click()
else:
ret = getMsg()
if not ret:
start_timer(driver)
return
code.send_keys(smsCode)
reginfo['smsCode'] = smsCode
gradsIdvVerifyNext = driver.find_element_by_id(
"gradsIdvVerifyNext")
gradsIdvVerifyNext.click()
elif headingText.text == "欢迎使用 Google":
year = driver.find_element_by_id('year')
year.send_keys('1988')
month = driver.find_element_by_id('month')
month.send_keys('1')
day = driver.find_element_by_id('day')
day.send_keys('1')
gender = driver.find_element_by_id('gender')
gender.send_keys('不愿透露')
personalDetailsNext = driver.find_element_by_id('personalDetailsNext')
personalDetailsNext.click()
elif headingText.text == "充分利用您的电话号码":
array = driver.find_elements_by_class_name('uBOgn')
for i in range(0, len(array)):
if array[i].text == '跳过':
array[i].click()
elif headingText | TOKEN+'&itemid='+ITEMID+'&mobile='+MOBILE+'&release=1'
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME1 = time.time()
TIME2 = time.time()
ROUND = 1
while (TIME2-TIME1) < WAIT and not text1.split('|')[0] == "success":
time.sleep(5)
text1 = request.urlopen(request.Request(
url=url, headers=header_dict)).read().decode(encoding='utf-8')
TIME2 = time.time()
ROUND = ROUND+1
ROUND = str(ROUND)
if text1.split('|')[0] == "success":
text = text1.split('|')[1]
TIME = str(round(TIME2-TIME1, 1))
print('短信内容是'+text+'\n耗费时长'+TIME+'s,循环数是'+ROUND)
start = text.find('G-')
smsCode = text[(start+2):(start+8)] | identifier_body |
webcam-local_folder-emotions-gdrive.py | , basename
from deepface import DeepFace
#Google authorisation
#----------------------
gauth = GoogleAuth()
# Try to load saved client credentials
gauth.LoadCredentialsFile("googlecreds.txt")
if gauth.credentials is None:
# Authenticate via google if they're not there, and get refresh token to automate token retrieval
gauth.GetFlow()
gauth.flow.params.update({'access_type': 'offline'})
gauth.flow.params.update({'approval_prompt': 'force'})
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("googlecreds.txt")
drive = GoogleDrive(gauth)
#--------------------
#--------------------
# Disable printing to command line - ignore
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
#--------------------
def webcam_images_to_local_folders():
#function that takes webcam image and saves to local folder
#Define number of frames to be captured and interval
watch_time = 1 #in minutes
interval = 0.991442321 #Target of 1 fps, adjusted for average time taken in for loop
#nframes = int(watch_time)*1*60
nframes = 30
#set video capture device
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
#capture frames in 1s intervals
dt_string = []
now = []
for i in range(nframes):
# get time now
now.append(datetime.now())
dt_string.append(now[i].strftime("%d-%m-%Y %H.%M.%S"))
#print("date and time =", dt_string[i])
#capture image
ret, img = cap.read()
time.sleep(1)
contrast = 1.5
brightness = 0
img = np.clip(contrast*img+brightness,0,255)
# save file jusing current time
cv2.imwrite('./images/'+dt_string[i]+'.jpg', img)
file_dirs = [0]*nframes
datetimes = [0]*nframes
for i in range(nframes):
file_name = dt_string[i]
file_dirs[i] = './images/'+dt_string[i]+'.jpg'
datetimes[i] = dt_string[i]
# wait interval
time.sleep(interval)
stop = timeit.default_timer()
#if period>interval:
# interval = (1-((period)-1))
#else:
# interval = (1+(1-(period)))
#print('Time: ', stop - start)
webcam_images_to_local_folders.file_dirs = file_dirs
webcam_images_to_local_folders.datetimes = datetimes
webcam_images_to_local_folders.nframes = nframes
def emotion_from_local_image_file(images):
#function that takes images from local file, runs expression recognition using Azure Face API,
#and output raw datetime and emotion data
datetimes = webcam_images_to_local_folders.datetimes
nframes = webcam_images_to_local_folders.nframes
emotions = []
emotions_deepface = []
os.environ['FACE_SUBSCRIPTION_KEY'] = 'c03f4bb6a5794c79aa9d6d623b81c30d'
os.environ['FACE_ENDPOINT'] = 'https://iotface1.cognitiveservices.azure.com/'
#Authentication process
subscription_key = os.environ['FACE_SUBSCRIPTION_KEY']
assert subscription_key
face_api_url = os.environ['FACE_ENDPOINT'] + '/face/v1.0/detect'
headers = {'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'
}
params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}
# =============================================================================
#
#image_url = 'https://blog-cdn.crowdcontent.com/crowdblog/Man-Smiling-with-Beard.jpg'
# params = {
# 'detectionModel': 'detection_02',
# 'returnFaceId': 'true'
# }
#
# response = requests.post(face_api_url, params=params,
# headers=headers, json={"url": image_url})
# face = response.json
# print(json.dumps(response.json()))
#
# params = {
# 'detectionModel': 'detection_01',
# 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
# 'returnFaceId': 'true'
# }
#
# =============================================================================
#function to rename dictionary keys
def rename_keys(d, keys):
return dict([(keys.get(k), v) for k, v in d.items()])
#Deepface for emotion detection -- ended up not using as it was significantly slower than the azure model
#(time for report: average time = 5.67s)
def annotate_image_deepface(image_url):
img = cv2.imread(image_url)
predictions = DeepFace.analyze(img) #uses FER2013 dataset and others; more accurate than I could get myself
print(predictions)
#Azure Face API for emotion detection
#(time for report: average time = 0.355s)
def annotate_image(image_url):
data = open(image_url,'rb').read()
response = requests.post(face_api_url, params=params, headers=headers, data=data)
faces = response.json()
image = Image.open(image_url)
plt.figure(figsize=(8,8))
ax = plt.imshow(image, alpha=1)
for face in faces:
fr = face["faceRectangle"]
fa = face["faceAttributes"]
emotions_prob = (fa["emotion"])
emotion_translations = { #get emotions as adjective
'anger':'angry',
'contempt':'angry',#maps to angry; contempt is shown when angry and the deepface model has no contempt key
'disgust':'disgusted',
'fear':'scared',
'happiness':'happy',
'neutral':'neutral',
'sadness':'sad',
'surprise':'surprised'
}
emotions_prob = rename_keys(emotions_prob, emotion_translations) #call rename keys function
#find dominant emotion by looking at highest probability emotion
emotions_prob["neutral"] = emotions_prob["neutral"]/100 #calibration to detect more nuances expressions
emotion = max(emotions_prob, key=emotions_prob.get)
emotions.append(emotion)
#plot on figure
origin = (fr["left"], fr["top"])
p = patches.Rectangle(origin, fr["width"], fr["height"],
fill=False, linewidth=2, color='b')
ax.axes.add_patch(p)
plt.text(origin[0], origin[1], "%s, %d"%(emotion, fa["age"]),
fontsize=20, weight="bold", va="bottom", color=("b"))
plt.axis("off")
#label images without .jpg and call functions
for image in images:
b = os.path.basename(image)
if b.endswith('.jpg'):
|
start_time_azure = time.time()
annotate_image(image)
end_time_azure = time.time()
print("time for azure model: ")
print(end_time_azure-start_time_azure)
#----Uncomment to also run deepface model
# =============================================================================
# start_time_deepface = time.time()
# annotate_image_deepface(image)
# end_time_deepface = time.time()
# print("time for deepface model: ")
# print(end_time_deepface-start_time_deepface)
# =============================================================================
#concat datetimes with emotions; error handling deals with frames where person may be out of frame -
#would throw an index error but instead outputs as zero instead
data_entry = [0]*nframes
for i in range(nframes):
try:
data_entry[i] = datetimes[i] + ';' + emotions[i]
except IndexError:
data_entry[i] = "0;0"
emotion_from_local_image_file.data_entry=data_entry
def publish_to_drive(data_entry):
#function that formats data into correct CSV format, and uploads to google drive as a text file
#No raw images are actually exported beyond the secure Azure link, making this more secure
#format data to be in line-by-line csv format
li = str(data_entry)
li = li.replace(".",":")
li = li.replace("[","")
li = li.replace("]","")
li = li.replace(", ","\n")
li = li.replace(";",",")
li = li.replace("'","")
#Check if file already exists, if yes download and add new data to it before uploading, if not, | b = b[:-4] | conditional_block |
webcam-local_folder-emotions-gdrive.py | , basename
from deepface import DeepFace
#Google authorisation
#----------------------
gauth = GoogleAuth()
# Try to load saved client credentials
gauth.LoadCredentialsFile("googlecreds.txt")
if gauth.credentials is None:
# Authenticate via google if they're not there, and get refresh token to automate token retrieval
gauth.GetFlow()
gauth.flow.params.update({'access_type': 'offline'})
gauth.flow.params.update({'approval_prompt': 'force'})
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("googlecreds.txt")
drive = GoogleDrive(gauth)
#--------------------
#--------------------
# Disable printing to command line - ignore
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
#--------------------
def webcam_images_to_local_folders():
#function that takes webcam image and saves to local folder
#Define number of frames to be captured and interval
watch_time = 1 #in minutes
interval = 0.991442321 #Target of 1 fps, adjusted for average time taken in for loop
#nframes = int(watch_time)*1*60
nframes = 30
#set video capture device
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
#capture frames in 1s intervals
dt_string = []
now = []
for i in range(nframes):
# get time now
now.append(datetime.now())
dt_string.append(now[i].strftime("%d-%m-%Y %H.%M.%S"))
#print("date and time =", dt_string[i])
#capture image
ret, img = cap.read()
time.sleep(1)
contrast = 1.5
brightness = 0
img = np.clip(contrast*img+brightness,0,255)
# save file jusing current time
cv2.imwrite('./images/'+dt_string[i]+'.jpg', img)
file_dirs = [0]*nframes
datetimes = [0]*nframes
for i in range(nframes):
file_name = dt_string[i]
file_dirs[i] = './images/'+dt_string[i]+'.jpg'
datetimes[i] = dt_string[i]
# wait interval
time.sleep(interval)
stop = timeit.default_timer()
#if period>interval:
# interval = (1-((period)-1))
#else:
# interval = (1+(1-(period)))
#print('Time: ', stop - start)
webcam_images_to_local_folders.file_dirs = file_dirs
webcam_images_to_local_folders.datetimes = datetimes
webcam_images_to_local_folders.nframes = nframes
def emotion_from_local_image_file(images):
#function that takes images from local file, runs expression recognition using Azure Face API,
#and output raw datetime and emotion data
datetimes = webcam_images_to_local_folders.datetimes
nframes = webcam_images_to_local_folders.nframes
emotions = []
emotions_deepface = []
os.environ['FACE_SUBSCRIPTION_KEY'] = 'c03f4bb6a5794c79aa9d6d623b81c30d'
os.environ['FACE_ENDPOINT'] = 'https://iotface1.cognitiveservices.azure.com/'
#Authentication process
subscription_key = os.environ['FACE_SUBSCRIPTION_KEY']
assert subscription_key
face_api_url = os.environ['FACE_ENDPOINT'] + '/face/v1.0/detect'
headers = {'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'
}
params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}
# =============================================================================
#
#image_url = 'https://blog-cdn.crowdcontent.com/crowdblog/Man-Smiling-with-Beard.jpg'
# params = {
# 'detectionModel': 'detection_02',
# 'returnFaceId': 'true'
# }
#
# response = requests.post(face_api_url, params=params,
# headers=headers, json={"url": image_url})
# face = response.json
# print(json.dumps(response.json()))
#
# params = {
# 'detectionModel': 'detection_01',
# 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
# 'returnFaceId': 'true'
# }
#
# =============================================================================
#function to rename dictionary keys
def rename_keys(d, keys):
return dict([(keys.get(k), v) for k, v in d.items()])
#Deepface for emotion detection -- ended up not using as it was significantly slower than the azure model
#(time for report: average time = 5.67s)
def annotate_image_deepface(image_url):
img = cv2.imread(image_url)
predictions = DeepFace.analyze(img) #uses FER2013 dataset and others; more accurate than I could get myself
print(predictions)
#Azure Face API for emotion detection
#(time for report: average time = 0.355s)
def annotate_image(image_url):
data = open(image_url,'rb').read()
response = requests.post(face_api_url, params=params, headers=headers, data=data)
faces = response.json()
image = Image.open(image_url)
plt.figure(figsize=(8,8))
ax = plt.imshow(image, alpha=1)
for face in faces:
fr = face["faceRectangle"]
fa = face["faceAttributes"]
emotions_prob = (fa["emotion"])
emotion_translations = { #get emotions as adjective
'anger':'angry',
'contempt':'angry',#maps to angry; contempt is shown when angry and the deepface model has no contempt key
'disgust':'disgusted',
'fear':'scared',
'happiness':'happy',
'neutral':'neutral',
'sadness':'sad',
'surprise':'surprised'
}
emotions_prob = rename_keys(emotions_prob, emotion_translations) #call rename keys function
#find dominant emotion by looking at highest probability emotion
emotions_prob["neutral"] = emotions_prob["neutral"]/100 #calibration to detect more nuances expressions
emotion = max(emotions_prob, key=emotions_prob.get)
emotions.append(emotion)
#plot on figure
origin = (fr["left"], fr["top"])
p = patches.Rectangle(origin, fr["width"], fr["height"],
fill=False, linewidth=2, color='b')
ax.axes.add_patch(p)
plt.text(origin[0], origin[1], "%s, %d"%(emotion, fa["age"]),
fontsize=20, weight="bold", va="bottom", color=("b"))
plt.axis("off")
| b = b[:-4]
start_time_azure = time.time()
annotate_image(image)
end_time_azure = time.time()
print("time for azure model: ")
print(end_time_azure-start_time_azure)
#----Uncomment to also run deepface model
# =============================================================================
# start_time_deepface = time.time()
# annotate_image_deepface(image)
# end_time_deepface = time.time()
# print("time for deepface model: ")
# print(end_time_deepface-start_time_deepface)
# =============================================================================
#concat datetimes with emotions; error handling deals with frames where person may be out of frame -
#would throw an index error but instead outputs as zero instead
data_entry = [0]*nframes
for i in range(nframes):
try:
data_entry[i] = datetimes[i] + ';' + emotions[i]
except IndexError:
data_entry[i] = "0;0"
emotion_from_local_image_file.data_entry=data_entry
def publish_to_drive(data_entry):
#function that formats data into correct CSV format, and uploads to google drive as a text file
#No raw images are actually exported beyond the secure Azure link, making this more secure
#format data to be in line-by-line csv format
li = str(data_entry)
li = li.replace(".",":")
li = li.replace("[","")
li = li.replace("]","")
li = li.replace(", ","\n")
li = li.replace(";",",")
li = li.replace("'","")
#Check if file already exists, if yes download and add new data to it before uploading, if not, | #label images without .jpg and call functions
for image in images:
b = os.path.basename(image)
if b.endswith('.jpg'): | random_line_split |
webcam-local_folder-emotions-gdrive.py | , basename
from deepface import DeepFace
#Google authorisation
#----------------------
gauth = GoogleAuth()
# Try to load saved client credentials
gauth.LoadCredentialsFile("googlecreds.txt")
if gauth.credentials is None:
# Authenticate via google if they're not there, and get refresh token to automate token retrieval
gauth.GetFlow()
gauth.flow.params.update({'access_type': 'offline'})
gauth.flow.params.update({'approval_prompt': 'force'})
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("googlecreds.txt")
drive = GoogleDrive(gauth)
#--------------------
#--------------------
# Disable printing to command line - ignore
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
|
#--------------------
def webcam_images_to_local_folders():
#function that takes webcam image and saves to local folder
#Define number of frames to be captured and interval
watch_time = 1 #in minutes
interval = 0.991442321 #Target of 1 fps, adjusted for average time taken in for loop
#nframes = int(watch_time)*1*60
nframes = 30
#set video capture device
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
#capture frames in 1s intervals
dt_string = []
now = []
for i in range(nframes):
# get time now
now.append(datetime.now())
dt_string.append(now[i].strftime("%d-%m-%Y %H.%M.%S"))
#print("date and time =", dt_string[i])
#capture image
ret, img = cap.read()
time.sleep(1)
contrast = 1.5
brightness = 0
img = np.clip(contrast*img+brightness,0,255)
# save file jusing current time
cv2.imwrite('./images/'+dt_string[i]+'.jpg', img)
file_dirs = [0]*nframes
datetimes = [0]*nframes
for i in range(nframes):
file_name = dt_string[i]
file_dirs[i] = './images/'+dt_string[i]+'.jpg'
datetimes[i] = dt_string[i]
# wait interval
time.sleep(interval)
stop = timeit.default_timer()
#if period>interval:
# interval = (1-((period)-1))
#else:
# interval = (1+(1-(period)))
#print('Time: ', stop - start)
webcam_images_to_local_folders.file_dirs = file_dirs
webcam_images_to_local_folders.datetimes = datetimes
webcam_images_to_local_folders.nframes = nframes
def emotion_from_local_image_file(images):
#function that takes images from local file, runs expression recognition using Azure Face API,
#and output raw datetime and emotion data
datetimes = webcam_images_to_local_folders.datetimes
nframes = webcam_images_to_local_folders.nframes
emotions = []
emotions_deepface = []
os.environ['FACE_SUBSCRIPTION_KEY'] = 'c03f4bb6a5794c79aa9d6d623b81c30d'
os.environ['FACE_ENDPOINT'] = 'https://iotface1.cognitiveservices.azure.com/'
#Authentication process
subscription_key = os.environ['FACE_SUBSCRIPTION_KEY']
assert subscription_key
face_api_url = os.environ['FACE_ENDPOINT'] + '/face/v1.0/detect'
headers = {'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'
}
params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}
# =============================================================================
#
#image_url = 'https://blog-cdn.crowdcontent.com/crowdblog/Man-Smiling-with-Beard.jpg'
# params = {
# 'detectionModel': 'detection_02',
# 'returnFaceId': 'true'
# }
#
# response = requests.post(face_api_url, params=params,
# headers=headers, json={"url": image_url})
# face = response.json
# print(json.dumps(response.json()))
#
# params = {
# 'detectionModel': 'detection_01',
# 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
# 'returnFaceId': 'true'
# }
#
# =============================================================================
#function to rename dictionary keys
def rename_keys(d, keys):
return dict([(keys.get(k), v) for k, v in d.items()])
#Deepface for emotion detection -- ended up not using as it was significantly slower than the azure model
#(time for report: average time = 5.67s)
def annotate_image_deepface(image_url):
img = cv2.imread(image_url)
predictions = DeepFace.analyze(img) #uses FER2013 dataset and others; more accurate than I could get myself
print(predictions)
#Azure Face API for emotion detection
#(time for report: average time = 0.355s)
def annotate_image(image_url):
data = open(image_url,'rb').read()
response = requests.post(face_api_url, params=params, headers=headers, data=data)
faces = response.json()
image = Image.open(image_url)
plt.figure(figsize=(8,8))
ax = plt.imshow(image, alpha=1)
for face in faces:
fr = face["faceRectangle"]
fa = face["faceAttributes"]
emotions_prob = (fa["emotion"])
emotion_translations = { #get emotions as adjective
'anger':'angry',
'contempt':'angry',#maps to angry; contempt is shown when angry and the deepface model has no contempt key
'disgust':'disgusted',
'fear':'scared',
'happiness':'happy',
'neutral':'neutral',
'sadness':'sad',
'surprise':'surprised'
}
emotions_prob = rename_keys(emotions_prob, emotion_translations) #call rename keys function
#find dominant emotion by looking at highest probability emotion
emotions_prob["neutral"] = emotions_prob["neutral"]/100 #calibration to detect more nuances expressions
emotion = max(emotions_prob, key=emotions_prob.get)
emotions.append(emotion)
#plot on figure
origin = (fr["left"], fr["top"])
p = patches.Rectangle(origin, fr["width"], fr["height"],
fill=False, linewidth=2, color='b')
ax.axes.add_patch(p)
plt.text(origin[0], origin[1], "%s, %d"%(emotion, fa["age"]),
fontsize=20, weight="bold", va="bottom", color=("b"))
plt.axis("off")
#label images without .jpg and call functions
for image in images:
b = os.path.basename(image)
if b.endswith('.jpg'):
b = b[:-4]
start_time_azure = time.time()
annotate_image(image)
end_time_azure = time.time()
print("time for azure model: ")
print(end_time_azure-start_time_azure)
#----Uncomment to also run deepface model
# =============================================================================
# start_time_deepface = time.time()
# annotate_image_deepface(image)
# end_time_deepface = time.time()
# print("time for deepface model: ")
# print(end_time_deepface-start_time_deepface)
# =============================================================================
#concat datetimes with emotions; error handling deals with frames where person may be out of frame -
#would throw an index error but instead outputs as zero instead
data_entry = [0]*nframes
for i in range(nframes):
try:
data_entry[i] = datetimes[i] + ';' + emotions[i]
except IndexError:
data_entry[i] = "0;0"
emotion_from_local_image_file.data_entry=data_entry
def publish_to_drive(data_entry):
#function that formats data into correct CSV format, and uploads to google drive as a text file
#No raw images are actually exported beyond the secure Azure link, making this more secure
#format data to be in line-by-line csv format
li = str(data_entry)
li = li.replace(".",":")
li = li.replace("[","")
li = li.replace("]","")
li = li.replace(", ","\n")
li = li.replace(";",",")
li = li.replace("'","")
#Check if file already exists, if yes download and add new data to it before uploading, if not | sys.stdout = sys.__stdout__ | identifier_body |
webcam-local_folder-emotions-gdrive.py | , basename
from deepface import DeepFace
#Google authorisation
#----------------------
gauth = GoogleAuth()
# Try to load saved client credentials
gauth.LoadCredentialsFile("googlecreds.txt")
if gauth.credentials is None:
# Authenticate via google if they're not there, and get refresh token to automate token retrieval
gauth.GetFlow()
gauth.flow.params.update({'access_type': 'offline'})
gauth.flow.params.update({'approval_prompt': 'force'})
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("googlecreds.txt")
drive = GoogleDrive(gauth)
#--------------------
#--------------------
# Disable printing to command line - ignore
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
#--------------------
def webcam_images_to_local_folders():
#function that takes webcam image and saves to local folder
#Define number of frames to be captured and interval
watch_time = 1 #in minutes
interval = 0.991442321 #Target of 1 fps, adjusted for average time taken in for loop
#nframes = int(watch_time)*1*60
nframes = 30
#set video capture device
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
#capture frames in 1s intervals
dt_string = []
now = []
for i in range(nframes):
# get time now
now.append(datetime.now())
dt_string.append(now[i].strftime("%d-%m-%Y %H.%M.%S"))
#print("date and time =", dt_string[i])
#capture image
ret, img = cap.read()
time.sleep(1)
contrast = 1.5
brightness = 0
img = np.clip(contrast*img+brightness,0,255)
# save file jusing current time
cv2.imwrite('./images/'+dt_string[i]+'.jpg', img)
file_dirs = [0]*nframes
datetimes = [0]*nframes
for i in range(nframes):
file_name = dt_string[i]
file_dirs[i] = './images/'+dt_string[i]+'.jpg'
datetimes[i] = dt_string[i]
# wait interval
time.sleep(interval)
stop = timeit.default_timer()
#if period>interval:
# interval = (1-((period)-1))
#else:
# interval = (1+(1-(period)))
#print('Time: ', stop - start)
webcam_images_to_local_folders.file_dirs = file_dirs
webcam_images_to_local_folders.datetimes = datetimes
webcam_images_to_local_folders.nframes = nframes
def emotion_from_local_image_file(images):
#function that takes images from local file, runs expression recognition using Azure Face API,
#and output raw datetime and emotion data
datetimes = webcam_images_to_local_folders.datetimes
nframes = webcam_images_to_local_folders.nframes
emotions = []
emotions_deepface = []
os.environ['FACE_SUBSCRIPTION_KEY'] = 'c03f4bb6a5794c79aa9d6d623b81c30d'
os.environ['FACE_ENDPOINT'] = 'https://iotface1.cognitiveservices.azure.com/'
#Authentication process
subscription_key = os.environ['FACE_SUBSCRIPTION_KEY']
assert subscription_key
face_api_url = os.environ['FACE_ENDPOINT'] + '/face/v1.0/detect'
headers = {'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'
}
params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}
# =============================================================================
#
#image_url = 'https://blog-cdn.crowdcontent.com/crowdblog/Man-Smiling-with-Beard.jpg'
# params = {
# 'detectionModel': 'detection_02',
# 'returnFaceId': 'true'
# }
#
# response = requests.post(face_api_url, params=params,
# headers=headers, json={"url": image_url})
# face = response.json
# print(json.dumps(response.json()))
#
# params = {
# 'detectionModel': 'detection_01',
# 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
# 'returnFaceId': 'true'
# }
#
# =============================================================================
#function to rename dictionary keys
def rename_keys(d, keys):
return dict([(keys.get(k), v) for k, v in d.items()])
#Deepface for emotion detection -- ended up not using as it was significantly slower than the azure model
#(time for report: average time = 5.67s)
def | (image_url):
img = cv2.imread(image_url)
predictions = DeepFace.analyze(img) #uses FER2013 dataset and others; more accurate than I could get myself
print(predictions)
#Azure Face API for emotion detection
#(time for report: average time = 0.355s)
def annotate_image(image_url):
data = open(image_url,'rb').read()
response = requests.post(face_api_url, params=params, headers=headers, data=data)
faces = response.json()
image = Image.open(image_url)
plt.figure(figsize=(8,8))
ax = plt.imshow(image, alpha=1)
for face in faces:
fr = face["faceRectangle"]
fa = face["faceAttributes"]
emotions_prob = (fa["emotion"])
emotion_translations = { #get emotions as adjective
'anger':'angry',
'contempt':'angry',#maps to angry; contempt is shown when angry and the deepface model has no contempt key
'disgust':'disgusted',
'fear':'scared',
'happiness':'happy',
'neutral':'neutral',
'sadness':'sad',
'surprise':'surprised'
}
emotions_prob = rename_keys(emotions_prob, emotion_translations) #call rename keys function
#find dominant emotion by looking at highest probability emotion
emotions_prob["neutral"] = emotions_prob["neutral"]/100 #calibration to detect more nuances expressions
emotion = max(emotions_prob, key=emotions_prob.get)
emotions.append(emotion)
#plot on figure
origin = (fr["left"], fr["top"])
p = patches.Rectangle(origin, fr["width"], fr["height"],
fill=False, linewidth=2, color='b')
ax.axes.add_patch(p)
plt.text(origin[0], origin[1], "%s, %d"%(emotion, fa["age"]),
fontsize=20, weight="bold", va="bottom", color=("b"))
plt.axis("off")
#label images without .jpg and call functions
for image in images:
b = os.path.basename(image)
if b.endswith('.jpg'):
b = b[:-4]
start_time_azure = time.time()
annotate_image(image)
end_time_azure = time.time()
print("time for azure model: ")
print(end_time_azure-start_time_azure)
#----Uncomment to also run deepface model
# =============================================================================
# start_time_deepface = time.time()
# annotate_image_deepface(image)
# end_time_deepface = time.time()
# print("time for deepface model: ")
# print(end_time_deepface-start_time_deepface)
# =============================================================================
#concat datetimes with emotions; error handling deals with frames where person may be out of frame -
#would throw an index error but instead outputs as zero instead
data_entry = [0]*nframes
for i in range(nframes):
try:
data_entry[i] = datetimes[i] + ';' + emotions[i]
except IndexError:
data_entry[i] = "0;0"
emotion_from_local_image_file.data_entry=data_entry
def publish_to_drive(data_entry):
#function that formats data into correct CSV format, and uploads to google drive as a text file
#No raw images are actually exported beyond the secure Azure link, making this more secure
#format data to be in line-by-line csv format
li = str(data_entry)
li = li.replace(".",":")
li = li.replace("[","")
li = li.replace("]","")
li = li.replace(", ","\n")
li = li.replace(";",",")
li = li.replace("'","")
#Check if file already exists, if yes download and add new data to it before uploading, if not | annotate_image_deepface | identifier_name |
mqtt_framework.py | qtt.reasoncodes import ReasonCodes
from paho.mqtt.subscribeoptions import SubscribeOptions
log = getLogger(__name__)
class GenericMqttEndpoint:
def __init__(self, client_kwargs: dict, password_auth: dict, server_kwargs: dict, tls: bool):
"""
:param client_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L517
:param password_auth: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L1318
:param server_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L913
:param tls: If true, enables TLS with https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L765
"""
self.mqtt_client_kwargs = client_kwargs
# Some features and parameters depend on this.
self.mqtt_client_kwargs.update(protocol=MQTTv5)
self.mqtt_tls = tls
self.mqtt_password_auth = password_auth
self.mqtt_server_kwargs = server_kwargs
# This is specific to MQTTv5 (MQTTv311 has clean_session in the client_kwargs instead)
self.mqtt_server_kwargs.update(clean_start=MQTT_CLEAN_START_FIRST_ONLY)
self._mqttc = MqttClient(**self.mqtt_client_kwargs)
if self.mqtt_tls:
self._mqttc.tls_set()
if self.mqtt_password_auth:
self._mqttc.username_pw_set(**self.mqtt_password_auth)
self._mqttc.on_connect = self._on_connect
self._mqttc.on_disconnect = self._on_disconnect
self._mqttc.on_message = self._on_message
self._mqttc.on_log = self._on_log
self._managed_subsciptions = dict()
"""
This dictionary maps subscription topics to subscription options
"""
for attribute in self.__class__.__dict__.values():
if hasattr(attribute, _SUBSCRIBE_DECORATOR_NAME):
decorated_function = attribute
topic_pattern, kwargs = getattr(decorated_function, _SUBSCRIBE_DECORATOR_NAME)
if topic_pattern in self._managed_subsciptions:
raise Exception(
"A client cannot subscribe to an identical topic filter multiple times!")
else:
self._managed_subsciptions[topic_pattern] = kwargs
# This function introduces a scope,
# to avoid a changing decorated_function variable
# cause changing behaviour of call_decorated_function
def create_caller(decorated_function):
# the decorated_function has not yet a self object; thus we need this wrapper
@wraps(decorated_function)
def call_decorated_function(client, userdata, message):
variables = unpack_topic(topic_pattern, message.topic)
return decorated_function(self, client=client, userdata=userdata, message=message, *variables)
return call_decorated_function
# this is done only once, not on every reconnect / resubscribe.
self._mqttc.message_callback_add(topic_pattern, create_caller(decorated_function))
def connect(self):
# currently, this will retry first connects, we don't need bettermqtt
self._mqttc.connect_async(**self.mqtt_server_kwargs)
self._mqttc.loop_start()
def _on_connect(self, client, userdata, flags, rc: ReasonCodes, properties: Properties = None):
if flags['session present'] == 0:
# This is a new session, and we need to resubscribe
self._subscribe()
elif flags['session present'] == 1:
pass
else:
raise Exception("Unknown Session Present Flag")
def _subscribe(self):
# Edge case: This may fail if we disconnect when not subscribed to all channels; there seems to a case where
# subscribe() returns an error code that we currently do handle.
# With some luck, the subscription stays in the packet queue.
# Other defaults are sane, we don't need Subscription Options
# However, if our session expires (after long-lasting conection loss),
# we will unexpectedly re-receive all retained messages
# which is not bad, if they are idempotent
# We MUST NOT add message callbacks here, otherwise, they may be added twice upon reconnect after session expiry
for topic_filter, kwargs in self._managed_subsciptions.items():
self._mqttc.subscribe(topic=topic_filter, **kwargs)
def _on_disconnect(self, client, userdata, rc: ReasonCodes, properties: Properties = None):
# Exceptions here seem to disrupt the automatic reconnect
# Connection loss can be tested with:
# sudo tc qdisc add dev lo root netem loss 100%
# sudo tc qdisc del dev lo root
pass
def _on_message(self, client, userdata, message: MQTTMessage):
message_dict = {attr: getattr(message, attr) for attr in dir(message) if not attr.startswith("_")}
message_properties: Properties = message.properties
message_properties_dict = {attr: getattr(message_properties, attr) for attr in dir(message_properties) if
not attr.startswith("_")}
def _on_log(self, client, userdata, level, buf):
log.log(level, buf, extra=dict(userdata=userdata))
@staticmethod
def subscribe_decorator(topic, **kwargs):
"""
This must be the outermost decorator (except for other similar nop-decorators)
Avoid overlapping subscriptions or handle duplicates.
Uses the same kwargs as paho.mqtt.client.Client.subscribe()
Try qos=2 or options=SubscriptionOptions()
Your function should have the signature func(var1, var2, vars, *, client,userdata,message)
with a positional variable for each + or # in the pattern
"""
def _subscribe_decorator(func):
|
return _subscribe_decorator
def publish(self, topic_pattern, *topic_data, **kwargs):
"""
:param topic_pattern: A topic pattern, e.g. a/+/c/#
:param topic_data: some elements matching the pattern, e.g. "b", ("d", "e")
:param kwargs: Passed to Client.publish(self, topic, payload=None, qos=0, retain=False, properties=None)
:return:
"""
topic = pack_topic(topic_pattern, *topic_data)
self._mqttc.publish(topic, **kwargs)
_SUBSCRIBE_DECORATOR_NAME = name = __name__ + "." + GenericMqttEndpoint.subscribe_decorator.__qualname__
FORBIDDEN_CHARS = "/+#"
def pack_topic(pattern: str, *data):
data = list(data)
while "+" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
element = data.pop(0)
check_data_is_sane(element)
pattern = pattern.replace("+", element, 1)
while "#" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
remainder = data.pop(0)
if isinstance(remainder, str):
raise Exception("You should provide a list or a tuple to replace a '#', not a string.")
elements = list(remainder)
for element in elements:
check_data_is_sane(element)
pattern = pattern.replace("#", "/".join(elements), 1)
if data:
raise Exception("Unused placeholders are present")
return pattern
def check_data_is_sane(element):
for FORBIDDEN_CHAR in FORBIDDEN_CHARS:
if FORBIDDEN_CHAR in element:
raise Exception(f"Cannot fill in data containing a '{FORBIDDEN_CHAR}'")
def unpack_topic(pattern, topic):
"""
returns one string for each "+", followed by a list of strings when a trailing "#" is present
"""
pattern_parts = iter(pattern.split("/"))
topic_parts = iter(topic.split("/"))
while True:
try:
cur_pattern = next(pattern_parts)
except StopIteration:
try:
cur_topic = next(topic_parts)
raise Exception("The topic to be matched is longer than the pattern without an # suffix. "
"The first unmatched part is {!r}".format(cur_topic))
except StopIteration:
# no more elements in both sequences.
return
if cur_pattern == "#":
yield list(topic_parts)
try:
cur_pattern = next(pattern_parts)
raise Exception("The pattern has a component after a #: {!r}".format(cur_pattern))
except StopIteration:
# topic has been exhausted by list() enumeration, and pattern is empty, too.
return
else:
try:
cur_topic = next(topic_parts)
except StopIteration:
raise Exception("The | setattr(func, _SUBSCRIBE_DECORATOR_NAME, (topic, kwargs))
# no @wraps
return func | identifier_body |
mqtt_framework.py | qtt.reasoncodes import ReasonCodes
from paho.mqtt.subscribeoptions import SubscribeOptions
log = getLogger(__name__)
class GenericMqttEndpoint:
def __init__(self, client_kwargs: dict, password_auth: dict, server_kwargs: dict, tls: bool):
"""
:param client_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L517
:param password_auth: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L1318
:param server_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L913
:param tls: If true, enables TLS with https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L765
"""
self.mqtt_client_kwargs = client_kwargs
# Some features and parameters depend on this.
self.mqtt_client_kwargs.update(protocol=MQTTv5)
self.mqtt_tls = tls
self.mqtt_password_auth = password_auth
self.mqtt_server_kwargs = server_kwargs
# This is specific to MQTTv5 (MQTTv311 has clean_session in the client_kwargs instead)
self.mqtt_server_kwargs.update(clean_start=MQTT_CLEAN_START_FIRST_ONLY)
self._mqttc = MqttClient(**self.mqtt_client_kwargs)
if self.mqtt_tls:
self._mqttc.tls_set()
if self.mqtt_password_auth:
self._mqttc.username_pw_set(**self.mqtt_password_auth)
self._mqttc.on_connect = self._on_connect
self._mqttc.on_disconnect = self._on_disconnect
self._mqttc.on_message = self._on_message
self._mqttc.on_log = self._on_log
self._managed_subsciptions = dict()
"""
This dictionary maps subscription topics to subscription options
"""
for attribute in self.__class__.__dict__.values():
if hasattr(attribute, _SUBSCRIBE_DECORATOR_NAME):
decorated_function = attribute
topic_pattern, kwargs = getattr(decorated_function, _SUBSCRIBE_DECORATOR_NAME)
if topic_pattern in self._managed_subsciptions:
raise Exception(
"A client cannot subscribe to an identical topic filter multiple times!")
else:
self._managed_subsciptions[topic_pattern] = kwargs
# This function introduces a scope,
# to avoid a changing decorated_function variable
# cause changing behaviour of call_decorated_function
def create_caller(decorated_function):
# the decorated_function has not yet a self object; thus we need this wrapper
@wraps(decorated_function)
def call_decorated_function(client, userdata, message):
variables = unpack_topic(topic_pattern, message.topic)
return decorated_function(self, client=client, userdata=userdata, message=message, *variables)
return call_decorated_function
# this is done only once, not on every reconnect / resubscribe.
self._mqttc.message_callback_add(topic_pattern, create_caller(decorated_function))
def connect(self):
# currently, this will retry first connects, we don't need bettermqtt
self._mqttc.connect_async(**self.mqtt_server_kwargs)
self._mqttc.loop_start()
def _on_connect(self, client, userdata, flags, rc: ReasonCodes, properties: Properties = None):
if flags['session present'] == 0:
# This is a new session, and we need to resubscribe
self._subscribe()
elif flags['session present'] == 1:
pass
else:
raise Exception("Unknown Session Present Flag")
def _subscribe(self):
# Edge case: This may fail if we disconnect when not subscribed to all channels; there seems to a case where
# subscribe() returns an error code that we currently do handle.
# With some luck, the subscription stays in the packet queue.
# Other defaults are sane, we don't need Subscription Options
# However, if our session expires (after long-lasting conection loss),
# we will unexpectedly re-receive all retained messages
# which is not bad, if they are idempotent
# We MUST NOT add message callbacks here, otherwise, they may be added twice upon reconnect after session expiry
for topic_filter, kwargs in self._managed_subsciptions.items():
self._mqttc.subscribe(topic=topic_filter, **kwargs)
def _on_disconnect(self, client, userdata, rc: ReasonCodes, properties: Properties = None):
# Exceptions here seem to disrupt the automatic reconnect
# Connection loss can be tested with:
# sudo tc qdisc add dev lo root netem loss 100%
# sudo tc qdisc del dev lo root
pass
def _on_message(self, client, userdata, message: MQTTMessage):
message_dict = {attr: getattr(message, attr) for attr in dir(message) if not attr.startswith("_")}
message_properties: Properties = message.properties
message_properties_dict = {attr: getattr(message_properties, attr) for attr in dir(message_properties) if
not attr.startswith("_")}
def _on_log(self, client, userdata, level, buf):
log.log(level, buf, extra=dict(userdata=userdata))
@staticmethod
def subscribe_decorator(topic, **kwargs):
"""
This must be the outermost decorator (except for other similar nop-decorators)
Avoid overlapping subscriptions or handle duplicates.
Uses the same kwargs as paho.mqtt.client.Client.subscribe()
Try qos=2 or options=SubscriptionOptions()
Your function should have the signature func(var1, var2, vars, *, client,userdata,message)
with a positional variable for each + or # in the pattern
"""
def _subscribe_decorator(func):
setattr(func, _SUBSCRIBE_DECORATOR_NAME, (topic, kwargs))
# no @wraps
return func
return _subscribe_decorator
def publish(self, topic_pattern, *topic_data, **kwargs):
"""
:param topic_pattern: A topic pattern, e.g. a/+/c/#
:param topic_data: some elements matching the pattern, e.g. "b", ("d", "e")
:param kwargs: Passed to Client.publish(self, topic, payload=None, qos=0, retain=False, properties=None)
:return:
"""
topic = pack_topic(topic_pattern, *topic_data)
self._mqttc.publish(topic, **kwargs)
_SUBSCRIBE_DECORATOR_NAME = name = __name__ + "." + GenericMqttEndpoint.subscribe_decorator.__qualname__
FORBIDDEN_CHARS = "/+#"
def pack_topic(pattern: str, *data):
data = list(data)
while "+" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
element = data.pop(0)
check_data_is_sane(element)
pattern = pattern.replace("+", element, 1)
while "#" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
remainder = data.pop(0)
if isinstance(remainder, str):
raise Exception("You should provide a list or a tuple to replace a '#', not a string.")
elements = list(remainder)
for element in elements:
check_data_is_sane(element)
pattern = pattern.replace("#", "/".join(elements), 1)
if data:
raise Exception("Unused placeholders are present")
return pattern
def check_data_is_sane(element):
for FORBIDDEN_CHAR in FORBIDDEN_CHARS:
if FORBIDDEN_CHAR in element:
raise Exception(f"Cannot fill in data containing a '{FORBIDDEN_CHAR}'")
def unpack_topic(pattern, topic):
"""
returns one string for each "+", followed by a list of strings when a trailing "#" is present
"""
pattern_parts = iter(pattern.split("/"))
topic_parts = iter(topic.split("/"))
while True:
try:
cur_pattern = next(pattern_parts)
except StopIteration:
try:
cur_topic = next(topic_parts)
raise Exception("The topic to be matched is longer than the pattern without an # suffix. "
"The first unmatched part is {!r}".format(cur_topic))
except StopIteration:
# no more elements in both sequences.
return | raise Exception("The pattern has a component after a #: {!r}".format(cur_pattern))
except StopIteration:
# topic has been exhausted by list() enumeration, and pattern is empty, too.
return
else:
try:
cur_topic = next(topic_parts)
except StopIteration:
raise Exception("The topic | if cur_pattern == "#":
yield list(topic_parts)
try:
cur_pattern = next(pattern_parts) | random_line_split |
mqtt_framework.py | .reasoncodes import ReasonCodes
from paho.mqtt.subscribeoptions import SubscribeOptions
log = getLogger(__name__)
class GenericMqttEndpoint:
def __init__(self, client_kwargs: dict, password_auth: dict, server_kwargs: dict, tls: bool):
"""
:param client_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L517
:param password_auth: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L1318
:param server_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L913
:param tls: If true, enables TLS with https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L765
"""
self.mqtt_client_kwargs = client_kwargs
# Some features and parameters depend on this.
self.mqtt_client_kwargs.update(protocol=MQTTv5)
self.mqtt_tls = tls
self.mqtt_password_auth = password_auth
self.mqtt_server_kwargs = server_kwargs
# This is specific to MQTTv5 (MQTTv311 has clean_session in the client_kwargs instead)
self.mqtt_server_kwargs.update(clean_start=MQTT_CLEAN_START_FIRST_ONLY)
self._mqttc = MqttClient(**self.mqtt_client_kwargs)
if self.mqtt_tls:
self._mqttc.tls_set()
if self.mqtt_password_auth:
self._mqttc.username_pw_set(**self.mqtt_password_auth)
self._mqttc.on_connect = self._on_connect
self._mqttc.on_disconnect = self._on_disconnect
self._mqttc.on_message = self._on_message
self._mqttc.on_log = self._on_log
self._managed_subsciptions = dict()
"""
This dictionary maps subscription topics to subscription options
"""
for attribute in self.__class__.__dict__.values():
if hasattr(attribute, _SUBSCRIBE_DECORATOR_NAME):
decorated_function = attribute
topic_pattern, kwargs = getattr(decorated_function, _SUBSCRIBE_DECORATOR_NAME)
if topic_pattern in self._managed_subsciptions:
raise Exception(
"A client cannot subscribe to an identical topic filter multiple times!")
else:
self._managed_subsciptions[topic_pattern] = kwargs
# This function introduces a scope,
# to avoid a changing decorated_function variable
# cause changing behaviour of call_decorated_function
def create_caller(decorated_function):
# the decorated_function has not yet a self object; thus we need this wrapper
@wraps(decorated_function)
def call_decorated_function(client, userdata, message):
variables = unpack_topic(topic_pattern, message.topic)
return decorated_function(self, client=client, userdata=userdata, message=message, *variables)
return call_decorated_function
# this is done only once, not on every reconnect / resubscribe.
self._mqttc.message_callback_add(topic_pattern, create_caller(decorated_function))
def connect(self):
# currently, this will retry first connects, we don't need bettermqtt
self._mqttc.connect_async(**self.mqtt_server_kwargs)
self._mqttc.loop_start()
def _on_connect(self, client, userdata, flags, rc: ReasonCodes, properties: Properties = None):
if flags['session present'] == 0:
# This is a new session, and we need to resubscribe
self._subscribe()
elif flags['session present'] == 1:
pass
else:
raise Exception("Unknown Session Present Flag")
def _subscribe(self):
# Edge case: This may fail if we disconnect when not subscribed to all channels; there seems to a case where
# subscribe() returns an error code that we currently do handle.
# With some luck, the subscription stays in the packet queue.
# Other defaults are sane, we don't need Subscription Options
# However, if our session expires (after long-lasting conection loss),
# we will unexpectedly re-receive all retained messages
# which is not bad, if they are idempotent
# We MUST NOT add message callbacks here, otherwise, they may be added twice upon reconnect after session expiry
for topic_filter, kwargs in self._managed_subsciptions.items():
self._mqttc.subscribe(topic=topic_filter, **kwargs)
def _on_disconnect(self, client, userdata, rc: ReasonCodes, properties: Properties = None):
# Exceptions here seem to disrupt the automatic reconnect
# Connection loss can be tested with:
# sudo tc qdisc add dev lo root netem loss 100%
# sudo tc qdisc del dev lo root
pass
def _on_message(self, client, userdata, message: MQTTMessage):
message_dict = {attr: getattr(message, attr) for attr in dir(message) if not attr.startswith("_")}
message_properties: Properties = message.properties
message_properties_dict = {attr: getattr(message_properties, attr) for attr in dir(message_properties) if
not attr.startswith("_")}
def _on_log(self, client, userdata, level, buf):
log.log(level, buf, extra=dict(userdata=userdata))
@staticmethod
def subscribe_decorator(topic, **kwargs):
"""
This must be the outermost decorator (except for other similar nop-decorators)
Avoid overlapping subscriptions or handle duplicates.
Uses the same kwargs as paho.mqtt.client.Client.subscribe()
Try qos=2 or options=SubscriptionOptions()
Your function should have the signature func(var1, var2, vars, *, client,userdata,message)
with a positional variable for each + or # in the pattern
"""
def _subscribe_decorator(func):
setattr(func, _SUBSCRIBE_DECORATOR_NAME, (topic, kwargs))
# no @wraps
return func
return _subscribe_decorator
def publish(self, topic_pattern, *topic_data, **kwargs):
"""
:param topic_pattern: A topic pattern, e.g. a/+/c/#
:param topic_data: some elements matching the pattern, e.g. "b", ("d", "e")
:param kwargs: Passed to Client.publish(self, topic, payload=None, qos=0, retain=False, properties=None)
:return:
"""
topic = pack_topic(topic_pattern, *topic_data)
self._mqttc.publish(topic, **kwargs)
_SUBSCRIBE_DECORATOR_NAME = name = __name__ + "." + GenericMqttEndpoint.subscribe_decorator.__qualname__
FORBIDDEN_CHARS = "/+#"
def pack_topic(pattern: str, *data):
data = list(data)
while "+" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
element = data.pop(0)
check_data_is_sane(element)
pattern = pattern.replace("+", element, 1)
while "#" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
remainder = data.pop(0)
if isinstance(remainder, str):
raise Exception("You should provide a list or a tuple to replace a '#', not a string.")
elements = list(remainder)
for element in elements:
check_data_is_sane(element)
pattern = pattern.replace("#", "/".join(elements), 1)
if data:
raise Exception("Unused placeholders are present")
return pattern
def check_data_is_sane(element):
for FORBIDDEN_CHAR in FORBIDDEN_CHARS:
if FORBIDDEN_CHAR in element:
raise Exception(f"Cannot fill in data containing a '{FORBIDDEN_CHAR}'")
def | (pattern, topic):
"""
returns one string for each "+", followed by a list of strings when a trailing "#" is present
"""
pattern_parts = iter(pattern.split("/"))
topic_parts = iter(topic.split("/"))
while True:
try:
cur_pattern = next(pattern_parts)
except StopIteration:
try:
cur_topic = next(topic_parts)
raise Exception("The topic to be matched is longer than the pattern without an # suffix. "
"The first unmatched part is {!r}".format(cur_topic))
except StopIteration:
# no more elements in both sequences.
return
if cur_pattern == "#":
yield list(topic_parts)
try:
cur_pattern = next(pattern_parts)
raise Exception("The pattern has a component after a #: {!r}".format(cur_pattern))
except StopIteration:
# topic has been exhausted by list() enumeration, and pattern is empty, too.
return
else:
try:
cur_topic = next(topic_parts)
except StopIteration:
raise Exception("The | unpack_topic | identifier_name |
mqtt_framework.py | .reasoncodes import ReasonCodes
from paho.mqtt.subscribeoptions import SubscribeOptions
log = getLogger(__name__)
class GenericMqttEndpoint:
def __init__(self, client_kwargs: dict, password_auth: dict, server_kwargs: dict, tls: bool):
"""
:param client_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L517
:param password_auth: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L1318
:param server_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L913
:param tls: If true, enables TLS with https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L765
"""
self.mqtt_client_kwargs = client_kwargs
# Some features and parameters depend on this.
self.mqtt_client_kwargs.update(protocol=MQTTv5)
self.mqtt_tls = tls
self.mqtt_password_auth = password_auth
self.mqtt_server_kwargs = server_kwargs
# This is specific to MQTTv5 (MQTTv311 has clean_session in the client_kwargs instead)
self.mqtt_server_kwargs.update(clean_start=MQTT_CLEAN_START_FIRST_ONLY)
self._mqttc = MqttClient(**self.mqtt_client_kwargs)
if self.mqtt_tls:
self._mqttc.tls_set()
if self.mqtt_password_auth:
self._mqttc.username_pw_set(**self.mqtt_password_auth)
self._mqttc.on_connect = self._on_connect
self._mqttc.on_disconnect = self._on_disconnect
self._mqttc.on_message = self._on_message
self._mqttc.on_log = self._on_log
self._managed_subsciptions = dict()
"""
This dictionary maps subscription topics to subscription options
"""
for attribute in self.__class__.__dict__.values():
if hasattr(attribute, _SUBSCRIBE_DECORATOR_NAME):
|
# this is done only once, not on every reconnect / resubscribe.
self._mqttc.message_callback_add(topic_pattern, create_caller(decorated_function))
def connect(self):
# currently, this will retry first connects, we don't need bettermqtt
self._mqttc.connect_async(**self.mqtt_server_kwargs)
self._mqttc.loop_start()
def _on_connect(self, client, userdata, flags, rc: ReasonCodes, properties: Properties = None):
if flags['session present'] == 0:
# This is a new session, and we need to resubscribe
self._subscribe()
elif flags['session present'] == 1:
pass
else:
raise Exception("Unknown Session Present Flag")
def _subscribe(self):
# Edge case: This may fail if we disconnect when not subscribed to all channels; there seems to a case where
# subscribe() returns an error code that we currently do handle.
# With some luck, the subscription stays in the packet queue.
# Other defaults are sane, we don't need Subscription Options
# However, if our session expires (after long-lasting conection loss),
# we will unexpectedly re-receive all retained messages
# which is not bad, if they are idempotent
# We MUST NOT add message callbacks here, otherwise, they may be added twice upon reconnect after session expiry
for topic_filter, kwargs in self._managed_subsciptions.items():
self._mqttc.subscribe(topic=topic_filter, **kwargs)
def _on_disconnect(self, client, userdata, rc: ReasonCodes, properties: Properties = None):
# Exceptions here seem to disrupt the automatic reconnect
# Connection loss can be tested with:
# sudo tc qdisc add dev lo root netem loss 100%
# sudo tc qdisc del dev lo root
pass
def _on_message(self, client, userdata, message: MQTTMessage):
message_dict = {attr: getattr(message, attr) for attr in dir(message) if not attr.startswith("_")}
message_properties: Properties = message.properties
message_properties_dict = {attr: getattr(message_properties, attr) for attr in dir(message_properties) if
not attr.startswith("_")}
def _on_log(self, client, userdata, level, buf):
log.log(level, buf, extra=dict(userdata=userdata))
@staticmethod
def subscribe_decorator(topic, **kwargs):
"""
This must be the outermost decorator (except for other similar nop-decorators)
Avoid overlapping subscriptions or handle duplicates.
Uses the same kwargs as paho.mqtt.client.Client.subscribe()
Try qos=2 or options=SubscriptionOptions()
Your function should have the signature func(var1, var2, vars, *, client,userdata,message)
with a positional variable for each + or # in the pattern
"""
def _subscribe_decorator(func):
setattr(func, _SUBSCRIBE_DECORATOR_NAME, (topic, kwargs))
# no @wraps
return func
return _subscribe_decorator
def publish(self, topic_pattern, *topic_data, **kwargs):
"""
:param topic_pattern: A topic pattern, e.g. a/+/c/#
:param topic_data: some elements matching the pattern, e.g. "b", ("d", "e")
:param kwargs: Passed to Client.publish(self, topic, payload=None, qos=0, retain=False, properties=None)
:return:
"""
topic = pack_topic(topic_pattern, *topic_data)
self._mqttc.publish(topic, **kwargs)
_SUBSCRIBE_DECORATOR_NAME = name = __name__ + "." + GenericMqttEndpoint.subscribe_decorator.__qualname__
FORBIDDEN_CHARS = "/+#"
def pack_topic(pattern: str, *data):
data = list(data)
while "+" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
element = data.pop(0)
check_data_is_sane(element)
pattern = pattern.replace("+", element, 1)
while "#" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
remainder = data.pop(0)
if isinstance(remainder, str):
raise Exception("You should provide a list or a tuple to replace a '#', not a string.")
elements = list(remainder)
for element in elements:
check_data_is_sane(element)
pattern = pattern.replace("#", "/".join(elements), 1)
if data:
raise Exception("Unused placeholders are present")
return pattern
def check_data_is_sane(element):
for FORBIDDEN_CHAR in FORBIDDEN_CHARS:
if FORBIDDEN_CHAR in element:
raise Exception(f"Cannot fill in data containing a '{FORBIDDEN_CHAR}'")
def unpack_topic(pattern, topic):
"""
returns one string for each "+", followed by a list of strings when a trailing "#" is present
"""
pattern_parts = iter(pattern.split("/"))
topic_parts = iter(topic.split("/"))
while True:
try:
cur_pattern = next(pattern_parts)
except StopIteration:
try:
cur_topic = next(topic_parts)
raise Exception("The topic to be matched is longer than the pattern without an # suffix. "
"The first unmatched part is {!r}".format(cur_topic))
except StopIteration:
# no more elements in both sequences.
return
if cur_pattern == "#":
yield list(topic_parts)
try:
cur_pattern = next(pattern_parts)
raise Exception("The pattern has a component after a #: {!r}".format(cur_pattern))
except StopIteration:
# topic has been exhausted by list() enumeration, and pattern is empty, too.
return
else:
try:
cur_topic = next(topic_parts)
except StopIteration:
raise Exception("The | decorated_function = attribute
topic_pattern, kwargs = getattr(decorated_function, _SUBSCRIBE_DECORATOR_NAME)
if topic_pattern in self._managed_subsciptions:
raise Exception(
"A client cannot subscribe to an identical topic filter multiple times!")
else:
self._managed_subsciptions[topic_pattern] = kwargs
# This function introduces a scope,
# to avoid a changing decorated_function variable
# cause changing behaviour of call_decorated_function
def create_caller(decorated_function):
# the decorated_function has not yet a self object; thus we need this wrapper
@wraps(decorated_function)
def call_decorated_function(client, userdata, message):
variables = unpack_topic(topic_pattern, message.topic)
return decorated_function(self, client=client, userdata=userdata, message=message, *variables)
return call_decorated_function | conditional_block |
skymotemanager.py | ridge(LJSocket = ljsocketAddress, serial = dev['serial'])
try:
d.ethernetFirmwareVersion()
except:
d.ethernetFWVersion = "(No Ethernet)"
d.nameCache = d.getName()
d.readSerialNumber()
d.usbFirmwareVersion()
d.mainFirmwareVersion()
d.productName = "SkyMote Bridge"
d.meetsFirmwareRequirements = True
d.spontaneousDataCache = dict()
d.motes = d.listMotes()
for mote in d.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
self.bridges["%s" % d.serialNumber] = d
t = SpontaneousDataLoggingThread(d)
t.start()
self.loggingThreads["%s" % d.serialNumber] = t
return self.bridges
def scan(self):
results = dict()
for b in self.bridges.values():
for mote in b.listMotes():
results[str(mote.moteId)] = mote.sensorSweep()
return results
def getBridge(self, serial):
if isinstance(serial, skymote.Bridge):
return serial
elif serial in self.bridges:
return self.bridges[serial]
else:
return self.bridges[str(serial)]
def getMote(self, b, unitId):
unitId = int(unitId)
m = None
for mote in b.motes:
if mote.unitId == unitId:
m = mote
break
return m
def scanBridge(self, serial):
results = dict()
b = self.getBridge(serial)
# Check if the device is free, if we're blocked just keep moving.
if b.deviceLock.acquire(0):
b.deviceLock.release()
numMotes = b.numMotes()
if numMotes != len(b.motes):
b.motes = b.listMotes()
for mote in b.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
results['Number of Connected Motes'] = len(b.motes)
motes = dict()
for m in b.motes:
moteDict = moteAsDict(m)
moteDict['nickname'] = moteDict['name']
data = b.spontaneousDataCache.get(str(m.unitId), {})
if data:
tableData = list()
tableData.append(createFeedbackDict('Temperature',data['Temp']))
tableData.append(createFeedbackDict('Light',data['Light']))
tableData.append(createFeedbackDict('Bump',data['Bump']))
tableData.append(createFeedbackDict('Tx Link Quality',data['TxLQI']))
tableData.append(createFeedbackDict('Rx Link Quality',data['RxLQI']))
tableData.append(createFeedbackDict('Vbatt',data['Battery']))
moteDict['tableData'] = tableData
moteDict['transId'] = data['transId']
try:
moteDict['missed'] = int((floattime() - m.lastCommunication) / m.checkinInterval)
except:
moteDict['missed'] = 0
if m.lastCommunication is not None:
moteDict['lastComm'] = m.lastCommunication
else:
moteDict['lastComm'] = -1
moteDict['inRapidMode'] = m.inRapidMode
motes[str(m.unitId)] = moteDict
results['Connected Motes'] = motes
# Not implemented: results['Temperature'] =
return results
def updateMoteSettings(self, serial, unitId, settings):
# Update the settings on a mote.
b = self.getBridge(serial)
m = self.getMote(b, unitId)
if m is None:
return False
if not m.inRapidMode:
m.startRapidMode()
m.inRapidMode = True
print "settings =", settings
if "name" in settings and settings['name'] != m.nickname:
log("Updating name to %s from %s." % (settings['name'], m.nickname))
m.name = settings['name']
m.nickname = settings['name']
if "unitId" in settings and settings['unitId'] != m.unitId:
log("Updating mote's Unit ID from %s to %s" % (m.unitId, settings['unitId']))
m.setUnitId(settings['unitId'])
if "checkinInterval" in settings and settings['checkinInterval'] != m.checkinInterval:
log("Updating mote's Check-In interval from %s to %s seconds" % (m.checkinInterval, settings['checkinInterval']))
m.setCheckinInterval(settings['checkinInterval']*1000)
return True
def updateBridgeSettings(self, serial, settings):
b = self.getBridge(serial)
if settings['name'] != b.nameCache:
log("Updating name to %s from %s." % (settings['name'], b.nameCache))
b.name = settings['name']
b.nameCache = settings['name']
netpassDict = b.getNetworkPassword()
if settings['enable'] != netpassDict['enabled'] or settings['password'] != netpassDict['password']:
e = settings['enable']
pw = settings['password']
for m in b.listMotes():
m.setNetworkPassword(pw, enable = e)
b.setNetworkPassword(pw, enable = e)
return True
def readRegister(self, serial, addr, numReg, format, unitId):
b = self.getBridge(serial)
return b.readRegister(addr, numReg = numReg, format = format, unitId = unitId)
def doFirmwareUpgrade(self, serial, unitId, fwFile):
"""
Starts the thread that will upgrade the firmware of a Skymote device
"""
b = self.getBridge(serial)
if unitId != 0:
# We are going to upgrade the motes
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = True, recovery = False)
else:
# We are going to upgrade the bridge
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = False, recovery = False)
b.upgradeThread.start()
return True
def getFirmwareStatus(self, serial):
b = self.getBridge(serial)
try:
t = b.upgradeThread
if t.running:
line = ""
try:
while True:
line += (t.statusList.pop() + "<br/>")
except:
pass
return line, True
else:
b.statusList = None
return "Firmware update finished.", False
except AttributeError:
return "Couldn't find a firmware upgrade thread.", False
class PlaceMoteInRapidModeThread(threading.Thread):
def __init__(self, mote):
threading.Thread.__init__(self)
self.daemon = True
self.mote = mote
self.mote.inRapidMode = False
self.mote.lastCommunication = None
def run(self):
log("Trying to place mote %s into high powered mode. This might take some time." % self.mote.unitId)
self.mote.nickname = MOTE_CACHE.get(str(self.mote.unitId) ,"Placeholder SkyMote Name")
self.mote.startRapidMode()
log("Mote %s successfully placed into high powered mode." % self.mote.unitId)
self.mote.inRapidMode = True
self.mote.nickname = self.mote.name
MOTE_CACHE[str(self.mote.unitId)] = self.mote.nickname
self.mote.mainFirmwareVersion()
self.mote.devType = self.mote.readRegister(65000)
if self.mote.devType == 2000:
self.mote.productName = "SkyMote TLB"
else:
self.mote.productName = "SkyMote Unknown Type"
self.mote.readSerialNumber()
self.mote.checkinInterval = self.mote.readCheckinInterval()/1000
class SpontaneousDataLoggingThread(threading.Thread):
| def __init__(self, bridge):
threading.Thread.__init__(self)
self.daemon = True
self.bridge = bridge
self.name = sanitize(self.bridge.nameCache)
self.filename = "%%Y-%%m-%%d %%H__%%M__%%S %s %s.csv" % (self.name, "spontaneous")
self.filename = datetime.now().strftime(self.filename)
self.headers = [ "Timestamp", "Unit ID", "Temp", "Light", "Bump", "RxLQI", "TxLQI", "Battery"]
self.filepath = "./logfiles/%s" % self.filename
self.running = False
try:
self.stream = open(self.filepath, "wb", 1)
self.csvWriter = csv.writer(self.stream)
except IOError:
os.mkdir("./logfiles")
self.stream = open(self.filepath, "wb", 1) | identifier_body |
|
skymotemanager.py | str(int(value))
chType += " lqi"
return {'connection' : connection, 'state' : state, 'value' : dictValue, 'chType' : chType}
class SkyMoteManager(object):
def __init__(self, address = LJSOCKET_ADDRESS, port = LJSOCKET_PORT):
# The address and port to try to connect to LJSocket
self.address = address
self.port = port
# Dictionary of all open bridges. Key = Serial, Value = Object.
self.bridges = dict()
# Logging Threads
self.loggingThreads = dict()
# Use Direct USB instead of LJSocket.
self.usbOverride = False
def shutdownThreads(self):
"""
Called when cherrypy starts shutting down, shutdownThreads stops all the
logging threads.
"""
for s, thread in self.loggingThreads.items():
thread.stop()
def findBridges(self):
devs = []
ljsocketAddress = "localhost:6000"
try:
devs = LabJackPython.listAll(ljsocketAddress, 200)
except:
return {}
for dev in devs:
#print "Got dev: serial = %s, prodId = 0x%X" % (dev['serial'], dev['prodId'])
if dev['prodId'] != 0x501:
continue
elif str(dev['serial']) in self.bridges:
d = self.bridges[str(dev['serial'])]
if d.numMotes() != len(d.motes):
log("Number of motes changed. Placing all motes into rapid mode.")
d.motes = d.listMotes()
for mote in d.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
continue
d = skymote.Bridge(LJSocket = ljsocketAddress, serial = dev['serial'])
try:
d.ethernetFirmwareVersion()
except:
d.ethernetFWVersion = "(No Ethernet)"
d.nameCache = d.getName()
d.readSerialNumber()
d.usbFirmwareVersion()
d.mainFirmwareVersion()
d.productName = "SkyMote Bridge"
d.meetsFirmwareRequirements = True
d.spontaneousDataCache = dict()
d.motes = d.listMotes()
for mote in d.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
self.bridges["%s" % d.serialNumber] = d
t = SpontaneousDataLoggingThread(d)
t.start()
self.loggingThreads["%s" % d.serialNumber] = t
return self.bridges
def scan(self):
results = dict()
for b in self.bridges.values():
|
return results
def getBridge(self, serial):
if isinstance(serial, skymote.Bridge):
return serial
elif serial in self.bridges:
return self.bridges[serial]
else:
return self.bridges[str(serial)]
def getMote(self, b, unitId):
unitId = int(unitId)
m = None
for mote in b.motes:
if mote.unitId == unitId:
m = mote
break
return m
def scanBridge(self, serial):
results = dict()
b = self.getBridge(serial)
# Check if the device is free, if we're blocked just keep moving.
if b.deviceLock.acquire(0):
b.deviceLock.release()
numMotes = b.numMotes()
if numMotes != len(b.motes):
b.motes = b.listMotes()
for mote in b.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
results['Number of Connected Motes'] = len(b.motes)
motes = dict()
for m in b.motes:
moteDict = moteAsDict(m)
moteDict['nickname'] = moteDict['name']
data = b.spontaneousDataCache.get(str(m.unitId), {})
if data:
tableData = list()
tableData.append(createFeedbackDict('Temperature',data['Temp']))
tableData.append(createFeedbackDict('Light',data['Light']))
tableData.append(createFeedbackDict('Bump',data['Bump']))
tableData.append(createFeedbackDict('Tx Link Quality',data['TxLQI']))
tableData.append(createFeedbackDict('Rx Link Quality',data['RxLQI']))
tableData.append(createFeedbackDict('Vbatt',data['Battery']))
moteDict['tableData'] = tableData
moteDict['transId'] = data['transId']
try:
moteDict['missed'] = int((floattime() - m.lastCommunication) / m.checkinInterval)
except:
moteDict['missed'] = 0
if m.lastCommunication is not None:
moteDict['lastComm'] = m.lastCommunication
else:
moteDict['lastComm'] = -1
moteDict['inRapidMode'] = m.inRapidMode
motes[str(m.unitId)] = moteDict
results['Connected Motes'] = motes
# Not implemented: results['Temperature'] =
return results
def updateMoteSettings(self, serial, unitId, settings):
# Update the settings on a mote.
b = self.getBridge(serial)
m = self.getMote(b, unitId)
if m is None:
return False
if not m.inRapidMode:
m.startRapidMode()
m.inRapidMode = True
print "settings =", settings
if "name" in settings and settings['name'] != m.nickname:
log("Updating name to %s from %s." % (settings['name'], m.nickname))
m.name = settings['name']
m.nickname = settings['name']
if "unitId" in settings and settings['unitId'] != m.unitId:
log("Updating mote's Unit ID from %s to %s" % (m.unitId, settings['unitId']))
m.setUnitId(settings['unitId'])
if "checkinInterval" in settings and settings['checkinInterval'] != m.checkinInterval:
log("Updating mote's Check-In interval from %s to %s seconds" % (m.checkinInterval, settings['checkinInterval']))
m.setCheckinInterval(settings['checkinInterval']*1000)
return True
def updateBridgeSettings(self, serial, settings):
b = self.getBridge(serial)
if settings['name'] != b.nameCache:
log("Updating name to %s from %s." % (settings['name'], b.nameCache))
b.name = settings['name']
b.nameCache = settings['name']
netpassDict = b.getNetworkPassword()
if settings['enable'] != netpassDict['enabled'] or settings['password'] != netpassDict['password']:
e = settings['enable']
pw = settings['password']
for m in b.listMotes():
m.setNetworkPassword(pw, enable = e)
b.setNetworkPassword(pw, enable = e)
return True
def readRegister(self, serial, addr, numReg, format, unitId):
b = self.getBridge(serial)
return b.readRegister(addr, numReg = numReg, format = format, unitId = unitId)
def doFirmwareUpgrade(self, serial, unitId, fwFile):
"""
Starts the thread that will upgrade the firmware of a Skymote device
"""
b = self.getBridge(serial)
if unitId != 0:
# We are going to upgrade the motes
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = True, recovery = False)
else:
# We are going to upgrade the bridge
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = False, recovery = False)
b.upgradeThread.start()
return True
def getFirmwareStatus(self, serial):
b = self.getBridge(serial)
try:
t = b.upgradeThread
if t.running:
line = ""
try:
while True:
line += (t.statusList.pop() + "<br/>")
except:
pass
return line, True
else:
b.statusList = None
return "Firmware update finished.", False
except AttributeError:
return "Couldn't find a firmware upgrade thread.", False
class PlaceMoteInRapidModeThread(threading.Thread):
def __init__(self, mote):
threading.Thread.__init__(self)
self.daemon = True
self.mote = mote
self.mote.inRapidMode = False
self.mote.lastCommunication = None
def run(self):
log("Trying to place mote %s into high powered mode. This might take some time." % self.mote.unitId | for mote in b.listMotes():
results[str(mote.moteId)] = mote.sensorSweep() | conditional_block |
skymotemanager.py |
if channelName == "Temperature":
dictValue = kelvinToFahrenheit(float(value) + 273.15)
state = (FLOAT_FORMAT % dictValue) + " °F"
elif channelName == "Vbatt":
state = (FLOAT_FORMAT % value) + " V"
chType += " vbatt"
elif channelName == "Bump":
chType = DIGITAL_IN_TYPE
if value:
state = "Bumped"
else:
state = "Still"
elif channelName.endswith("Link Quality"):
state = str(int(value))
dictValue = str(int(value))
chType += " lqi"
return {'connection' : connection, 'state' : state, 'value' : dictValue, 'chType' : chType}
class SkyMoteManager(object):
def __init__(self, address = LJSOCKET_ADDRESS, port = LJSOCKET_PORT):
# The address and port to try to connect to LJSocket
self.address = address
self.port = port
# Dictionary of all open bridges. Key = Serial, Value = Object.
self.bridges = dict()
# Logging Threads
self.loggingThreads = dict()
# Use Direct USB instead of LJSocket.
self.usbOverride = False
def shutdownThreads(self):
"""
Called when cherrypy starts shutting down, shutdownThreads stops all the
logging threads.
"""
for s, thread in self.loggingThreads.items():
thread.stop()
def findBridges(self):
devs = []
ljsocketAddress = "localhost:6000"
try:
devs = LabJackPython.listAll(ljsocketAddress, 200)
except:
return {}
for dev in devs:
#print "Got dev: serial = %s, prodId = 0x%X" % (dev['serial'], dev['prodId'])
if dev['prodId'] != 0x501:
continue
elif str(dev['serial']) in self.bridges:
d = self.bridges[str(dev['serial'])]
if d.numMotes() != len(d.motes):
log("Number of motes changed. Placing all motes into rapid mode.")
d.motes = d.listMotes()
for mote in d.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
continue
d = skymote.Bridge(LJSocket = ljsocketAddress, serial = dev['serial'])
try:
d.ethernetFirmwareVersion()
except:
d.ethernetFWVersion = "(No Ethernet)"
d.nameCache = d.getName()
d.readSerialNumber()
d.usbFirmwareVersion()
d.mainFirmwareVersion()
d.productName = "SkyMote Bridge"
d.meetsFirmwareRequirements = True
d.spontaneousDataCache = dict()
d.motes = d.listMotes()
for mote in d.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
self.bridges["%s" % d.serialNumber] = d
t = SpontaneousDataLoggingThread(d)
t.start()
self.loggingThreads["%s" % d.serialNumber] = t
return self.bridges
def scan(self):
results = dict()
for b in self.bridges.values():
for mote in b.listMotes():
results[str(mote.moteId)] = mote.sensorSweep()
return results
def getBridge(self, serial):
if isinstance(serial, skymote.Bridge):
return serial
elif serial in self.bridges:
return self.bridges[serial]
else:
return self.bridges[str(serial)]
def getMote(self, b, unitId):
unitId = int(unitId)
m = None
for mote in b.motes:
if mote.unitId == unitId:
m = mote
break
return m
def scanBridge(self, serial):
results = dict()
b = self.getBridge(serial)
# Check if the device is free, if we're blocked just keep moving.
if b.deviceLock.acquire(0):
b.deviceLock.release()
numMotes = b.numMotes()
if numMotes != len(b.motes):
b.motes = b.listMotes()
for mote in b.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
results['Number of Connected Motes'] = len(b.motes)
motes = dict()
for m in b.motes:
moteDict = moteAsDict(m)
moteDict['nickname'] = moteDict['name']
data = b.spontaneousDataCache.get(str(m.unitId), {})
if data:
tableData = list()
tableData.append(createFeedbackDict('Temperature',data['Temp']))
tableData.append(createFeedbackDict('Light',data['Light']))
tableData.append(createFeedbackDict('Bump',data['Bump']))
tableData.append(createFeedbackDict('Tx Link Quality',data['TxLQI']))
tableData.append(createFeedbackDict('Rx Link Quality',data['RxLQI']))
tableData.append(createFeedbackDict('Vbatt',data['Battery']))
moteDict['tableData'] = tableData
moteDict['transId'] = data['transId']
try:
moteDict['missed'] = int((floattime() - m.lastCommunication) / m.checkinInterval)
except:
moteDict['missed'] = 0
if m.lastCommunication is not None:
moteDict['lastComm'] = m.lastCommunication
else:
moteDict['lastComm'] = -1
moteDict['inRapidMode'] = m.inRapidMode
motes[str(m.unitId)] = moteDict
results['Connected Motes'] = motes
# Not implemented: results['Temperature'] =
return results
def updateMoteSettings(self, serial, unitId, settings):
# Update the settings on a mote.
b = self.getBridge(serial)
m = self.getMote(b, unitId)
if m is None:
return False
if not m.inRapidMode:
m.startRapidMode()
m.inRapidMode = True
print "settings =", settings
if "name" in settings and settings['name'] != m.nickname:
log("Updating name to %s from %s." % (settings['name'], m.nickname))
m.name = settings['name']
m.nickname = settings['name']
if "unitId" in settings and settings['unitId'] != m.unitId:
log("Updating mote's Unit ID from %s to %s" % (m.unitId, settings['unitId']))
m.setUnitId(settings['unitId'])
if "checkinInterval" in settings and settings['checkinInterval'] != m.checkinInterval:
log("Updating mote's Check-In interval from %s to %s seconds" % (m.checkinInterval, settings['checkinInterval']))
m.setCheckinInterval(settings['checkinInterval']*1000)
return True
def updateBridgeSettings(self, serial, settings):
b = self.getBridge(serial)
if settings['name'] != b.nameCache:
log("Updating name to %s from %s." % (settings['name'], b.nameCache))
b.name = settings['name']
b.nameCache = settings['name']
netpassDict = b.getNetworkPassword()
if settings['enable'] != netpassDict['enabled'] or settings['password'] != netpassDict['password']:
e = settings['enable']
pw = settings['password']
for m in b.listMotes():
m.setNetworkPassword(pw, enable = e)
b.setNetworkPassword(pw, enable = e)
return True
def readRegister(self, serial, addr, numReg, format, unitId):
b = self.getBridge(serial)
return b.readRegister(addr, numReg = numReg, format = format, unitId = unitId)
def doFirmwareUpgrade(self, serial, unitId, fwFile):
"""
Starts the thread that will upgrade the firmware of a Skymote device
"""
b = self.getBridge(serial)
if unitId != 0:
# We are going to upgrade the motes
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = True, recovery = False)
else:
# We are going to upgrade the bridge
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = False, recovery = False)
b.upgradeThread.start()
return True
def getFirmwareStatus(self, serial):
b = self.getBridge(serial)
try:
t = b.upgradeThread
| def createFeedbackDict(channelName, value):
connection = channelName
state = FLOAT_FORMAT % value
dictValue = FLOAT_FORMAT % value
chType = ANALOG_TYPE | random_line_split |
|
skymotemanager.py | (int(value))
chType += " lqi"
return {'connection' : connection, 'state' : state, 'value' : dictValue, 'chType' : chType}
class SkyMoteManager(object):
def __init__(self, address = LJSOCKET_ADDRESS, port = LJSOCKET_PORT):
# The address and port to try to connect to LJSocket
self.address = address
self.port = port
# Dictionary of all open bridges. Key = Serial, Value = Object.
self.bridges = dict()
# Logging Threads
self.loggingThreads = dict()
# Use Direct USB instead of LJSocket.
self.usbOverride = False
def shutdownThreads(self):
"""
Called when cherrypy starts shutting down, shutdownThreads stops all the
logging threads.
"""
for s, thread in self.loggingThreads.items():
thread.stop()
def findBridges(self):
devs = []
ljsocketAddress = "localhost:6000"
try:
devs = LabJackPython.listAll(ljsocketAddress, 200)
except:
return {}
for dev in devs:
#print "Got dev: serial = %s, prodId = 0x%X" % (dev['serial'], dev['prodId'])
if dev['prodId'] != 0x501:
continue
elif str(dev['serial']) in self.bridges:
d = self.bridges[str(dev['serial'])]
if d.numMotes() != len(d.motes):
log("Number of motes changed. Placing all motes into rapid mode.")
d.motes = d.listMotes()
for mote in d.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
continue
d = skymote.Bridge(LJSocket = ljsocketAddress, serial = dev['serial'])
try:
d.ethernetFirmwareVersion()
except:
d.ethernetFWVersion = "(No Ethernet)"
d.nameCache = d.getName()
d.readSerialNumber()
d.usbFirmwareVersion()
d.mainFirmwareVersion()
d.productName = "SkyMote Bridge"
d.meetsFirmwareRequirements = True
d.spontaneousDataCache = dict()
d.motes = d.listMotes()
for mote in d.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
self.bridges["%s" % d.serialNumber] = d
t = SpontaneousDataLoggingThread(d)
t.start()
self.loggingThreads["%s" % d.serialNumber] = t
return self.bridges
def | (self):
results = dict()
for b in self.bridges.values():
for mote in b.listMotes():
results[str(mote.moteId)] = mote.sensorSweep()
return results
def getBridge(self, serial):
if isinstance(serial, skymote.Bridge):
return serial
elif serial in self.bridges:
return self.bridges[serial]
else:
return self.bridges[str(serial)]
def getMote(self, b, unitId):
unitId = int(unitId)
m = None
for mote in b.motes:
if mote.unitId == unitId:
m = mote
break
return m
def scanBridge(self, serial):
results = dict()
b = self.getBridge(serial)
# Check if the device is free, if we're blocked just keep moving.
if b.deviceLock.acquire(0):
b.deviceLock.release()
numMotes = b.numMotes()
if numMotes != len(b.motes):
b.motes = b.listMotes()
for mote in b.motes:
t = PlaceMoteInRapidModeThread(mote)
t.start()
results['Number of Connected Motes'] = len(b.motes)
motes = dict()
for m in b.motes:
moteDict = moteAsDict(m)
moteDict['nickname'] = moteDict['name']
data = b.spontaneousDataCache.get(str(m.unitId), {})
if data:
tableData = list()
tableData.append(createFeedbackDict('Temperature',data['Temp']))
tableData.append(createFeedbackDict('Light',data['Light']))
tableData.append(createFeedbackDict('Bump',data['Bump']))
tableData.append(createFeedbackDict('Tx Link Quality',data['TxLQI']))
tableData.append(createFeedbackDict('Rx Link Quality',data['RxLQI']))
tableData.append(createFeedbackDict('Vbatt',data['Battery']))
moteDict['tableData'] = tableData
moteDict['transId'] = data['transId']
try:
moteDict['missed'] = int((floattime() - m.lastCommunication) / m.checkinInterval)
except:
moteDict['missed'] = 0
if m.lastCommunication is not None:
moteDict['lastComm'] = m.lastCommunication
else:
moteDict['lastComm'] = -1
moteDict['inRapidMode'] = m.inRapidMode
motes[str(m.unitId)] = moteDict
results['Connected Motes'] = motes
# Not implemented: results['Temperature'] =
return results
def updateMoteSettings(self, serial, unitId, settings):
# Update the settings on a mote.
b = self.getBridge(serial)
m = self.getMote(b, unitId)
if m is None:
return False
if not m.inRapidMode:
m.startRapidMode()
m.inRapidMode = True
print "settings =", settings
if "name" in settings and settings['name'] != m.nickname:
log("Updating name to %s from %s." % (settings['name'], m.nickname))
m.name = settings['name']
m.nickname = settings['name']
if "unitId" in settings and settings['unitId'] != m.unitId:
log("Updating mote's Unit ID from %s to %s" % (m.unitId, settings['unitId']))
m.setUnitId(settings['unitId'])
if "checkinInterval" in settings and settings['checkinInterval'] != m.checkinInterval:
log("Updating mote's Check-In interval from %s to %s seconds" % (m.checkinInterval, settings['checkinInterval']))
m.setCheckinInterval(settings['checkinInterval']*1000)
return True
def updateBridgeSettings(self, serial, settings):
b = self.getBridge(serial)
if settings['name'] != b.nameCache:
log("Updating name to %s from %s." % (settings['name'], b.nameCache))
b.name = settings['name']
b.nameCache = settings['name']
netpassDict = b.getNetworkPassword()
if settings['enable'] != netpassDict['enabled'] or settings['password'] != netpassDict['password']:
e = settings['enable']
pw = settings['password']
for m in b.listMotes():
m.setNetworkPassword(pw, enable = e)
b.setNetworkPassword(pw, enable = e)
return True
def readRegister(self, serial, addr, numReg, format, unitId):
b = self.getBridge(serial)
return b.readRegister(addr, numReg = numReg, format = format, unitId = unitId)
def doFirmwareUpgrade(self, serial, unitId, fwFile):
"""
Starts the thread that will upgrade the firmware of a Skymote device
"""
b = self.getBridge(serial)
if unitId != 0:
# We are going to upgrade the motes
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = True, recovery = False)
else:
# We are going to upgrade the bridge
b.upgradeThread = SkymoteFirmwareUpgraderThread(b, fwFile, upgradeMotes = False, recovery = False)
b.upgradeThread.start()
return True
def getFirmwareStatus(self, serial):
b = self.getBridge(serial)
try:
t = b.upgradeThread
if t.running:
line = ""
try:
while True:
line += (t.statusList.pop() + "<br/>")
except:
pass
return line, True
else:
b.statusList = None
return "Firmware update finished.", False
except AttributeError:
return "Couldn't find a firmware upgrade thread.", False
class PlaceMoteInRapidModeThread(threading.Thread):
def __init__(self, mote):
threading.Thread.__init__(self)
self.daemon = True
self.mote = mote
self.mote.inRapidMode = False
self.mote.lastCommunication = None
def run(self):
log("Trying to place mote %s into high powered mode. This might take some time." % self.mote.unitId | scan | identifier_name |
dz04.js | subIndex));
// [1, 1, 1, 1, 1]
// ================================================Задача 4 - 2
// Callback функция и метод push
// Функция isUniq принимает три параметра - element, index и arr.
// Функция возвращает true или false в зависимости от того встречается
// ли элемент первый раз в массиве(true)
// или этот элемент в массиве уже встречался(false).
// Функция isEven принимает один параметр - element.
// Функция возвращает true или false
// в зависимости от того является ли элемент четным числом или нет.
// Функция filterArray(array, cb), принимает 1 - м параметром array - массив чисел,
// а вторым параметром cb - функцию обратного вызова(callback).
// Дополни тело функции так, чтобы функция filterArray заполняла новый пустой массив numbers
// только теми элементами из массива array, для которых вызов функции cb вернет true.
// const isUniq = (element, index, arr) =>
// arr.indexOf(element) === index;
// // console.log(element);
// const isEven = (element) => element % 2 === 0;
// function filterArray(array, cb) {
// 'use strict';
// const numbers = [];
// for (let i = 0; i < array.length; i += 1) {
// const element = array[i];
// const index = i;
// // Write code under this line
// if (cb(element, index, array)) {
// numbers.push(element)
// }
// }
// return numbers;
// }
// const arr = [1, 2, 3, 4, 5, 1, 2, 5];
// console.log(filterArray(arr, isUniq));
// // [1, 2, 3, 4, 5]
// console.log(filterArray(arr, isEven));
// // [2, 4, 2]
// =================================Задача 4 - 3
// Callback функция для получения одного вычисляемого значения массива
// Функции add, sub и mult принимают два параметра - accum и element,
// возвращает число - сумму, разность или произведение параметров.
// Дополни тело функции reduceArray строкой присвоения accum вызова функции cb.
// Функция reduceArray должна будет подсчитать сумму или разность или произведение
// всех элементов массива в зависимости от того какая именно из трех функция(add, mult, sub)
// будет передана в качестве cb.
// const add = (accum, element) => accum + element;
// const mult = (accum, element) => accum * element;
// const sub = (accum, element) => accum - element;
// function reduceArray(array, cb, initial) {
// 'use strict';
// let i;
// let accum;
// if (arguments.length >= 3) {
// accum = initial;
// i = 0;
// }
// if (arguments.length === 2) {
// accum = array[0];
// i = 1;
// }
// for (i; i < array.length; i += 1) {
// const element = array[i];
// // Write code under this line
// accum = cb(accum, element); // строка присвоения
// }
// return accum;
// }
// const arr = [1, 2, 3, 4, 5];
// console.log(reduceArray(arr, add)); // 15
// console.log(reduceArray(arr, add, 10)); // 25
// console.log(reduceArray(arr, mult)); // 120
// console.log(reduceArray(arr, mult, 10)); // 1200
// console.log(reduceArray(arr, sub)); // -13
// console.log(reduceArray(arr, sub, 10)); // -5
// ==================================================Задача 4 - 4
// this в методах объекта
// Расставь отсутствующие this в методах объекта account.
// В комментариях показаны операции с объектом и ожидаемые результаты.
// const account = {
// owner: 'Mango',
// balance: 24000,
// discount: 0.1,
// orders: ['order-1', 'order-2', 'order-3'],
// changeDiscount(value) {
// this.discount = value; // Write code in this line
// },
// showOrders() {
// return this.orders; // Write code in this line
// },
// addOrder(cost, order) {
// this.balance -= cost; // Write code in this line
// this.orders.push(order); // Write code in this line
// },
// };
// const copyAccount = Object.assign({}, account);
// copyAccount.orders = [...account.orders];
// // копируем для автотестов ссылочные типы
// account.changeDiscount(0.15);
// console.log(account.discount); // 0.15
// console.log(account.showOrders());
// // ['order-1', 'order-2', 'order-3']
// account.addOrder(5000, 'order-4');
// console.log(account.balance); // 19000
// console.log(account.showOrders());
// ['order-1', 'order-2', 'order-3', 'order-4']
//============================================== Задача 4 - 5 | // inventory.add
// inventory.remove
//выступал объект inventory
// const inventory = {
// items: ['Knife', 'Gas mask'],
// add(itemName) {
// this.items.push(itemName);
// return `Adding ${itemName} to inventory`;
// },
// remove(itemName) {
// this.items = this.items.filter(item => item !== itemName);
// return `Removing ${itemName} from inventory`;
// },
// };
// const invokeInventoryAction = function (itemName, action) {
// const act = action(itemName);
// const msg = `Invoking action on ${itemName}`;
// return { act, msg };
// };
// const invokeAdd = invokeInventoryAction(
// 'Medkit',
// inventory.add.bind(inventory) // bind для замены this в методах объекта
// );
// const arrayAdd = [...inventory.items];
// console.log(invokeAdd);
// //{ act: 'Adding Medkit to inventory', msg: 'Invoking action on Medkit' }
// console.log(arrayAdd);
// // ['Knife', 'Gas mask', 'Medkit']
// const invokeRemove = invokeInventoryAction(
// 'Gas mask',
// inventory.remove.bind(inventory) // bind для замены this в методах объекта
// );
// const arrayRemove = [...inventory.items];
// console.log(invokeRemove);
// //{ act: 'Removing Gas mask from inventory', msg: 'Invoking action on Gas mask' }
// console.log(arrayRemove);
// // ['Knife', 'Medkit']
//чужая задача===========================================================================
// const inventory = {
// items: ['Монорельса', 'Фильтр'],
// add(itemName) {
// this.items.push(itemName);
// },
// remove(itemName) {
// this.items = this.items.filter(item => item !== itemName);
// },
// };
// const invokeInventoryOperation = function (itemName, inventoryAction) {
// console.log(`Invoking ${inventoryAction.name} opeartion on ${itemName}`);
// inventoryAction.call(inventory, itemName);
// };
// invokeInventoryOperation('Аптечка', inventory.add);
// // Invoking add opeartion on Аптечка
// console.log(inventory.items); // ['Монорельса', 'Фильтр', 'Аптечка']
// invokeInventoryOperation('Фильтр', inventory.remove);
// // Invoking remove opeartion on Фильтр
// console.log(inventory.items); // ['Монорельса', 'Аптечка']//
//проба с дебагер
// const pov = (x, n) => {
// if (n === 1) {
// return x;
// } else {
// return x * pov(x, n - 1);
// }
// }
// console.log(pov(2, 10)); //1024
// const printValue = function (value) {
// console.log(value);
// };
// const prettyPrint = function (value) {
// console.log('Logging value: ', value);
// };
// const repeat = function (n, action) {
// for (let i = 0; i < n; i += 1) {
// action(i);
// }
// };
// // Передаем printValue как callback-функцию
// repeat(3, printValue);
// // 0
// // 1
// // 2
// // Передаем prettyPrint как callback-функцию
// |
// bind для замены this в методах объекта
// Оформи вызов метода invokeInventoryAction таким образом,
// чтобы в качестве this для методов | random_line_split |
gradebook.go | book
// by the instructor.
Date GradebookDate `xml:",attr"`
// DueDate is the date on which the assignment was due for the student.
DueDate GradebookDate `xml:",attr"`
// Score holds the student's earned and possible raw score of the assignment.
Score AssignmentScore `xml:",attr"`
// ScoreType is the kind of score represented by the Score field; e.g. `Raw Score.`
ScoreType string `xml:",attr"`
// Points is the number of points for which the assignment actually counted.
// For example, an assignment score may be out of 20, but the instructor may
// choose to scale it down to only be worth 5 points (towards calculating the
// student's grade) or scale it up to be worth 80 points.
Points AssignmentPoints `xml:",attr"`
// Notes is any comment added by the instructor on the assignment entry.
Notes string `xml:",attr"`
}
// A CourseID holds the identification information for a class.
type CourseID struct {
// ID is the school's/StudentVUE's internal ID for the class.
ID string
// Name is the official name of the class.
Name string
}
func (cid *CourseID) UnmarshalXMLAttr(attr xml.Attr) error {
const nameRegex = "(.+?)\\s*(\\(.+?\\))"
r, err := regexp.Compile(nameRegex)
if err != nil {
return err
}
name := r.FindStringSubmatch(attr.Value)
if len(name) != 3 {
return fmt.Errorf("Expected course name attribute in format `Course (ID)`, received %s and found %d regex matches", attr.Value, len(name)-1)
}
var (
id string
cname string
)
for i, g := range name[1:] {
if rune(g[0]) == '(' && rune(g[len(g)-1]) == ')' {
id = g[1 : len(g)-1]
cname = name[i]
break
}
}
if id == "" {
return fmt.Errorf("Unable to parse out course name and ID from `%s`, got `%v`", attr.Value, name)
}
*cid = CourseID{
ID: id,
Name: cname,
}
return nil
}
// A Percentage is a floating-point number representing a percentage.
type Percentage struct {
float64
}
func (p *Percentage) UnmarshalXMLAttr(attr xml.Attr) error {
pct := attr.Value
if rune(pct[len(pct)-1]) != '%' {
return fmt.Errorf("Expected percentage attribute in format `x%`, received %s", pct)
}
f, err := strconv.ParseFloat(pct[:len(pct)-1], 64)
if err != nil {
return err
}
*p = Percentage{f}
return nil
}
// A GradebookDate holds a timestamp parsed from the format of StudentVUE's systems.
type GradebookDate struct {
time.Time
}
func (gd *GradebookDate) UnmarshalXMLAttr(attr xml.Attr) error {
const gradebookDateFormat = "1/2/2006"
dt, err := time.Parse(gradebookDateFormat, attr.Value)
if err != nil {
return err
}
*gd = GradebookDate{dt}
return nil
}
// An AssignmentScore holds the score information for a single assignment for a student.
type AssignmentScore struct {
// Graded denotes whether the assignment has been graded or not.
Graded bool
// NotDue indicates if the assignment is not due yet.
NotDue bool
// NotForGrading indicates that an assignment is either not to be graded yet
// or is in the gradebook just for organizational purposes (?)
NotForGrading bool
// Percentage indicates whether the score is a percentage rather than a raw score
Percentage bool
// Score is the number of points earned on the assignment by the student.
Score float64
// PossibleScore is the number of points that could be earned by the student.
PossibleScore float64
}
func (as *AssignmentScore) UnmarshalXMLAttr(attr xml.Attr) error {
switch attr.Value {
case "Not Graded":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "Not Due":
*as = AssignmentScore{
Graded: false,
NotDue: true,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: true,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
}
const scoreRegex = "([\\d\\.]+)\\s*out\\s*of\\s*([\\d\\.]+)"
const pctScoreRegex = "([\\d\\.]+)"
r, err := regexp.Compile(scoreRegex)
if err != nil {
return err
}
isPct := false
scores := r.FindStringSubmatch(attr.Value)
if len(scores) != 3 {
r, err = regexp.Compile(pctScoreRegex)
if err != nil {
return err
}
scores = r.FindStringSubmatch(attr.Value)
if len(scores) != 2 {
return fmt.Errorf("Expected assignment score in format `x out of y`, where x and y are real numbers, or `x`, where x is a percentage, received %s and parsed %d numbers", attr.Value, len(scores))
}
isPct = true
}
fs, err := stringsToFloats(scores[1:])
if err != nil {
return err
}
if isPct {
*as = AssignmentScore{
Graded: true,
NotDue: false,
NotForGrading: false,
Percentage: true,
Score: fs[0],
PossibleScore: 100,
}
} else {
*as = AssignmentScore{
Graded: true,
NotDue: false,
NotForGrading: false,
Score: fs[0],
PossibleScore: fs[1],
}
}
return nil
}
// An AssignmentPoints holds an assignment's actual score for a student.
// The different between AssignmentScore and AssignmentPoints is that an assignment's
// score is a raw score, while the points may be either the score scaled up or down
// to affect the student's actual grade differently.
type AssignmentPoints struct {
// Graded denotes whether the assignment has been graded or not.
Graded bool
// Points is the number of points that the student received on the assignment.
Points float64
// PossiblePoints is the number of points the student could receive on the assignment.
PossiblePoints float64
}
func (ap *AssignmentPoints) UnmarshalXMLAttr(attr xml.Attr) error {
if strings.Contains(attr.Value, "Points Possible") {
const pointsRegex = "([\\d\\.]+)\\s*Points\\s*Possible"
r, err := regexp.Compile(pointsRegex)
if err != nil {
return err
}
possiblePoints := r.FindStringSubmatch(attr.Value)
if len(possiblePoints) != 2 {
return fmt.Errorf("Expected points attribute in format `x Points Possible`, received %s and parsed %d values", attr.Value, len(possiblePoints))
}
val, err := stringsToFloats(possiblePoints[1:])
if err != nil {
return err
}
*ap = AssignmentPoints{
Graded: false,
Points: 0,
PossiblePoints: val[0],
}
} else {
const pointsRegex = "([\\d\\.]+)\\s*\\/\\s*([\\d\\.]+)"
r, err := regexp.Compile(pointsRegex)
if err != nil {
return err
}
points := r.FindStringSubmatch(attr.Value)
if len(points) != 3 {
return fmt.Errorf("Expected points attribute in format `x/y`, received %s and parsed %d numbers", attr.Value, len(points))
}
fs, err := stringsToFloats(points[1:])
if err != nil {
return err
}
*ap = AssignmentPoints{
Graded: true,
Points: fs[0],
PossiblePoints: fs[1],
}
}
return nil
}
func stringsToFloats(strs []string) ([]float64, error) | {
fs := make([]float64, 0, len(strs))
for _, s := range strs {
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
return fs, nil
} | identifier_body |
|
gradebook.go | category's weighted percentage and
// letter grade.
GradeSummaries []*AssignmentGradeCalc `xml:"GradeCalculationSummary>AssignmentGradeCalc"`
// Assignments holds all of the course's assignments for the grading period.
Assignments []*Assignment `xml:"Assignments>Assignment"`
}
// AssignmentGradeCalc represents one of a course's weighted categories.
// This may include Tests, Homework, Class Work, etc... These are created and decided
// by the course's instructor.
type AssignmentGradeCalc struct {
// Type is the name of the weighted category.
Type string `xml:",attr"`
// Weight is the weight of the category of the student's grade in percent.
Weight Percentage `xml:",attr"`
// Points is the number of points earned by the student in this category.
Points float64 `xml:",attr"`
// PointsPossible is the number of points that can be earned by the student in this category.
PointsPossible float64 `xml:",attr"`
// WeightedPercentage is the impact of this category on the student's overall
// grade in percent.
WeightedPercentage Percentage `xml:"WeightedPct,attr"`
// LetterGrade is the student's raw (number) grade mapped to a letter for this category.
LetterGrade string `xml:"CalculatedMark,attr"`
}
// An Assignment is a single entry into a course's gradebook by an instructor.
type Assignment struct {
// GradebookID is the internal ID given to the assignment by StudentVUE.
GradebookID string `xml:",attr"`
// Name is the name of the assignment entry.
Name string `xml:"Measure,attr"`
// Type is the weighted category to which the assignment belongs.
Type string `xml:",attr"`
// Date is the date on which the assignment was entered into the gradebook
// by the instructor.
Date GradebookDate `xml:",attr"`
// DueDate is the date on which the assignment was due for the student.
DueDate GradebookDate `xml:",attr"`
// Score holds the student's earned and possible raw score of the assignment.
Score AssignmentScore `xml:",attr"`
// ScoreType is the kind of score represented by the Score field; e.g. `Raw Score.`
ScoreType string `xml:",attr"`
// Points is the number of points for which the assignment actually counted.
// For example, an assignment score may be out of 20, but the instructor may
// choose to scale it down to only be worth 5 points (towards calculating the
// student's grade) or scale it up to be worth 80 points.
Points AssignmentPoints `xml:",attr"`
// Notes is any comment added by the instructor on the assignment entry.
Notes string `xml:",attr"`
}
// A CourseID holds the identification information for a class.
type CourseID struct {
// ID is the school's/StudentVUE's internal ID for the class.
ID string
// Name is the official name of the class.
Name string
}
func (cid *CourseID) UnmarshalXMLAttr(attr xml.Attr) error {
const nameRegex = "(.+?)\\s*(\\(.+?\\))"
r, err := regexp.Compile(nameRegex)
if err != nil {
return err
}
name := r.FindStringSubmatch(attr.Value)
if len(name) != 3 {
return fmt.Errorf("Expected course name attribute in format `Course (ID)`, received %s and found %d regex matches", attr.Value, len(name)-1)
}
var (
id string
cname string
)
for i, g := range name[1:] {
if rune(g[0]) == '(' && rune(g[len(g)-1]) == ')' {
id = g[1 : len(g)-1]
cname = name[i]
break
}
}
if id == "" {
return fmt.Errorf("Unable to parse out course name and ID from `%s`, got `%v`", attr.Value, name)
}
*cid = CourseID{
ID: id,
Name: cname,
}
return nil
}
// A Percentage is a floating-point number representing a percentage.
type Percentage struct {
float64
}
func (p *Percentage) UnmarshalXMLAttr(attr xml.Attr) error {
pct := attr.Value
if rune(pct[len(pct)-1]) != '%' {
return fmt.Errorf("Expected percentage attribute in format `x%`, received %s", pct)
}
f, err := strconv.ParseFloat(pct[:len(pct)-1], 64)
if err != nil {
return err
}
*p = Percentage{f}
return nil
}
// A GradebookDate holds a timestamp parsed from the format of StudentVUE's systems.
type GradebookDate struct {
time.Time
}
func (gd *GradebookDate) UnmarshalXMLAttr(attr xml.Attr) error {
const gradebookDateFormat = "1/2/2006"
dt, err := time.Parse(gradebookDateFormat, attr.Value)
if err != nil {
return err
}
*gd = GradebookDate{dt}
return nil
}
// An AssignmentScore holds the score information for a single assignment for a student.
type AssignmentScore struct {
// Graded denotes whether the assignment has been graded or not.
Graded bool
// NotDue indicates if the assignment is not due yet.
NotDue bool
// NotForGrading indicates that an assignment is either not to be graded yet
// or is in the gradebook just for organizational purposes (?)
NotForGrading bool
// Percentage indicates whether the score is a percentage rather than a raw score
Percentage bool
// Score is the number of points earned on the assignment by the student.
Score float64
// PossibleScore is the number of points that could be earned by the student.
PossibleScore float64
}
func (as *AssignmentScore) UnmarshalXMLAttr(attr xml.Attr) error {
switch attr.Value {
case "Not Graded":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "Not Due":
*as = AssignmentScore{
Graded: false,
NotDue: true,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: true,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
}
const scoreRegex = "([\\d\\.]+)\\s*out\\s*of\\s*([\\d\\.]+)"
const pctScoreRegex = "([\\d\\.]+)"
r, err := regexp.Compile(scoreRegex)
if err != nil {
return err
}
isPct := false
scores := r.FindStringSubmatch(attr.Value)
if len(scores) != 3 {
r, err = regexp.Compile(pctScoreRegex)
if err != nil {
return err
}
scores = r.FindStringSubmatch(attr.Value)
if len(scores) != 2 {
return fmt.Errorf("Expected assignment score in format `x out of y`, where x and y are real numbers, or `x`, where x is a percentage, received %s and parsed %d numbers", attr.Value, len(scores))
}
isPct = true
}
fs, err := stringsToFloats(scores[1:])
if err != nil {
return err
}
if isPct {
*as = AssignmentScore{
Graded: true,
NotDue: false,
NotForGrading: false,
Percentage: true,
Score: fs[0],
PossibleScore: 100,
}
} else {
*as = AssignmentScore{
Graded: true,
NotDue: false,
NotForGrading: false,
Score: fs[0],
PossibleScore: fs[1],
}
}
return nil
}
// An AssignmentPoints holds an assignment's actual score for a student.
// The different between AssignmentScore and AssignmentPoints is that an assignment's
// score is a raw score, while the points may be either the score scaled up or down
// to affect the student's actual grade differently.
type AssignmentPoints struct {
// Graded denotes whether the assignment has been graded or not.
Graded bool
// Points is the number of points that the student received on the assignment.
Points float64
// PossiblePoints is the number of points the student could receive on the assignment.
PossiblePoints float64
}
func (ap *AssignmentPoints) UnmarshalXMLAttr(attr xml.Attr) error {
if strings.Contains(attr.Value, "Points Possible") {
const pointsRegex = "([\\d\\.]+)\\s*Points\\s*Possible"
r, err := regexp.Compile(pointsRegex)
if err != nil | {
return err
} | conditional_block |
|
gradebook.go | attr"`
}
// A Course represents one of a student's classes.
type Course struct {
// Period is the period of the day in which the student has this class.
Period int `xml:",attr"`
// ID holds identification information for this class, which includes
// its Name and ID within the school's/StudentVUE's systems.
ID CourseID `xml:"Title,attr"`
// Room is the room number of this class inside the school.
Room string `xml:",attr"`
// Teacher is the name of the instructor of this class.
Teacher string `xml:"Staff,attr"`
// TeacherEmail is the email of this class's instructor.
TeacherEmail string `xml:"StaffEMail,attr"`
// Marks holds the student's grading, including assignments, information
//for each grading period.
Marks []*CourseMark `xml:"Marks>Mark"`
// CurrentMark points to the mark for the current grading period.
CurrentMark *CourseMark `xml:"-"`
}
// A CourseMark holds a student's grades and assignments for a single grading period.
type CourseMark struct {
// Name is the name of the grading period.
Name string `xml:"MarkName,attr"`
// LetterGrade is the student's raw (number) grade mapped to a letter.
// Usually mapped as such:
// 90+ -> A
// 80+ -> B
// 70+ -> C
// 60+ -> D
// Else -> F
LetterGrade string `xml:"CalculatedScoreString,attr"`
// RawGradeScore is the student's raw percentage grade for the grading period.
RawGradeScore float64 `xml:"CalculatedScoreRaw,attr"`
// GradeSummaries holds the grade summaries for each of the course's weighted categories.
// For example, if a course weighs Tests and Homework as separate categories, those will
// be contained here with information including the category's weighted percentage and
// letter grade.
GradeSummaries []*AssignmentGradeCalc `xml:"GradeCalculationSummary>AssignmentGradeCalc"`
// Assignments holds all of the course's assignments for the grading period.
Assignments []*Assignment `xml:"Assignments>Assignment"`
}
// AssignmentGradeCalc represents one of a course's weighted categories.
// This may include Tests, Homework, Class Work, etc... These are created and decided
// by the course's instructor.
type AssignmentGradeCalc struct {
// Type is the name of the weighted category.
Type string `xml:",attr"`
// Weight is the weight of the category of the student's grade in percent.
Weight Percentage `xml:",attr"`
// Points is the number of points earned by the student in this category.
Points float64 `xml:",attr"`
// PointsPossible is the number of points that can be earned by the student in this category.
PointsPossible float64 `xml:",attr"`
// WeightedPercentage is the impact of this category on the student's overall
// grade in percent.
WeightedPercentage Percentage `xml:"WeightedPct,attr"`
// LetterGrade is the student's raw (number) grade mapped to a letter for this category.
LetterGrade string `xml:"CalculatedMark,attr"`
}
// An Assignment is a single entry into a course's gradebook by an instructor.
type Assignment struct {
// GradebookID is the internal ID given to the assignment by StudentVUE.
GradebookID string `xml:",attr"`
// Name is the name of the assignment entry.
Name string `xml:"Measure,attr"`
// Type is the weighted category to which the assignment belongs.
Type string `xml:",attr"`
// Date is the date on which the assignment was entered into the gradebook
// by the instructor.
Date GradebookDate `xml:",attr"`
// DueDate is the date on which the assignment was due for the student.
DueDate GradebookDate `xml:",attr"`
// Score holds the student's earned and possible raw score of the assignment.
Score AssignmentScore `xml:",attr"`
// ScoreType is the kind of score represented by the Score field; e.g. `Raw Score.`
ScoreType string `xml:",attr"`
// Points is the number of points for which the assignment actually counted.
// For example, an assignment score may be out of 20, but the instructor may
// choose to scale it down to only be worth 5 points (towards calculating the
// student's grade) or scale it up to be worth 80 points. | Notes string `xml:",attr"`
}
// A CourseID holds the identification information for a class.
type CourseID struct {
// ID is the school's/StudentVUE's internal ID for the class.
ID string
// Name is the official name of the class.
Name string
}
func (cid *CourseID) UnmarshalXMLAttr(attr xml.Attr) error {
const nameRegex = "(.+?)\\s*(\\(.+?\\))"
r, err := regexp.Compile(nameRegex)
if err != nil {
return err
}
name := r.FindStringSubmatch(attr.Value)
if len(name) != 3 {
return fmt.Errorf("Expected course name attribute in format `Course (ID)`, received %s and found %d regex matches", attr.Value, len(name)-1)
}
var (
id string
cname string
)
for i, g := range name[1:] {
if rune(g[0]) == '(' && rune(g[len(g)-1]) == ')' {
id = g[1 : len(g)-1]
cname = name[i]
break
}
}
if id == "" {
return fmt.Errorf("Unable to parse out course name and ID from `%s`, got `%v`", attr.Value, name)
}
*cid = CourseID{
ID: id,
Name: cname,
}
return nil
}
// A Percentage is a floating-point number representing a percentage.
type Percentage struct {
float64
}
func (p *Percentage) UnmarshalXMLAttr(attr xml.Attr) error {
pct := attr.Value
if rune(pct[len(pct)-1]) != '%' {
return fmt.Errorf("Expected percentage attribute in format `x%`, received %s", pct)
}
f, err := strconv.ParseFloat(pct[:len(pct)-1], 64)
if err != nil {
return err
}
*p = Percentage{f}
return nil
}
// A GradebookDate holds a timestamp parsed from the format of StudentVUE's systems.
type GradebookDate struct {
time.Time
}
func (gd *GradebookDate) UnmarshalXMLAttr(attr xml.Attr) error {
const gradebookDateFormat = "1/2/2006"
dt, err := time.Parse(gradebookDateFormat, attr.Value)
if err != nil {
return err
}
*gd = GradebookDate{dt}
return nil
}
// An AssignmentScore holds the score information for a single assignment for a student.
type AssignmentScore struct {
// Graded denotes whether the assignment has been graded or not.
Graded bool
// NotDue indicates if the assignment is not due yet.
NotDue bool
// NotForGrading indicates that an assignment is either not to be graded yet
// or is in the gradebook just for organizational purposes (?)
NotForGrading bool
// Percentage indicates whether the score is a percentage rather than a raw score
Percentage bool
// Score is the number of points earned on the assignment by the student.
Score float64
// PossibleScore is the number of points that could be earned by the student.
PossibleScore float64
}
func (as *AssignmentScore) UnmarshalXMLAttr(attr xml.Attr) error {
switch attr.Value {
case "Not Graded":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "Not Due":
*as = AssignmentScore{
Graded: false,
NotDue: true,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: true,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
}
const scoreRegex = "([\\d\\.]+)\\s*out\\s*of\\s*([\\d\\.]+)"
const pctScoreRegex = "([\\d\\.]+)"
r, err := regexp.Compile(scoreRegex)
if err != nil {
return err
}
isPct := false
scores := r.FindStringSubmatch(attr.Value)
if len(scores) != 3 {
r, err = regexp.Compile(pctScoreRegex)
if err != nil {
return err
}
scores = r.FindStringSubmatch(attr.Value)
if len(scores) != 2 | Points AssignmentPoints `xml:",attr"`
// Notes is any comment added by the instructor on the assignment entry. | random_line_split |
gradebook.go | awGradeScore float64 `xml:"CalculatedScoreRaw,attr"`
// GradeSummaries holds the grade summaries for each of the course's weighted categories.
// For example, if a course weighs Tests and Homework as separate categories, those will
// be contained here with information including the category's weighted percentage and
// letter grade.
GradeSummaries []*AssignmentGradeCalc `xml:"GradeCalculationSummary>AssignmentGradeCalc"`
// Assignments holds all of the course's assignments for the grading period.
Assignments []*Assignment `xml:"Assignments>Assignment"`
}
// AssignmentGradeCalc represents one of a course's weighted categories.
// This may include Tests, Homework, Class Work, etc... These are created and decided
// by the course's instructor.
type AssignmentGradeCalc struct {
// Type is the name of the weighted category.
Type string `xml:",attr"`
// Weight is the weight of the category of the student's grade in percent.
Weight Percentage `xml:",attr"`
// Points is the number of points earned by the student in this category.
Points float64 `xml:",attr"`
// PointsPossible is the number of points that can be earned by the student in this category.
PointsPossible float64 `xml:",attr"`
// WeightedPercentage is the impact of this category on the student's overall
// grade in percent.
WeightedPercentage Percentage `xml:"WeightedPct,attr"`
// LetterGrade is the student's raw (number) grade mapped to a letter for this category.
LetterGrade string `xml:"CalculatedMark,attr"`
}
// An Assignment is a single entry into a course's gradebook by an instructor.
type Assignment struct {
// GradebookID is the internal ID given to the assignment by StudentVUE.
GradebookID string `xml:",attr"`
// Name is the name of the assignment entry.
Name string `xml:"Measure,attr"`
// Type is the weighted category to which the assignment belongs.
Type string `xml:",attr"`
// Date is the date on which the assignment was entered into the gradebook
// by the instructor.
Date GradebookDate `xml:",attr"`
// DueDate is the date on which the assignment was due for the student.
DueDate GradebookDate `xml:",attr"`
// Score holds the student's earned and possible raw score of the assignment.
Score AssignmentScore `xml:",attr"`
// ScoreType is the kind of score represented by the Score field; e.g. `Raw Score.`
ScoreType string `xml:",attr"`
// Points is the number of points for which the assignment actually counted.
// For example, an assignment score may be out of 20, but the instructor may
// choose to scale it down to only be worth 5 points (towards calculating the
// student's grade) or scale it up to be worth 80 points.
Points AssignmentPoints `xml:",attr"`
// Notes is any comment added by the instructor on the assignment entry.
Notes string `xml:",attr"`
}
// A CourseID holds the identification information for a class.
type CourseID struct {
// ID is the school's/StudentVUE's internal ID for the class.
ID string
// Name is the official name of the class.
Name string
}
func (cid *CourseID) UnmarshalXMLAttr(attr xml.Attr) error {
const nameRegex = "(.+?)\\s*(\\(.+?\\))"
r, err := regexp.Compile(nameRegex)
if err != nil {
return err
}
name := r.FindStringSubmatch(attr.Value)
if len(name) != 3 {
return fmt.Errorf("Expected course name attribute in format `Course (ID)`, received %s and found %d regex matches", attr.Value, len(name)-1)
}
var (
id string
cname string
)
for i, g := range name[1:] {
if rune(g[0]) == '(' && rune(g[len(g)-1]) == ')' {
id = g[1 : len(g)-1]
cname = name[i]
break
}
}
if id == "" {
return fmt.Errorf("Unable to parse out course name and ID from `%s`, got `%v`", attr.Value, name)
}
*cid = CourseID{
ID: id,
Name: cname,
}
return nil
}
// A Percentage is a floating-point number representing a percentage.
type Percentage struct {
float64
}
func (p *Percentage) UnmarshalXMLAttr(attr xml.Attr) error {
pct := attr.Value
if rune(pct[len(pct)-1]) != '%' {
return fmt.Errorf("Expected percentage attribute in format `x%`, received %s", pct)
}
f, err := strconv.ParseFloat(pct[:len(pct)-1], 64)
if err != nil {
return err
}
*p = Percentage{f}
return nil
}
// A GradebookDate holds a timestamp parsed from the format of StudentVUE's systems.
type GradebookDate struct {
time.Time
}
func (gd *GradebookDate) UnmarshalXMLAttr(attr xml.Attr) error {
const gradebookDateFormat = "1/2/2006"
dt, err := time.Parse(gradebookDateFormat, attr.Value)
if err != nil {
return err
}
*gd = GradebookDate{dt}
return nil
}
// An AssignmentScore holds the score information for a single assignment for a student.
type AssignmentScore struct {
// Graded denotes whether the assignment has been graded or not.
Graded bool
// NotDue indicates if the assignment is not due yet.
NotDue bool
// NotForGrading indicates that an assignment is either not to be graded yet
// or is in the gradebook just for organizational purposes (?)
NotForGrading bool
// Percentage indicates whether the score is a percentage rather than a raw score
Percentage bool
// Score is the number of points earned on the assignment by the student.
Score float64
// PossibleScore is the number of points that could be earned by the student.
PossibleScore float64
}
func (as *AssignmentScore) UnmarshalXMLAttr(attr xml.Attr) error {
switch attr.Value {
case "Not Graded":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "Not Due":
*as = AssignmentScore{
Graded: false,
NotDue: true,
NotForGrading: false,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
case "":
*as = AssignmentScore{
Graded: false,
NotDue: false,
NotForGrading: true,
Percentage: false,
Score: 0,
PossibleScore: 0,
}
return nil
}
const scoreRegex = "([\\d\\.]+)\\s*out\\s*of\\s*([\\d\\.]+)"
const pctScoreRegex = "([\\d\\.]+)"
r, err := regexp.Compile(scoreRegex)
if err != nil {
return err
}
isPct := false
scores := r.FindStringSubmatch(attr.Value)
if len(scores) != 3 {
r, err = regexp.Compile(pctScoreRegex)
if err != nil {
return err
}
scores = r.FindStringSubmatch(attr.Value)
if len(scores) != 2 {
return fmt.Errorf("Expected assignment score in format `x out of y`, where x and y are real numbers, or `x`, where x is a percentage, received %s and parsed %d numbers", attr.Value, len(scores))
}
isPct = true
}
fs, err := stringsToFloats(scores[1:])
if err != nil {
return err
}
if isPct {
*as = AssignmentScore{
Graded: true,
NotDue: false,
NotForGrading: false,
Percentage: true,
Score: fs[0],
PossibleScore: 100,
}
} else {
*as = AssignmentScore{
Graded: true,
NotDue: false,
NotForGrading: false,
Score: fs[0],
PossibleScore: fs[1],
}
}
return nil
}
// An AssignmentPoints holds an assignment's actual score for a student.
// The different between AssignmentScore and AssignmentPoints is that an assignment's
// score is a raw score, while the points may be either the score scaled up or down
// to affect the student's actual grade differently.
type AssignmentPoints struct {
// Graded denotes whether the assignment has been graded or not.
Graded bool
// Points is the number of points that the student received on the assignment.
Points float64
// PossiblePoints is the number of points the student could receive on the assignment.
PossiblePoints float64
}
func (ap *AssignmentPoints) | UnmarshalXMLAttr | identifier_name |
|
mod.rs | Define array index type
pub type ArrayIndex = usize;
/// The Piece type includes black and white
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum PieceType {
WHITE, BLACK
}
impl PieceType {
pub fn get_name(&self) -> &str {
match self {
PieceType::WHITE => "White",
PieceType::BLACK => "Black"
}
}
pub fn | (&self) -> board::BoardPieceType {
match self {
PieceType::BLACK => board::BoardPieceType::BLACK,
PieceType::WHITE => board::BoardPieceType::WHITE,
}
}
}
impl fmt::Display for PieceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get_name())
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum GameBuilderPlayerType {
Human,
IdiotAi,
}
/// Game builder
pub struct GameBuilder {
first_player: GameBuilderPlayerType,
second_player: GameBuilderPlayerType
}
impl GameBuilder {
/// Create an game builder object
pub fn new() -> GameBuilder {
GameBuilder {
first_player: GameBuilderPlayerType::Human,
second_player: GameBuilderPlayerType::Human
}
}
/// Set the first player (Uses black piece)
pub fn set_first_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.first_player = player_type;
self
}
/// Set the second player (Uses black piece)
pub fn set_second_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.second_player = player_type;
self
}
pub fn build(&self) -> Game {
Game::new(
GameBuilder::create_player(self.first_player, BLACK),
GameBuilder::create_player(self.second_player, WHITE),
)
}
fn create_player(player_type: GameBuilderPlayerType, piece: PieceType) -> Box<Player> {
match player_type {
GameBuilderPlayerType::Human => Box::new(LocalHumanPlayer::new(piece)),
GameBuilderPlayerType::IdiotAi => Box::new(IdiotAi::new(piece))
}
}
}
///
/// Game context in game, typically is same as Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it's first player point
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create an game builder object, equals with GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function will initialize the game board,
/// but currently is unreusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print who will point this time
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print where is pointed
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Start the game main loop, loop the two player to point, until the game is end.
///
/// In the loop, when every player placed a piece, the game updates it's board and print,
/// then invoke the blocking function `Player::point()`, let another place piece.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// Current player cannot point anything because another player is wined
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I returns the reference of winner player?
/// Place a piece in the game
///
/// Returns the winner if the game is end.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
// place the piece to board, and check the game is end
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
// Change current player to another player, and returns new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
/// Get another player, don't change the current player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
/// Get another player mutable reference, don't change the current player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
}
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
/// Check the game is end, if end, returns true; not end the return false.
///
/// So the winner is the top of history stack
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize | to_board_piece_type | identifier_name |
mod.rs | Define array index type
pub type ArrayIndex = usize;
/// The Piece type includes black and white
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum PieceType {
WHITE, BLACK
}
impl PieceType {
pub fn get_name(&self) -> &str {
match self {
PieceType::WHITE => "White",
PieceType::BLACK => "Black"
}
}
pub fn to_board_piece_type(&self) -> board::BoardPieceType {
match self {
PieceType::BLACK => board::BoardPieceType::BLACK,
PieceType::WHITE => board::BoardPieceType::WHITE,
}
}
}
impl fmt::Display for PieceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get_name())
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum GameBuilderPlayerType {
Human,
IdiotAi,
}
/// Game builder
pub struct GameBuilder {
first_player: GameBuilderPlayerType,
second_player: GameBuilderPlayerType
}
impl GameBuilder {
/// Create an game builder object
pub fn new() -> GameBuilder {
GameBuilder {
first_player: GameBuilderPlayerType::Human,
second_player: GameBuilderPlayerType::Human
}
}
/// Set the first player (Uses black piece) | self
}
/// Set the second player (Uses black piece)
pub fn set_second_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.second_player = player_type;
self
}
pub fn build(&self) -> Game {
Game::new(
GameBuilder::create_player(self.first_player, BLACK),
GameBuilder::create_player(self.second_player, WHITE),
)
}
fn create_player(player_type: GameBuilderPlayerType, piece: PieceType) -> Box<Player> {
match player_type {
GameBuilderPlayerType::Human => Box::new(LocalHumanPlayer::new(piece)),
GameBuilderPlayerType::IdiotAi => Box::new(IdiotAi::new(piece))
}
}
}
///
/// Game context in game, typically is same as Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it's first player point
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create an game builder object, equals with GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function will initialize the game board,
/// but currently is unreusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print who will point this time
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print where is pointed
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Start the game main loop, loop the two player to point, until the game is end.
///
/// In the loop, when every player placed a piece, the game updates it's board and print,
/// then invoke the blocking function `Player::point()`, let another place piece.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// Current player cannot point anything because another player is wined
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I returns the reference of winner player?
/// Place a piece in the game
///
/// Returns the winner if the game is end.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
// place the piece to board, and check the game is end
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
// Change current player to another player, and returns new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
/// Get another player, don't change the current player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
/// Get another player mutable reference, don't change the current player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
}
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
/// Check the game is end, if end, returns true; not end the return false.
///
/// So the winner is the top of history stack
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize, | pub fn set_first_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.first_player = player_type; | random_line_split |
mod.rs | same as Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it's first player point
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create an game builder object, equals with GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function will initialize the game board,
/// but currently is unreusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print who will point this time
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print where is pointed
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Start the game main loop, loop the two player to point, until the game is end.
///
/// In the loop, when every player placed a piece, the game updates it's board and print,
/// then invoke the blocking function `Player::point()`, let another place piece.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// Current player cannot point anything because another player is wined
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I returns the reference of winner player?
/// Place a piece in the game
///
/// Returns the winner if the game is end.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
// place the piece to board, and check the game is end
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
// Change current player to another player, and returns new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
/// Get another player, don't change the current player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
/// Get another player mutable reference, don't change the current player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
}
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
/// Check the game is end, if end, returns true; not end the return false.
///
/// So the winner is the top of history stack
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize, isize); 4] = [
(0, 1),
(1, 1),
(1, 0),
(1, -1)
];
fn move_dir(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) + dir.0;
let new_y = (coord.y as isize) + dir.1;
if new_x < 0 {
return Err("x is out of bound");
} else if new_y < 0 {
return Err("y is out of bound");
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
fn move_dir_reverse(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) - dir.0;
let new_y = (coord.y as isize) - dir.1;
if new_x < 0 {
return Err("x is out of bound")
} else if new_y < 0 {
return Err("y is out of bound")
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
// Check 4 directions negative and positive directions from point position
for dir in MOVE_DIRECTION.iter() {
let mut score = 1;
{
let mut next_coord = move_dir(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
}
}
{
let mut next_coord = move_dir_reverse(&last_coordination, dir);
if next_coord.is_ok() | {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir_reverse(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
} | conditional_block |
|
mod.rs | array index type
pub type ArrayIndex = usize;
/// The Piece type includes black and white
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum PieceType {
WHITE, BLACK
}
impl PieceType {
pub fn get_name(&self) -> &str {
match self {
PieceType::WHITE => "White",
PieceType::BLACK => "Black"
}
}
pub fn to_board_piece_type(&self) -> board::BoardPieceType {
match self {
PieceType::BLACK => board::BoardPieceType::BLACK,
PieceType::WHITE => board::BoardPieceType::WHITE,
}
}
}
impl fmt::Display for PieceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get_name())
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum GameBuilderPlayerType {
Human,
IdiotAi,
}
/// Game builder
pub struct GameBuilder {
first_player: GameBuilderPlayerType,
second_player: GameBuilderPlayerType
}
impl GameBuilder {
/// Create an game builder object
pub fn new() -> GameBuilder {
GameBuilder {
first_player: GameBuilderPlayerType::Human,
second_player: GameBuilderPlayerType::Human
}
}
/// Set the first player (Uses black piece)
pub fn set_first_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.first_player = player_type;
self
}
/// Set the second player (Uses black piece)
pub fn set_second_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.second_player = player_type;
self
}
pub fn build(&self) -> Game {
Game::new(
GameBuilder::create_player(self.first_player, BLACK),
GameBuilder::create_player(self.second_player, WHITE),
)
}
fn create_player(player_type: GameBuilderPlayerType, piece: PieceType) -> Box<Player> {
match player_type {
GameBuilderPlayerType::Human => Box::new(LocalHumanPlayer::new(piece)),
GameBuilderPlayerType::IdiotAi => Box::new(IdiotAi::new(piece))
}
}
}
///
/// Game context in game, typically is same as Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it's first player point
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create an game builder object, equals with GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function will initialize the game board,
/// but currently is unreusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print who will point this time
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print where is pointed
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Start the game main loop, loop the two player to point, until the game is end.
///
/// In the loop, when every player placed a piece, the game updates it's board and print,
/// then invoke the blocking function `Player::point()`, let another place piece.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// Current player cannot point anything because another player is wined
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I returns the reference of winner player?
/// Place a piece in the game
///
/// Returns the winner if the game is end.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
// place the piece to board, and check the game is end
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
// Change current player to another player, and returns new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
/// Get another player, don't change the current player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
/// Get another player mutable reference, don't change the current player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> |
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
/// Check the game is end, if end, returns true; not end the return false.
///
/// So the winner is the top of history stack
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize | {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
} | identifier_body |
lib.rs | (Serialize, Deserialize)]
pub struct Site {
pub base_uri: String,
pub name: String,
pub proto: String,
pub description: String,
}
#[derive(Serialize, Deserialize)]
pub struct Data {
pub gpx_input: String,
pub img_input: String,
pub site_output: String,
}
#[derive(Serialize, Deserialize)]
pub struct TrackArticle {
pub title: String,
pub underscored_title: String,
pub photos_number: usize,
pub country: String,
pub start_time: DateTime<Utc>,
pub end_time: DateTime<Utc>,
pub coordinate_avg: Coordinate,
}
#[derive(Serialize, Deserialize)]
pub struct ReverseGeocoding {
pub address: HashMap<String, String>,
}
pub fn read_config(file: &Path) -> Result<Config, io::Error> {
let mut config_file = File::open(file)?;
let mut config_str = String::new();
config_file.read_to_string(&mut config_str)?;
// Not sure about that, maybe I should use a Box<Error> ?
match toml::from_str(&config_str) {
Ok(config) => Ok(config),
Err(error) => Err(Error::new(ErrorKind::Interrupted, error)),
}
}
pub fn process_gpx_dir(config: &Config) -> Vec<TrackArticle> {
let gpx_dir = Path::new(&config.data.gpx_input);
let target_dir = Path::new(&config.data.site_output);
let mut articles: Vec<TrackArticle> = Vec::new();
let tera = compile_templates!("site/templates/*");
let img_input_dir = Path::new(&config.data.img_input);
let photo_all = parse_photos(img_input_dir);
for entry in fs::read_dir(gpx_dir).unwrap() {
let gpx_path = entry.unwrap().path();
if gpx_path.extension().unwrap() == "gpx" {
info!("Processing {}", gpx_path.display());
match generate_article(&gpx_path, target_dir, &tera, &config, &photo_all) {
Some(article) => articles.push(article),
None => continue,
}
}
}
articles.sort_by(|a, b| a.start_time.cmp(&b.start_time));
articles
}
pub fn article_gpx_info(gpx_file: &Path) -> (TrackArticle, Vec<Coordinate>) {
let file = File::open(&gpx_file).unwrap();
let reader = BufReader::new(file);
let gpx: Gpx = read(reader).unwrap();
let track: &Track = &gpx.tracks[0];
let segment: &TrackSegment = &track.segments[0];
let mut track_coordinates: Vec<Coordinate> = Vec::new();
for s in segment.points.iter() {
track_coordinates.push(Coordinate {
lon: s.point().x(),
lat: s.point().y(),
});
}
// type annotations required: cannot resolve `_: std::iter::Sum<f64>` error
// is generated if avg calculation done in one time, I don't known to fix it
// for now
let mut lon_avg: f64 = track_coordinates.iter().map(|x| x.lon).sum();
lon_avg = lon_avg / track_coordinates.len() as f64;
let mut lat_avg: f64 = track_coordinates.iter().map(|x| x.lat).sum();
lat_avg = lat_avg / track_coordinates.len() as f64;
let coordinate_avg: Coordinate = Coordinate {
lon: lon_avg,
lat: lat_avg,
};
let start_time = segment.points.first().unwrap().time.unwrap();
let end_time = segment.points.last().unwrap().time.unwrap();
let article_title = match gpx.metadata.unwrap().name {
Some(name) => name,
None => gpx_file.file_stem().unwrap().to_str().unwrap().to_string(),
};
let special_chars_re = Regex::new(r"( |/|\|<|>)").unwrap();
let article_underscored_title = special_chars_re
.replace_all(&article_title, "_")
.to_string();
(
TrackArticle {
title: article_title,
underscored_title: article_underscored_title,
photos_number: 0,
country: String::new(),
start_time: start_time,
end_time: end_time,
coordinate_avg: coordinate_avg,
},
track_coordinates,
)
}
pub fn generate_article(
gpx_file: &Path,
target_dir: &Path,
tera: &Tera,
config: &Config,
photo_list: &Vec<Photo>,
) -> Option<TrackArticle> {
let (article_info, track_coordinates) = article_gpx_info(gpx_file);
let photo_article = find_photos(photo_list, article_info.start_time, article_info.end_time);
let mut copied_photos: Vec<String> = Vec::new();
let photo_target_dir = target_dir
.join("static/photos")
.join(article_info.underscored_title.to_string());
let photo_target_dir_relative =
Path::new("static/photos").join(article_info.underscored_title.to_string());
match photo_article {
Some(photo_article) => {
let photos = photo_article;
fs::create_dir_all(&photo_target_dir).unwrap();
fs::create_dir_all(&photo_target_dir.join("thumbnails")).unwrap();
for (i, p) in photos.iter().enumerate() {
let extension = p.path.extension().unwrap().to_str().unwrap();
let photo_target_file = photo_target_dir.join(format!("{}.{}", i + 1, extension));
match fs::copy(Path::new(&p.path), &photo_target_file) {
Ok(file) => file,
Err(error) => {
error!("unable to copy {}: {}", &p.path.display(), error);
continue;
}
};
let img = image::open(&Path::new(&photo_target_file))
.ok()
.expect("Opening image failed");
let thumbnail = img.thumbnail(300, 300);
thumbnail
.save(&photo_target_dir.join("thumbnails").join(format!(
"{}.{}",
i + 1,
extension
)))
.unwrap();
copied_photos.push(format!("{}.{}", i + 1, extension));
remove_exif(&photo_target_file);
}
}
None => {
info!("No photos found for {}, skipping", gpx_file.display());
return None;
}
};
let mut context = Context::new();
context.add("track_coordinates", &track_coordinates);
context.add("article_title", &article_info.title);
context.add("lon_avg", &article_info.coordinate_avg.lon);
context.add("lat_avg", &article_info.coordinate_avg.lat);
context.add("start_time", &article_info.start_time.to_string());
context.add("end_time", &article_info.end_time.to_string());
context.add("static_dir", "../static");
context.add("config", config);
context.add("copied_photos", &copied_photos);
context.add("photo_target_dir_relative", &photo_target_dir_relative);
render_html(
tera,
context,
&target_dir.join("tracks"),
&article_info.underscored_title,
"track_article.html",
)
.unwrap();
let track_country = match reverse_geocoding(&article_info.coordinate_avg) {
Ok(geocoding) => geocoding.address["country"].clone(),
Err(error) => {
error!("error while reverse geocoding : {}", error);
String::new()
}
};
Some(TrackArticle {
title: article_info.title,
underscored_title: article_info.underscored_title,
photos_number: copied_photos.len(),
country: track_country.to_string(),
start_time: article_info.start_time,
end_time: article_info.end_time,
coordinate_avg: article_info.coordinate_avg,
})
}
pub fn render_html(
tera: &Tera,
context: Context,
dir: &Path,
file: &str,
template: &str,
) -> Result<(), io::Error> {
let res = tera.render(template, &context).unwrap();
let mut generated_file = File::create(format!("{}/{}.html", dir.to_str().unwrap(), file))?;
generated_file.write(res.as_bytes())?;
Ok(())
}
fn find_photos(
photos: &Vec<Photo>,
start_time: DateTime<Utc>,
end_time: DateTime<Utc>,
) -> Option<Vec<&Photo>> {
let mut res: Vec<&Photo> = Vec::new();
for p in photos {
if start_time.timestamp() <= p.datetime.timestamp()
&& end_time.timestamp() >= p.datetime.timestamp()
{
res.push(p);
}
}
if res.len() > 0 {
res.sort_unstable_by_key(|r| r.datetime.timestamp());
return Some(res);
}
None
}
pub fn | (dir: &Path) -> Vec<Photo> {
let mut photos: Vec<Photo> = Vec::new();
unsafe {
gexiv2_sys::gexiv2_log_set_level(gexiv2_sys::GExiv2LogLevel::MUTE);
}
for entry in fs::read_dir(dir).unwrap() {
let img_path = entry.unwrap().path();
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
if !file_metadata.has_exif() {
| parse_photos | identifier_name |
lib.rs | (Serialize, Deserialize)]
pub struct Site {
pub base_uri: String,
pub name: String,
pub proto: String,
pub description: String,
}
#[derive(Serialize, Deserialize)]
pub struct Data {
pub gpx_input: String,
pub img_input: String,
pub site_output: String,
}
#[derive(Serialize, Deserialize)]
pub struct TrackArticle {
pub title: String,
pub underscored_title: String,
pub photos_number: usize,
pub country: String,
pub start_time: DateTime<Utc>,
pub end_time: DateTime<Utc>,
pub coordinate_avg: Coordinate,
}
#[derive(Serialize, Deserialize)]
pub struct ReverseGeocoding {
pub address: HashMap<String, String>,
}
pub fn read_config(file: &Path) -> Result<Config, io::Error> {
let mut config_file = File::open(file)?;
let mut config_str = String::new();
config_file.read_to_string(&mut config_str)?;
// Not sure about that, maybe I should use a Box<Error> ?
match toml::from_str(&config_str) {
Ok(config) => Ok(config),
Err(error) => Err(Error::new(ErrorKind::Interrupted, error)),
}
}
pub fn process_gpx_dir(config: &Config) -> Vec<TrackArticle> {
let gpx_dir = Path::new(&config.data.gpx_input);
let target_dir = Path::new(&config.data.site_output);
let mut articles: Vec<TrackArticle> = Vec::new();
let tera = compile_templates!("site/templates/*");
let img_input_dir = Path::new(&config.data.img_input);
let photo_all = parse_photos(img_input_dir);
for entry in fs::read_dir(gpx_dir).unwrap() {
let gpx_path = entry.unwrap().path();
if gpx_path.extension().unwrap() == "gpx" {
info!("Processing {}", gpx_path.display());
match generate_article(&gpx_path, target_dir, &tera, &config, &photo_all) {
Some(article) => articles.push(article),
None => continue,
}
}
}
articles.sort_by(|a, b| a.start_time.cmp(&b.start_time));
articles
}
pub fn article_gpx_info(gpx_file: &Path) -> (TrackArticle, Vec<Coordinate>) {
let file = File::open(&gpx_file).unwrap();
let reader = BufReader::new(file);
let gpx: Gpx = read(reader).unwrap();
let track: &Track = &gpx.tracks[0];
let segment: &TrackSegment = &track.segments[0];
let mut track_coordinates: Vec<Coordinate> = Vec::new();
for s in segment.points.iter() {
track_coordinates.push(Coordinate {
lon: s.point().x(),
lat: s.point().y(),
});
}
// type annotations required: cannot resolve `_: std::iter::Sum<f64>` error
// is generated if avg calculation done in one time, I don't known to fix it
// for now
let mut lon_avg: f64 = track_coordinates.iter().map(|x| x.lon).sum();
lon_avg = lon_avg / track_coordinates.len() as f64;
let mut lat_avg: f64 = track_coordinates.iter().map(|x| x.lat).sum();
lat_avg = lat_avg / track_coordinates.len() as f64;
let coordinate_avg: Coordinate = Coordinate {
lon: lon_avg,
lat: lat_avg,
};
let start_time = segment.points.first().unwrap().time.unwrap();
let end_time = segment.points.last().unwrap().time.unwrap();
let article_title = match gpx.metadata.unwrap().name {
Some(name) => name,
None => gpx_file.file_stem().unwrap().to_str().unwrap().to_string(),
};
let special_chars_re = Regex::new(r"( |/|\|<|>)").unwrap();
let article_underscored_title = special_chars_re
.replace_all(&article_title, "_")
.to_string();
(
TrackArticle {
title: article_title,
underscored_title: article_underscored_title,
photos_number: 0,
country: String::new(),
start_time: start_time,
end_time: end_time,
coordinate_avg: coordinate_avg,
},
track_coordinates,
)
}
pub fn generate_article(
gpx_file: &Path,
target_dir: &Path,
tera: &Tera,
config: &Config,
photo_list: &Vec<Photo>,
) -> Option<TrackArticle> {
let (article_info, track_coordinates) = article_gpx_info(gpx_file);
let photo_article = find_photos(photo_list, article_info.start_time, article_info.end_time);
let mut copied_photos: Vec<String> = Vec::new();
let photo_target_dir = target_dir
.join("static/photos")
.join(article_info.underscored_title.to_string());
let photo_target_dir_relative =
Path::new("static/photos").join(article_info.underscored_title.to_string());
match photo_article {
Some(photo_article) => {
let photos = photo_article;
fs::create_dir_all(&photo_target_dir).unwrap();
fs::create_dir_all(&photo_target_dir.join("thumbnails")).unwrap();
for (i, p) in photos.iter().enumerate() {
let extension = p.path.extension().unwrap().to_str().unwrap();
let photo_target_file = photo_target_dir.join(format!("{}.{}", i + 1, extension));
match fs::copy(Path::new(&p.path), &photo_target_file) {
Ok(file) => file,
Err(error) => {
error!("unable to copy {}: {}", &p.path.display(), error);
continue;
}
};
let img = image::open(&Path::new(&photo_target_file))
.ok()
.expect("Opening image failed");
let thumbnail = img.thumbnail(300, 300);
thumbnail
.save(&photo_target_dir.join("thumbnails").join(format!(
"{}.{}",
i + 1,
extension
)))
.unwrap();
copied_photos.push(format!("{}.{}", i + 1, extension));
remove_exif(&photo_target_file);
}
}
None => {
info!("No photos found for {}, skipping", gpx_file.display());
return None;
}
};
let mut context = Context::new();
context.add("track_coordinates", &track_coordinates);
context.add("article_title", &article_info.title);
context.add("lon_avg", &article_info.coordinate_avg.lon);
context.add("lat_avg", &article_info.coordinate_avg.lat);
context.add("start_time", &article_info.start_time.to_string());
context.add("end_time", &article_info.end_time.to_string());
context.add("static_dir", "../static");
context.add("config", config);
context.add("copied_photos", &copied_photos);
context.add("photo_target_dir_relative", &photo_target_dir_relative);
render_html(
tera,
context,
&target_dir.join("tracks"),
&article_info.underscored_title,
"track_article.html",
)
.unwrap();
let track_country = match reverse_geocoding(&article_info.coordinate_avg) {
Ok(geocoding) => geocoding.address["country"].clone(),
Err(error) => {
error!("error while reverse geocoding : {}", error);
String::new()
}
};
Some(TrackArticle {
title: article_info.title,
underscored_title: article_info.underscored_title,
photos_number: copied_photos.len(),
country: track_country.to_string(),
start_time: article_info.start_time,
end_time: article_info.end_time,
coordinate_avg: article_info.coordinate_avg,
})
}
pub fn render_html(
tera: &Tera,
context: Context,
dir: &Path,
file: &str,
template: &str,
) -> Result<(), io::Error> {
let res = tera.render(template, &context).unwrap();
let mut generated_file = File::create(format!("{}/{}.html", dir.to_str().unwrap(), file))?;
generated_file.write(res.as_bytes())?;
Ok(())
}
fn find_photos(
photos: &Vec<Photo>,
start_time: DateTime<Utc>,
end_time: DateTime<Utc>,
) -> Option<Vec<&Photo>> {
let mut res: Vec<&Photo> = Vec::new();
for p in photos {
if start_time.timestamp() <= p.datetime.timestamp()
&& end_time.timestamp() >= p.datetime.timestamp()
{
res.push(p);
}
}
if res.len() > 0 |
None
}
pub fn parse_photos(dir: &Path) -> Vec<Photo> {
let mut photos: Vec<Photo> = Vec::new();
unsafe {
gexiv2_sys::gexiv2_log_set_level(gexiv2_sys::GExiv2LogLevel::MUTE);
}
for entry in fs::read_dir(dir).unwrap() {
let img_path = entry.unwrap().path();
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
if !file_metadata.has_exif() | {
res.sort_unstable_by_key(|r| r.datetime.timestamp());
return Some(res);
} | conditional_block |
lib.rs | use std::io;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::{Error, ErrorKind};
use std::path::{Path, PathBuf};
use tera::{compile_templates, Context, Tera};
#[derive(Serialize, Deserialize)]
pub struct Coordinate {
lon: f64,
lat: f64,
}
pub struct Photo {
path: PathBuf,
datetime: NaiveDateTime,
}
#[derive(Serialize, Deserialize)]
pub struct Config {
pub site: Site,
pub data: Data,
}
#[derive(Serialize, Deserialize)]
pub struct Site {
pub base_uri: String,
pub name: String,
pub proto: String,
pub description: String,
}
#[derive(Serialize, Deserialize)]
pub struct Data {
pub gpx_input: String,
pub img_input: String,
pub site_output: String,
}
#[derive(Serialize, Deserialize)]
pub struct TrackArticle {
pub title: String,
pub underscored_title: String,
pub photos_number: usize,
pub country: String,
pub start_time: DateTime<Utc>,
pub end_time: DateTime<Utc>,
pub coordinate_avg: Coordinate,
}
#[derive(Serialize, Deserialize)]
pub struct ReverseGeocoding {
pub address: HashMap<String, String>,
}
pub fn read_config(file: &Path) -> Result<Config, io::Error> {
let mut config_file = File::open(file)?;
let mut config_str = String::new();
config_file.read_to_string(&mut config_str)?;
// Not sure about that, maybe I should use a Box<Error> ?
match toml::from_str(&config_str) {
Ok(config) => Ok(config),
Err(error) => Err(Error::new(ErrorKind::Interrupted, error)),
}
}
pub fn process_gpx_dir(config: &Config) -> Vec<TrackArticle> {
let gpx_dir = Path::new(&config.data.gpx_input);
let target_dir = Path::new(&config.data.site_output);
let mut articles: Vec<TrackArticle> = Vec::new();
let tera = compile_templates!("site/templates/*");
let img_input_dir = Path::new(&config.data.img_input);
let photo_all = parse_photos(img_input_dir);
for entry in fs::read_dir(gpx_dir).unwrap() {
let gpx_path = entry.unwrap().path();
if gpx_path.extension().unwrap() == "gpx" {
info!("Processing {}", gpx_path.display());
match generate_article(&gpx_path, target_dir, &tera, &config, &photo_all) {
Some(article) => articles.push(article),
None => continue,
}
}
}
articles.sort_by(|a, b| a.start_time.cmp(&b.start_time));
articles
}
pub fn article_gpx_info(gpx_file: &Path) -> (TrackArticle, Vec<Coordinate>) {
let file = File::open(&gpx_file).unwrap();
let reader = BufReader::new(file);
let gpx: Gpx = read(reader).unwrap();
let track: &Track = &gpx.tracks[0];
let segment: &TrackSegment = &track.segments[0];
let mut track_coordinates: Vec<Coordinate> = Vec::new();
for s in segment.points.iter() {
track_coordinates.push(Coordinate {
lon: s.point().x(),
lat: s.point().y(),
});
}
// type annotations required: cannot resolve `_: std::iter::Sum<f64>` error
// is generated if avg calculation done in one time, I don't known to fix it
// for now
let mut lon_avg: f64 = track_coordinates.iter().map(|x| x.lon).sum();
lon_avg = lon_avg / track_coordinates.len() as f64;
let mut lat_avg: f64 = track_coordinates.iter().map(|x| x.lat).sum();
lat_avg = lat_avg / track_coordinates.len() as f64;
let coordinate_avg: Coordinate = Coordinate {
lon: lon_avg,
lat: lat_avg,
};
let start_time = segment.points.first().unwrap().time.unwrap();
let end_time = segment.points.last().unwrap().time.unwrap();
let article_title = match gpx.metadata.unwrap().name {
Some(name) => name,
None => gpx_file.file_stem().unwrap().to_str().unwrap().to_string(),
};
let special_chars_re = Regex::new(r"( |/|\|<|>)").unwrap();
let article_underscored_title = special_chars_re
.replace_all(&article_title, "_")
.to_string();
(
TrackArticle {
title: article_title,
underscored_title: article_underscored_title,
photos_number: 0,
country: String::new(),
start_time: start_time,
end_time: end_time,
coordinate_avg: coordinate_avg,
},
track_coordinates,
)
}
pub fn generate_article(
gpx_file: &Path,
target_dir: &Path,
tera: &Tera,
config: &Config,
photo_list: &Vec<Photo>,
) -> Option<TrackArticle> {
let (article_info, track_coordinates) = article_gpx_info(gpx_file);
let photo_article = find_photos(photo_list, article_info.start_time, article_info.end_time);
let mut copied_photos: Vec<String> = Vec::new();
let photo_target_dir = target_dir
.join("static/photos")
.join(article_info.underscored_title.to_string());
let photo_target_dir_relative =
Path::new("static/photos").join(article_info.underscored_title.to_string());
match photo_article {
Some(photo_article) => {
let photos = photo_article;
fs::create_dir_all(&photo_target_dir).unwrap();
fs::create_dir_all(&photo_target_dir.join("thumbnails")).unwrap();
for (i, p) in photos.iter().enumerate() {
let extension = p.path.extension().unwrap().to_str().unwrap();
let photo_target_file = photo_target_dir.join(format!("{}.{}", i + 1, extension));
match fs::copy(Path::new(&p.path), &photo_target_file) {
Ok(file) => file,
Err(error) => {
error!("unable to copy {}: {}", &p.path.display(), error);
continue;
}
};
let img = image::open(&Path::new(&photo_target_file))
.ok()
.expect("Opening image failed");
let thumbnail = img.thumbnail(300, 300);
thumbnail
.save(&photo_target_dir.join("thumbnails").join(format!(
"{}.{}",
i + 1,
extension
)))
.unwrap();
copied_photos.push(format!("{}.{}", i + 1, extension));
remove_exif(&photo_target_file);
}
}
None => {
info!("No photos found for {}, skipping", gpx_file.display());
return None;
}
};
let mut context = Context::new();
context.add("track_coordinates", &track_coordinates);
context.add("article_title", &article_info.title);
context.add("lon_avg", &article_info.coordinate_avg.lon);
context.add("lat_avg", &article_info.coordinate_avg.lat);
context.add("start_time", &article_info.start_time.to_string());
context.add("end_time", &article_info.end_time.to_string());
context.add("static_dir", "../static");
context.add("config", config);
context.add("copied_photos", &copied_photos);
context.add("photo_target_dir_relative", &photo_target_dir_relative);
render_html(
tera,
context,
&target_dir.join("tracks"),
&article_info.underscored_title,
"track_article.html",
)
.unwrap();
let track_country = match reverse_geocoding(&article_info.coordinate_avg) {
Ok(geocoding) => geocoding.address["country"].clone(),
Err(error) => {
error!("error while reverse geocoding : {}", error);
String::new()
}
};
Some(TrackArticle {
title: article_info.title,
underscored_title: article_info.underscored_title,
photos_number: copied_photos.len(),
country: track_country.to_string(),
start_time: article_info.start_time,
end_time: article_info.end_time,
coordinate_avg: article_info.coordinate_avg,
})
}
pub fn render_html(
tera: &Tera,
context: Context,
dir: &Path,
file: &str,
template: &str,
) -> Result<(), io::Error> {
let res = tera.render(template, &context).unwrap();
let mut generated_file = File::create(format!("{}/{}.html", dir.to_str().unwrap(), file))?;
generated_file.write(res.as_bytes())?;
Ok(())
}
fn find_photos(
photos: &Vec<Photo>,
start_time: DateTime<Utc>,
end_time: DateTime<Utc>,
) -> Option<Vec<&Photo>> {
let mut res: Vec<&Photo> = Vec::new();
for p in photos {
if start_time.timestamp() <= p.datetime.timestamp()
&& end_time.timestamp() >= p.datetime.timestamp()
{
res.push(p);
}
| use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;
use std::error;
use std::fs;
use std::fs::File; | random_line_split |
|
TextDbBase.py | ON %s (url );''' ),
("%s_title_index" % self.tableName, self.tableName, '''CREATE INDEX %s ON %s (title );''' ),
("%s_fhash_index" % self.tableName, self.tableName, '''CREATE INDEX %s ON %s (fhash );''' ),
("%s_title_coll_index" % self.tableName, self.tableName, '''CREATE INDEX %s ON %s USING BTREE (title COLLATE "en_US" text_pattern_ops);''' ),
("%s_date_index" % self.changeTableName, self.changeTableName, '''CREATE INDEX %s ON %s (changeDate);''' ),
("%s_src_index" % self.changeTableName, self.changeTableName, '''CREATE INDEX %s ON %s (src );''' ),
("%s_url_index" % self.changeTableName, self.changeTableName, '''CREATE INDEX %s ON %s (url );''' ),
("%s_change_index" % self.changeTableName, self.changeTableName, '''CREATE INDEX %s ON %s (change );''' ),
("%s_netloc_index" % self.changeTableName, self.changeTableName, '''CREATE INDEX %s ON %s (change );''' ),
("%s_title_trigram" % self.changeTableName, self.changeTableName, '''CREATE INDEX %s ON %s USING gin (title gin_trgm_ops);''' ),
]
# CREATE INDEX book_series_name_trigram ON book_series USING gin (itemname gin_trgm_ops);
# CREATE INDEX book_title_trigram ON book_items USING gin (title gin_trgm_ops);
# ALTER INDEX book_title_trigram RENAME TO book_items_title_trigram;
# CREATE INDEX book_items_title_coll_index ON book_items USING BTREE (title COLLATE "en_US" text_pattern_ops);
# CREATE INDEX book_items_fhash_index ON book_items (fhash);
# CREATE INDEX title_collate_index ON book_items USING BTREE (title COLLATE "en_US" text_pattern_ops);
# EXPLAIN ANALYZE SELECT COUNT(*) FROM book_items WHERE title LIKE 's%';
for name, table, nameFormat in indexes:
if not name.lower() in haveIndexes:
cur.execute(nameFormat % (name, table))
self.log.info("Retreived page database created")
self.validKwargs = [
'dbid',
'src',
'dlstate',
'url',
'title',
'netloc',
'series',
'contents',
'istext',
'fhash',
'mimetype',
'fspath',
'distance',
'walklimit'
]
self.table = sql.Table(self.tableName.lower())
self.cols = (
self.table.dbid,
self.table.src,
self.table.dlstate,
self.table.url,
self.table.title,
self.table.netloc,
self.table.series,
self.table.contents,
self.table.istext,
self.table.fhash,
self.table.mimetype,
self.table.fspath,
self.table.distance,
self.table.walklimit,
)
self.colMap = {
"dbid" : self.table.dbid,
"src" : self.table.src,
"dlstate" : self.table.dlstate,
"url" : self.table.url,
"netloc" : self.table.netloc,
"title" : self.table.title,
"series" : self.table.series,
"contents" : self.table.contents,
"istext" : self.table.istext,
"fhash" : self.table.fhash,
"mimetype" : self.table.mimetype,
"fspath" : self.table.fspath,
"distance" : self.table.distance,
"walklimit" : self.table.walklimit,
}
##############################################################################################################################################
#
#
# ######## ######## ######## ## ## ## ## ###### ######## #### ####### ## ## ######
# ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ### ## ## ##
# ## ## ## ## ## ## ## #### ## ## ## ## ## ## #### ## ##
# ## ## ######## ###### ## ## ## ## ## ## ## ## ## ## ## ## ## ######
# ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## #### ##
# ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ### ## ##
# ######## ######## ## ####### ## ## ###### ## #### ####### ## ## ######
#
#
##############################################################################################################################################
def keyToCol(self, key):
key = key.lower()
if not key in self.colMap:
raise ValueError("Invalid column name '%s'" % key)
return self.colMap[key]
def sqlBuildConditional(self, **kwargs):
operators = []
# Short circuit and return none (so the resulting where clause is all items) if no kwargs are passed.
if not kwargs:
return None
for key, val in kwargs.items():
operators.append((self.keyToCol(key) == val))
# This is ugly as hell, but it functionally returns x & y & z ... for an array of [x, y, z]
# And allows variable length arrays.
conditional = functools.reduce(opclass.and_, operators)
return conditional
def | (self, returning=None, **kwargs):
cols = [self.table.src]
vals = [self.tableKey]
if 'url' in kwargs:
cols.append(self.table.netloc)
vals.append(urllib.parse.urlparse(kwargs['url']).netloc.lower())
for key, val in kwargs.items():
key = key.lower()
if key not in self.colMap:
raise ValueError("Invalid column name for insert! '%s'" % key)
cols.append(self.colMap[key])
vals.append(val)
query = self.table.insert(columns=cols, values=[vals], returning=returning)
query, params = tuple(query)
return query, params
# Insert new item into DB.
# MASSIVELY faster if you set commit=False (it doesn't flush the write to disk), but that can open a transaction which locks the DB.
# Only pass commit=False if the calling code can gaurantee it'll call commit() itself within a reasonable timeframe.
def insertIntoDb(self, commit=True, cursor=None, **kwargs):
query, queryArguments = self.sqlBuildInsertArgs(returning=[self.table.dbid], **kwargs)
if self.QUERY_DEBUG:
print("Query = ", query)
print("Args = ", queryArguments)
if cursor:
cursor.execute(query, queryArguments)
ret = cursor.fetchone()
try:
self.insertDelta(cursor=cursor, **kwargs)
except ValueError:
self.log.error("Error when updating change stats:")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
else:
with self.transaction(commit=commit) as cur:
cur.execute(query, queryArguments)
ret = cur.fetchone()
try:
self.insertDelta(cursor=cur, **kwargs)
except ValueError:
self.log.error("Error when updating change stats:")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
if self.QUERY_DEBUG:
print("Query ret = ", ret)
return ret[0]
def generateUpdateQuery(self, **kwargs):
cols = []
vals = []
# By default, take ownership of any pages we're operating on by setting it's src key to us
if not 'src' in kwargs:
cols.append(self.table.src)
vals.append(self.tableKey)
if 'url' in kwargs:
cols.append(self.table.netloc)
vals.append(urllib.parse.urlparse(kwargs['url']).netloc.lower())
if "dbid" in kwargs:
where = (self.table.dbid == kwargs.pop('dbid'))
elif "url" in kwargs:
where = (self.table.url == kwargs.pop('url'))
else:
raise ValueError("GenerateUpdateQuery must be passed a single unique column identifier (either dbId or url)")
# Extract and insert the netloc if needed.
if 'url' in kwargs:
print("Urlparse!")
cols.append(self.table.netloc)
vals.append(urllib.parse.urlparse(kwargs['url']).netloc.lower())
for key, val in kwargs.items():
key = key.lower()
if key not in self.colMap:
raise ValueError("Invalid column name for insert! '%s'" % key)
cols.append(self.colMap[key])
vals.append(val)
query = self.table.update(columns=cols, values=vals, where=where)
query, params = tuple(query)
return query, params | sqlBuildInsertArgs | identifier_name |
TextDbBase.py | for line in traceback.format_exc().split("\n"):
self.log.error(line)
else:
with self.transaction(commit=commit) as cur:
if distance != None:
self.updateDistance(cur, distance, **kwargs)
cur.execute(query, queryArguments)
try:
self.insertDelta(cursor=cur, **kwargs)
except ValueError:
self.log.error("Error when updating change stats:")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
def deleteDbEntry(self, commit=True, **kwargs):
if len(kwargs) != 1:
raise ValueError("deleteDbEntry only supports calling with a single kwarg", kwargs)
validCols = ["dbid", "url"]
key, val = kwargs.popitem()
if key not in validCols:
raise ValueError("Invalid column key for delete query: %s. Must be either 'dbid' or 'url'" % key)
where = (self.colMap[key.lower()] == val)
query = self.table.delete(where=where)
query, args = tuple(query)
if self.QUERY_DEBUG:
print("Query = ", query)
print("Args = ", args)
with self.transaction(commit=commit) as cur:
cur.execute(query, args)
def getRowsByValue(self, limitByKey=True, cursor=None, **kwargs):
if limitByKey and self.tableKey:
kwargs["src"] = self.tableKey
where = self.sqlBuildConditional(**kwargs)
wantCols = (
self.table.dbid,
self.table.src,
self.table.dlstate,
self.table.url,
self.table.title,
self.table.series,
self.table.contents,
self.table.istext,
self.table.fhash,
self.table.mimetype,
self.table.fspath
)
query = self.table.select(*wantCols, order_by=sql.Desc(self.table.dbid), where=where)
query, quargs = tuple(query)
if self.QUERY_DEBUG:
print("Query = ", query)
print("args = ", quargs)
if cursor:
cursor.execute(query, quargs)
rets = cursor.fetchall()
else:
with self.transaction() as cur:
#wrap queryies in transactions so we don't have hanging db handles.
cur.execute(query, quargs)
rets = cur.fetchall()
retL = []
for row in rets:
keys = ['dbid', 'src', 'dlstate', 'url', 'title', 'series', 'contents', 'istext', 'fhash', 'mimetype', 'fspath']
retL.append(dict(zip(keys, row)))
return retL
def getRowByValue(self, cursor=None, **kwargs):
rows = self.getRowsByValue(cursor=cursor, **kwargs)
if len(rows) == 1:
return rows.pop()
if len(rows) == 0:
return None
else:
raise ValueError("Got multiple rows for selection. Wat?")
def getFilenameFromIdName(self, rowid, filename):
if not os.path.exists(settings.bookCachePath):
self.log.warn("Cache directory for book items did not exist. Creating")
self.log.warn("Directory at path '%s'", settings.bookCachePath)
os.makedirs(settings.bookCachePath)
# one new directory per 1000 items.
dirName = '%s' % (rowid // 1000)
dirPath = os.path.join(settings.bookCachePath, dirName)
if not os.path.exists(dirPath):
os.mkdir(dirPath)
filename = 'ID%s - %s' % (rowid, filename)
filename = nameTools.makeFilenameSafe(filename)
fqpath = os.path.join(dirPath, filename)
return fqpath
def saveFile(self, url, mimetype, fileName, content):
hadFile = False
# Yeah, I'm hashing twice in lots of cases. Bite me
fHash = self.getHash(content)
# Look for existing files with the same MD5sum. If there are any, just point the new file at the
# fsPath of the existing one, rather then creating a new file on-disk.
with self.transaction() as cur:
cur.execute("SELECT fspath FROM {tableName} WHERE fhash=%s;".format(tableName=self.tableName), (fHash, ))
row = cur.fetchone()
if row:
self.log.info("Already downloaded file. Not creating duplicates.")
hadFile = True
fqPath = row[0]
with self.transaction() as cur:
cur.execute("SELECT dbid, fspath, contents, mimetype FROM {tableName} WHERE url=%s;".format(tableName=self.tableName), (url, ))
row = cur.fetchone()
if not row:
self.log.critical("Failure when saving file for URL '%s'", url)
self.log.critical("File name: '%s'", fileName)
return
dbid, dummy_havePath, dummy_haveCtnt, dummy_haveMime = row
# self.log.info('havePath, haveCtnt, haveMime - %s, %s, %s', havePath, haveCtnt, haveMime)
if not hadFile:
fqPath = self.getFilenameFromIdName(dbid, fileName)
newRowDict = { "dlstate" : 2,
"series" : None,
"contents": len(content),
"istext" : False,
"mimetype": mimetype,
"fspath" : fqPath,
"fhash" : fHash}
self.updateDbEntry(url=url, commit=False, **newRowDict)
if not hadFile:
try:
with open(fqPath, "wb") as fp:
fp.write(content)
except OSError:
self.log.error("Error when attempting to save file. ")
with self.transaction() as cur:
newRowDict = {"dlstate" : -1}
self.updateDbEntry(url=url, commit=False, **newRowDict)
def getToDo(self, distance):
# Retreiving todo items must be atomic, so we lock for that.
with self.dbLock:
with self.transaction() as cur:
cur.execute('''SELECT dbid, url, distance FROM {tableName} WHERE dlstate=%s AND src=%s AND distance < %s ORDER BY distance ASC, istext ASC LIMIT 1;'''.format(tableName=self.tableName), (0, self.tableKey, distance))
row = cur.fetchone()
# print(('''SELECT dbid, url, distance FROM {tableName} WHERE dlstate=%s AND src=%s AND distance < %s ORDER BY istext ASC LIMIT 1;'''.format(tableName=self.tableName) % (0, self.tableKey, distance)))
# print(row)
if not row:
return False
else:
dbid, url, itemDistance = row
cur.execute('UPDATE {tableName} SET dlstate=%s WHERE dbid=%s;'.format(tableName=self.tableName), (1, dbid))
if not url.startswith("http"):
raise ValueError("Non HTTP URL in database: '%s'!" % url)
return url, itemDistance
def getTodoCount(self):
with self.dbLock:
with self.transaction() as cur:
cur.execute('''SELECT COUNT(*) FROM {tableName} WHERE dlstate=%s AND src=%s;'''.format(tableName=self.tableName), (0, self.tableKey))
row = cur.fetchone()
return row[0]
def resetStuckItems(self):
self.log.info("Resetting stuck downloads in DB")
with self.transaction() as cur:
cur.execute('''UPDATE {tableName} SET dlState=0 WHERE dlState=1 AND src=%s'''.format(tableName=self.tableName), (self.tableKey, ))
self.log.info("Download reset complete")
# Override to filter items that get
def changeFilter(self, url, title, changePercentage):
return False
def insertChangeStats(self, url, changePercentage, title, cursor=None):
# Skip title cruft on baka-tsuki
if self.changeFilter(url, title, changePercentage):
return
# If we weren't passed a title, look it up from the DB.
if not title:
self.log.warning("ChangeStat call without title parameter passed!")
row = self.getRowByValue(url=url, cursor=cursor)
if row and row['title']:
title = row['title']
else:
title = url
query = '''INSERT INTO {changeTable} (src, url, change, title, changeDate) VALUES (%s, %s, %s, %s, %s)'''.format(changeTable=self.changeTableName)
values = (self.tableKey, url, changePercentage, title, time.time())
if cursor:
cursor.execute(query, values)
else:
with self.transaction() as cur:
cur.execute(query, values)
##############################################################################################################################################
# Higher level DB Interfacing
##############################################################################################################################################
def upsert(self, pgUrl, commit=True, **kwargs):
if 'url' in kwargs and 'drive_web' in kwargs['url']:
| self.log.error('')
self.log.error('')
self.log.error("WAT")
self.log.error('')
self.log.error(traceback.format_stack())
self.log.error('') | conditional_block |
|
TextDbBase.py | # Only pass commit=False if the calling code can gaurantee it'll call commit() itself within a reasonable timeframe.
def insertIntoDb(self, commit=True, cursor=None, **kwargs):
query, queryArguments = self.sqlBuildInsertArgs(returning=[self.table.dbid], **kwargs)
if self.QUERY_DEBUG:
print("Query = ", query)
print("Args = ", queryArguments)
if cursor:
cursor.execute(query, queryArguments)
ret = cursor.fetchone()
try:
self.insertDelta(cursor=cursor, **kwargs)
except ValueError:
self.log.error("Error when updating change stats:")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
else:
with self.transaction(commit=commit) as cur:
cur.execute(query, queryArguments)
ret = cur.fetchone()
try:
self.insertDelta(cursor=cur, **kwargs)
except ValueError:
self.log.error("Error when updating change stats:")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
if self.QUERY_DEBUG:
print("Query ret = ", ret)
return ret[0]
def generateUpdateQuery(self, **kwargs):
cols = []
vals = []
# By default, take ownership of any pages we're operating on by setting it's src key to us
if not 'src' in kwargs:
cols.append(self.table.src)
vals.append(self.tableKey)
if 'url' in kwargs:
cols.append(self.table.netloc)
vals.append(urllib.parse.urlparse(kwargs['url']).netloc.lower())
if "dbid" in kwargs:
where = (self.table.dbid == kwargs.pop('dbid'))
elif "url" in kwargs:
where = (self.table.url == kwargs.pop('url'))
else:
raise ValueError("GenerateUpdateQuery must be passed a single unique column identifier (either dbId or url)")
# Extract and insert the netloc if needed.
if 'url' in kwargs:
print("Urlparse!")
cols.append(self.table.netloc)
vals.append(urllib.parse.urlparse(kwargs['url']).netloc.lower())
for key, val in kwargs.items():
key = key.lower()
if key not in self.colMap:
raise ValueError("Invalid column name for insert! '%s'" % key)
cols.append(self.colMap[key])
vals.append(val)
query = self.table.update(columns=cols, values=vals, where=where)
query, params = tuple(query)
return query, params
def updateDistance(self, cur, distance, **kwargs):
if "dbid" in kwargs:
where = (self.table.dbid == kwargs.pop('dbid'))
elif "url" in kwargs:
where = (self.table.url == kwargs.pop('url'))
else:
raise ValueError("GenerateUpdateQuery must be passed a single unique column identifier (either dbId or url)")
cols = [self.table.distance]
vals = [sqlc.Least(distance, self.table.distance)]
query = self.table.update(columns=cols, values=vals, where=where)
query, params = tuple(query)
if self.QUERY_DEBUG:
print("Query = ", query)
print("Args = ", params)
cur.execute(query, params)
# Update entry with key sourceUrl with values **kwargs
# kwarg names are checked for validity, and to prevent possiblity of sql injection.
def updateDbEntry(self, commit=True, cursor=None, **kwargs):
distance = None
if 'distance' in kwargs:
distance = kwargs.pop('distance')
# Apparently passing a dict as ** does (at least) a shallow copy
# Therefore, we can ignore the fact that generateUpdateQuery
# will then permute it's copy of the kwargs, and just use it again
# for the call to updateDistance
query, queryArguments = self.generateUpdateQuery(**kwargs)
if self.QUERY_DEBUG:
print("Query = ", query)
print("Args = ", queryArguments)
if cursor:
if distance != None:
self.updateDistance(cursor, distance, **kwargs)
cursor.execute(query, queryArguments)
try:
self.insertDelta(cursor=cursor, **kwargs)
except ValueError:
self.log.error("Error when updating change stats:")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
else:
with self.transaction(commit=commit) as cur:
if distance != None:
self.updateDistance(cur, distance, **kwargs)
cur.execute(query, queryArguments)
try:
self.insertDelta(cursor=cur, **kwargs)
except ValueError:
self.log.error("Error when updating change stats:")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
def deleteDbEntry(self, commit=True, **kwargs):
if len(kwargs) != 1:
raise ValueError("deleteDbEntry only supports calling with a single kwarg", kwargs)
validCols = ["dbid", "url"]
key, val = kwargs.popitem()
if key not in validCols:
raise ValueError("Invalid column key for delete query: %s. Must be either 'dbid' or 'url'" % key)
where = (self.colMap[key.lower()] == val)
query = self.table.delete(where=where)
query, args = tuple(query)
if self.QUERY_DEBUG:
print("Query = ", query)
print("Args = ", args)
with self.transaction(commit=commit) as cur:
cur.execute(query, args)
def getRowsByValue(self, limitByKey=True, cursor=None, **kwargs):
if limitByKey and self.tableKey:
kwargs["src"] = self.tableKey
where = self.sqlBuildConditional(**kwargs)
wantCols = (
self.table.dbid,
self.table.src,
self.table.dlstate,
self.table.url,
self.table.title,
self.table.series,
self.table.contents,
self.table.istext,
self.table.fhash,
self.table.mimetype,
self.table.fspath
)
query = self.table.select(*wantCols, order_by=sql.Desc(self.table.dbid), where=where)
query, quargs = tuple(query)
if self.QUERY_DEBUG:
print("Query = ", query)
print("args = ", quargs)
if cursor:
cursor.execute(query, quargs)
rets = cursor.fetchall()
else:
with self.transaction() as cur:
#wrap queryies in transactions so we don't have hanging db handles.
cur.execute(query, quargs)
rets = cur.fetchall()
retL = []
for row in rets:
keys = ['dbid', 'src', 'dlstate', 'url', 'title', 'series', 'contents', 'istext', 'fhash', 'mimetype', 'fspath']
retL.append(dict(zip(keys, row)))
return retL
def getRowByValue(self, cursor=None, **kwargs):
rows = self.getRowsByValue(cursor=cursor, **kwargs)
if len(rows) == 1:
return rows.pop()
if len(rows) == 0:
return None
else:
raise ValueError("Got multiple rows for selection. Wat?")
def getFilenameFromIdName(self, rowid, filename):
if not os.path.exists(settings.bookCachePath):
self.log.warn("Cache directory for book items did not exist. Creating")
self.log.warn("Directory at path '%s'", settings.bookCachePath)
os.makedirs(settings.bookCachePath)
# one new directory per 1000 items.
dirName = '%s' % (rowid // 1000)
dirPath = os.path.join(settings.bookCachePath, dirName)
if not os.path.exists(dirPath):
os.mkdir(dirPath)
filename = 'ID%s - %s' % (rowid, filename)
filename = nameTools.makeFilenameSafe(filename)
fqpath = os.path.join(dirPath, filename)
return fqpath
def saveFile(self, url, mimetype, fileName, content):
hadFile = False
# Yeah, I'm hashing twice in lots of cases. Bite me
fHash = self.getHash(content)
# Look for existing files with the same MD5sum. If there are any, just point the new file at the
# fsPath of the existing one, rather then creating a new file on-disk.
with self.transaction() as cur:
cur.execute("SELECT fspath FROM {tableName} WHERE fhash=%s;".format(tableName=self.tableName), (fHash, ))
row = cur.fetchone()
if row:
self.log.info("Already downloaded file. Not creating duplicates.")
hadFile = True
fqPath = row[0]
with self.transaction() as cur:
cur.execute("SELECT dbid, fspath, contents, mimetype FROM {tableName} WHERE url=%s;".format(tableName=self.tableName), (url, ))
row = cur.fetchone()
if not row:
self.log.critical("Failure when saving file for URL '%s'", url)
self.log.critical("File name: '%s'", fileName)
return
| dbid, dummy_havePath, dummy_haveCtnt, dummy_haveMime = row | random_line_split |
|
TextDbBase.py | and self.tableKey:
kwargs["src"] = self.tableKey
where = self.sqlBuildConditional(**kwargs)
wantCols = (
self.table.dbid,
self.table.src,
self.table.dlstate,
self.table.url,
self.table.title,
self.table.series,
self.table.contents,
self.table.istext,
self.table.fhash,
self.table.mimetype,
self.table.fspath
)
query = self.table.select(*wantCols, order_by=sql.Desc(self.table.dbid), where=where)
query, quargs = tuple(query)
if self.QUERY_DEBUG:
print("Query = ", query)
print("args = ", quargs)
if cursor:
cursor.execute(query, quargs)
rets = cursor.fetchall()
else:
with self.transaction() as cur:
#wrap queryies in transactions so we don't have hanging db handles.
cur.execute(query, quargs)
rets = cur.fetchall()
retL = []
for row in rets:
keys = ['dbid', 'src', 'dlstate', 'url', 'title', 'series', 'contents', 'istext', 'fhash', 'mimetype', 'fspath']
retL.append(dict(zip(keys, row)))
return retL
def getRowByValue(self, cursor=None, **kwargs):
rows = self.getRowsByValue(cursor=cursor, **kwargs)
if len(rows) == 1:
return rows.pop()
if len(rows) == 0:
return None
else:
raise ValueError("Got multiple rows for selection. Wat?")
def getFilenameFromIdName(self, rowid, filename):
if not os.path.exists(settings.bookCachePath):
self.log.warn("Cache directory for book items did not exist. Creating")
self.log.warn("Directory at path '%s'", settings.bookCachePath)
os.makedirs(settings.bookCachePath)
# one new directory per 1000 items.
dirName = '%s' % (rowid // 1000)
dirPath = os.path.join(settings.bookCachePath, dirName)
if not os.path.exists(dirPath):
os.mkdir(dirPath)
filename = 'ID%s - %s' % (rowid, filename)
filename = nameTools.makeFilenameSafe(filename)
fqpath = os.path.join(dirPath, filename)
return fqpath
def saveFile(self, url, mimetype, fileName, content):
hadFile = False
# Yeah, I'm hashing twice in lots of cases. Bite me
fHash = self.getHash(content)
# Look for existing files with the same MD5sum. If there are any, just point the new file at the
# fsPath of the existing one, rather then creating a new file on-disk.
with self.transaction() as cur:
cur.execute("SELECT fspath FROM {tableName} WHERE fhash=%s;".format(tableName=self.tableName), (fHash, ))
row = cur.fetchone()
if row:
self.log.info("Already downloaded file. Not creating duplicates.")
hadFile = True
fqPath = row[0]
with self.transaction() as cur:
cur.execute("SELECT dbid, fspath, contents, mimetype FROM {tableName} WHERE url=%s;".format(tableName=self.tableName), (url, ))
row = cur.fetchone()
if not row:
self.log.critical("Failure when saving file for URL '%s'", url)
self.log.critical("File name: '%s'", fileName)
return
dbid, dummy_havePath, dummy_haveCtnt, dummy_haveMime = row
# self.log.info('havePath, haveCtnt, haveMime - %s, %s, %s', havePath, haveCtnt, haveMime)
if not hadFile:
fqPath = self.getFilenameFromIdName(dbid, fileName)
newRowDict = { "dlstate" : 2,
"series" : None,
"contents": len(content),
"istext" : False,
"mimetype": mimetype,
"fspath" : fqPath,
"fhash" : fHash}
self.updateDbEntry(url=url, commit=False, **newRowDict)
if not hadFile:
try:
with open(fqPath, "wb") as fp:
fp.write(content)
except OSError:
self.log.error("Error when attempting to save file. ")
with self.transaction() as cur:
newRowDict = {"dlstate" : -1}
self.updateDbEntry(url=url, commit=False, **newRowDict)
def getToDo(self, distance):
# Retreiving todo items must be atomic, so we lock for that.
with self.dbLock:
with self.transaction() as cur:
cur.execute('''SELECT dbid, url, distance FROM {tableName} WHERE dlstate=%s AND src=%s AND distance < %s ORDER BY distance ASC, istext ASC LIMIT 1;'''.format(tableName=self.tableName), (0, self.tableKey, distance))
row = cur.fetchone()
# print(('''SELECT dbid, url, distance FROM {tableName} WHERE dlstate=%s AND src=%s AND distance < %s ORDER BY istext ASC LIMIT 1;'''.format(tableName=self.tableName) % (0, self.tableKey, distance)))
# print(row)
if not row:
return False
else:
dbid, url, itemDistance = row
cur.execute('UPDATE {tableName} SET dlstate=%s WHERE dbid=%s;'.format(tableName=self.tableName), (1, dbid))
if not url.startswith("http"):
raise ValueError("Non HTTP URL in database: '%s'!" % url)
return url, itemDistance
def getTodoCount(self):
with self.dbLock:
with self.transaction() as cur:
cur.execute('''SELECT COUNT(*) FROM {tableName} WHERE dlstate=%s AND src=%s;'''.format(tableName=self.tableName), (0, self.tableKey))
row = cur.fetchone()
return row[0]
def resetStuckItems(self):
self.log.info("Resetting stuck downloads in DB")
with self.transaction() as cur:
cur.execute('''UPDATE {tableName} SET dlState=0 WHERE dlState=1 AND src=%s'''.format(tableName=self.tableName), (self.tableKey, ))
self.log.info("Download reset complete")
# Override to filter items that get
def changeFilter(self, url, title, changePercentage):
return False
def insertChangeStats(self, url, changePercentage, title, cursor=None):
# Skip title cruft on baka-tsuki
if self.changeFilter(url, title, changePercentage):
return
# If we weren't passed a title, look it up from the DB.
if not title:
self.log.warning("ChangeStat call without title parameter passed!")
row = self.getRowByValue(url=url, cursor=cursor)
if row and row['title']:
title = row['title']
else:
title = url
query = '''INSERT INTO {changeTable} (src, url, change, title, changeDate) VALUES (%s, %s, %s, %s, %s)'''.format(changeTable=self.changeTableName)
values = (self.tableKey, url, changePercentage, title, time.time())
if cursor:
cursor.execute(query, values)
else:
with self.transaction() as cur:
cur.execute(query, values)
##############################################################################################################################################
# Higher level DB Interfacing
##############################################################################################################################################
def upsert(self, pgUrl, commit=True, **kwargs):
if 'url' in kwargs and 'drive_web' in kwargs['url']:
self.log.error('')
self.log.error('')
self.log.error("WAT")
self.log.error('')
self.log.error(traceback.format_stack())
self.log.error('')
if 'url' in kwargs and not kwargs['url'].startswith("http"):
self.log.error('')
self.log.error('')
self.log.error("WAT")
self.log.error('')
self.log.error(traceback.format_stack())
self.log.error('')
# print("Upserting!")
with self.transaction(commit=commit) as cur:
# Do everything in one transaction
try:
self.insertIntoDb(url=pgUrl, cursor=cur, **kwargs)
except psycopg2.IntegrityError:
if kwargs:
cur.execute("ROLLBACK")
cur.execute("BEGIN")
self.updateDbEntry(url=pgUrl, cursor=cur, **kwargs)
# print("Upserted")
def insertDelta(self, cursor=None, **kwargs):
| if 'istext' in kwargs and not kwargs['istext']:
return
if not 'contents' in kwargs:
return
if 'url' in kwargs:
old = self.getRowByValue(url=kwargs['url'], cursor=cursor)
elif 'dbid' in kwargs:
old = self.getRowByValue(dbid=kwargs['dbid'], cursor=cursor)
else:
raise ValueError("No identifying info in insertDelta call!")
if 'title' in kwargs and kwargs['title']:
title = kwargs['title']
else:
if not old: | identifier_body |
|
stateless.rs | accumulator::*;
/// At the moment, this particular struct resembles more closely an NFT.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq, Copy)]
pub struct UTXO {
pub_key: H256,
id: u64,
}
/// Primitive transaction model with one input and one output.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq)]
pub struct | {
input: UTXO,
output: UTXO,
witness: Vec<u8>,
// Would in practice include a signature here.
}
pub trait Trait: system::Trait {
type Event: From<Event> + Into<<Self as system::Trait>::Event>;
}
decl_storage! {
trait Store for Module<T: Trait> as Stateless {
State get(get_state): U2048 = U2048::from(2); // Use 2 as an arbitrary generator with "unknown" order.
SpentCoins get(get_spent_coins): Vec<(U2048, U2048)>;
NewCoins get(get_new_coins): Vec<U2048>
}
}
decl_event!(
pub enum Event {
Deletion(U2048, U2048, U2048),
Addition(U2048, U2048, U2048),
}
);
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initialize generic event
fn deposit_event() = default;
/// Receive request to execute a transaction.
/// Verify the contents of a transaction and temporarily add it to a queue of verified transactions.
/// NOTE: Only works if one transaction per user per block is submitted.
pub fn addTransaction(origin, transaction: Transaction) -> Result {
ensure_signed(origin)?;
// Arbitrarily cap the number of pending transactions to 100
ensure!(SpentCoins::get().len() < 100, "Transaction queue full. Please try again next block.");
// Also verify that the user is not spending to themselves
ensure!(transaction.input.pub_key != transaction.output.pub_key, "Cannot send coin to yourself.");
// Verify witness
let spent_elem = subroutines::hash_to_prime(&transaction.input.encode());
let witness = U2048::from_little_endian(&transaction.witness);
ensure!(witnesses::verify_mem_wit(State::get(), witness, spent_elem), "Witness is invalid");
let new_elem = subroutines::hash_to_prime(&transaction.output.encode());
// Update storage items.
SpentCoins::append(&vec![(spent_elem, witness)]);
Ok(())
}
/// Arbitrary replacement for Proof-of-Work to create new coins.
pub fn mint(origin, elem: u64) -> Result {
ensure_signed(origin)?;
let state = subroutines::mod_exp(Self::get_state(), U2048::from(elem), U2048::from_dec_str(MODULUS).unwrap());
State::put(state);
Ok(())
}
/// Batch delete spent coins and add new coins on block finalization
fn on_finalize() {
// Clause here to protect against empty blocks
if Self::get_spent_coins().len() > 0 {
// Delete spent coins from aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_delete(State::get(), &Self::get_spent_coins());
Self::deposit_event(Event::Deletion(state, agg, proof));
// Add new coins to aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_add(state, &Self::get_new_coins());
Self::deposit_event(Event::Addition(state, agg, proof));
// Update state
State::put(state);
}
// Clear storage
SpentCoins::kill();
NewCoins::kill();
}
}
}
/// tests for this module
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use primitives::{H256, Blake2Hasher};
use support::{impl_outer_origin, parameter_types};
use sr_primitives::{traits::{BlakeTwo256, IdentityLookup, OnFinalize}, testing::Header};
use sr_primitives::weights::Weight;
use sr_primitives::Perbill;
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
}
impl system::Trait for Test {
type Origin = Origin;
type Call = ();
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type WeightMultiplierUpdate = ();
type Event = ();
type BlockHashCount = BlockHashCount;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumBlockLength = MaximumBlockLength;
type AvailableBlockRatio = AvailableBlockRatio;
type Version = ();
}
impl Trait for Test {
type Event = ();
}
type Stateless = Module<Test>;
type System = system::Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
#[test]
fn test_add() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
});
}
#[test]
fn test_del() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
// Collect witnesses for the added elements
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// Add elements
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
// Delete elements
let deletions = vec![(elems[0], witnesses[0]), (elems[1], witnesses[1]), (elems[2], witnesses[2])];
let (state, _, _) = accumulator::batch_delete(Stateless::get_state(), &deletions);
assert_eq!(state, U2048::from(2));
});
}
#[test]
fn test_block() {
with_externalities(&mut new_test_ext(), || {
// 1. Construct UTXOs.
let utxo_0 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 0,
};
let utxo_1 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 1,
};
let utxo_2 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 2,
};
// 2. Hash each UTXO to a prime.
let elem_0 = subroutines::hash_to_prime(&utxo_0.encode());
let elem_1 = subroutines::hash_to_prime(&utxo_1.encode());
let elem_2 = subroutines::hash_to_prime(&utxo_2.encode());
let elems = vec![elem_0, elem_1, elem_2];
// 3. Produce witnesses for the added elements.
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// 4. Add elements to the accumulator.
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
State::put(state);
// 5. Construct new UTXOs and derive integer representations.
let utxo_3 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 0,
};
let utxo_4 = UTXO {
pub_key: H256:: | Transaction | identifier_name |
stateless.rs | use accumulator::*;
/// At the moment, this particular struct resembles more closely an NFT.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq, Copy)]
pub struct UTXO {
pub_key: H256,
id: u64,
}
/// Primitive transaction model with one input and one output.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq)]
pub struct Transaction {
input: UTXO,
output: UTXO,
witness: Vec<u8>,
// Would in practice include a signature here.
}
pub trait Trait: system::Trait {
type Event: From<Event> + Into<<Self as system::Trait>::Event>;
}
decl_storage! {
trait Store for Module<T: Trait> as Stateless {
State get(get_state): U2048 = U2048::from(2); // Use 2 as an arbitrary generator with "unknown" order.
SpentCoins get(get_spent_coins): Vec<(U2048, U2048)>;
NewCoins get(get_new_coins): Vec<U2048>
}
}
decl_event!(
pub enum Event {
Deletion(U2048, U2048, U2048),
Addition(U2048, U2048, U2048),
}
);
| /// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initialize generic event
fn deposit_event() = default;
/// Receive request to execute a transaction.
/// Verify the contents of a transaction and temporarily add it to a queue of verified transactions.
/// NOTE: Only works if one transaction per user per block is submitted.
pub fn addTransaction(origin, transaction: Transaction) -> Result {
ensure_signed(origin)?;
// Arbitrarily cap the number of pending transactions to 100
ensure!(SpentCoins::get().len() < 100, "Transaction queue full. Please try again next block.");
// Also verify that the user is not spending to themselves
ensure!(transaction.input.pub_key != transaction.output.pub_key, "Cannot send coin to yourself.");
// Verify witness
let spent_elem = subroutines::hash_to_prime(&transaction.input.encode());
let witness = U2048::from_little_endian(&transaction.witness);
ensure!(witnesses::verify_mem_wit(State::get(), witness, spent_elem), "Witness is invalid");
let new_elem = subroutines::hash_to_prime(&transaction.output.encode());
// Update storage items.
SpentCoins::append(&vec![(spent_elem, witness)]);
Ok(())
}
/// Arbitrary replacement for Proof-of-Work to create new coins.
pub fn mint(origin, elem: u64) -> Result {
ensure_signed(origin)?;
let state = subroutines::mod_exp(Self::get_state(), U2048::from(elem), U2048::from_dec_str(MODULUS).unwrap());
State::put(state);
Ok(())
}
/// Batch delete spent coins and add new coins on block finalization
fn on_finalize() {
// Clause here to protect against empty blocks
if Self::get_spent_coins().len() > 0 {
// Delete spent coins from aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_delete(State::get(), &Self::get_spent_coins());
Self::deposit_event(Event::Deletion(state, agg, proof));
// Add new coins to aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_add(state, &Self::get_new_coins());
Self::deposit_event(Event::Addition(state, agg, proof));
// Update state
State::put(state);
}
// Clear storage
SpentCoins::kill();
NewCoins::kill();
}
}
}
/// tests for this module
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use primitives::{H256, Blake2Hasher};
use support::{impl_outer_origin, parameter_types};
use sr_primitives::{traits::{BlakeTwo256, IdentityLookup, OnFinalize}, testing::Header};
use sr_primitives::weights::Weight;
use sr_primitives::Perbill;
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
}
impl system::Trait for Test {
type Origin = Origin;
type Call = ();
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type WeightMultiplierUpdate = ();
type Event = ();
type BlockHashCount = BlockHashCount;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumBlockLength = MaximumBlockLength;
type AvailableBlockRatio = AvailableBlockRatio;
type Version = ();
}
impl Trait for Test {
type Event = ();
}
type Stateless = Module<Test>;
type System = system::Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
#[test]
fn test_add() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
});
}
#[test]
fn test_del() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
// Collect witnesses for the added elements
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// Add elements
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
// Delete elements
let deletions = vec![(elems[0], witnesses[0]), (elems[1], witnesses[1]), (elems[2], witnesses[2])];
let (state, _, _) = accumulator::batch_delete(Stateless::get_state(), &deletions);
assert_eq!(state, U2048::from(2));
});
}
#[test]
fn test_block() {
with_externalities(&mut new_test_ext(), || {
// 1. Construct UTXOs.
let utxo_0 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 0,
};
let utxo_1 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 1,
};
let utxo_2 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 2,
};
// 2. Hash each UTXO to a prime.
let elem_0 = subroutines::hash_to_prime(&utxo_0.encode());
let elem_1 = subroutines::hash_to_prime(&utxo_1.encode());
let elem_2 = subroutines::hash_to_prime(&utxo_2.encode());
let elems = vec![elem_0, elem_1, elem_2];
// 3. Produce witnesses for the added elements.
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// 4. Add elements to the accumulator.
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
State::put(state);
// 5. Construct new UTXOs and derive integer representations.
let utxo_3 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 0,
};
let utxo_4 = UTXO {
pub_key: H256:: | decl_module! { | random_line_split |
stateless.rs | accumulator::*;
/// At the moment, this particular struct resembles more closely an NFT.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq, Copy)]
pub struct UTXO {
pub_key: H256,
id: u64,
}
/// Primitive transaction model with one input and one output.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq)]
pub struct Transaction {
input: UTXO,
output: UTXO,
witness: Vec<u8>,
// Would in practice include a signature here.
}
pub trait Trait: system::Trait {
type Event: From<Event> + Into<<Self as system::Trait>::Event>;
}
decl_storage! {
trait Store for Module<T: Trait> as Stateless {
State get(get_state): U2048 = U2048::from(2); // Use 2 as an arbitrary generator with "unknown" order.
SpentCoins get(get_spent_coins): Vec<(U2048, U2048)>;
NewCoins get(get_new_coins): Vec<U2048>
}
}
decl_event!(
pub enum Event {
Deletion(U2048, U2048, U2048),
Addition(U2048, U2048, U2048),
}
);
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initialize generic event
fn deposit_event() = default;
/// Receive request to execute a transaction.
/// Verify the contents of a transaction and temporarily add it to a queue of verified transactions.
/// NOTE: Only works if one transaction per user per block is submitted.
pub fn addTransaction(origin, transaction: Transaction) -> Result {
ensure_signed(origin)?;
// Arbitrarily cap the number of pending transactions to 100
ensure!(SpentCoins::get().len() < 100, "Transaction queue full. Please try again next block.");
// Also verify that the user is not spending to themselves
ensure!(transaction.input.pub_key != transaction.output.pub_key, "Cannot send coin to yourself.");
// Verify witness
let spent_elem = subroutines::hash_to_prime(&transaction.input.encode());
let witness = U2048::from_little_endian(&transaction.witness);
ensure!(witnesses::verify_mem_wit(State::get(), witness, spent_elem), "Witness is invalid");
let new_elem = subroutines::hash_to_prime(&transaction.output.encode());
// Update storage items.
SpentCoins::append(&vec![(spent_elem, witness)]);
Ok(())
}
/// Arbitrary replacement for Proof-of-Work to create new coins.
pub fn mint(origin, elem: u64) -> Result {
ensure_signed(origin)?;
let state = subroutines::mod_exp(Self::get_state(), U2048::from(elem), U2048::from_dec_str(MODULUS).unwrap());
State::put(state);
Ok(())
}
/// Batch delete spent coins and add new coins on block finalization
fn on_finalize() {
// Clause here to protect against empty blocks
if Self::get_spent_coins().len() > 0 {
// Delete spent coins from aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_delete(State::get(), &Self::get_spent_coins());
Self::deposit_event(Event::Deletion(state, agg, proof));
// Add new coins to aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_add(state, &Self::get_new_coins());
Self::deposit_event(Event::Addition(state, agg, proof));
// Update state
State::put(state);
}
// Clear storage
SpentCoins::kill();
NewCoins::kill();
}
}
}
/// tests for this module
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use primitives::{H256, Blake2Hasher};
use support::{impl_outer_origin, parameter_types};
use sr_primitives::{traits::{BlakeTwo256, IdentityLookup, OnFinalize}, testing::Header};
use sr_primitives::weights::Weight;
use sr_primitives::Perbill;
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
}
impl system::Trait for Test {
type Origin = Origin;
type Call = ();
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type WeightMultiplierUpdate = ();
type Event = ();
type BlockHashCount = BlockHashCount;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumBlockLength = MaximumBlockLength;
type AvailableBlockRatio = AvailableBlockRatio;
type Version = ();
}
impl Trait for Test {
type Event = ();
}
type Stateless = Module<Test>;
type System = system::Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
#[test]
fn test_add() |
#[test]
fn test_del() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
// Collect witnesses for the added elements
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// Add elements
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
// Delete elements
let deletions = vec![(elems[0], witnesses[0]), (elems[1], witnesses[1]), (elems[2], witnesses[2])];
let (state, _, _) = accumulator::batch_delete(Stateless::get_state(), &deletions);
assert_eq!(state, U2048::from(2));
});
}
#[test]
fn test_block() {
with_externalities(&mut new_test_ext(), || {
// 1. Construct UTXOs.
let utxo_0 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 0,
};
let utxo_1 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 1,
};
let utxo_2 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 2,
};
// 2. Hash each UTXO to a prime.
let elem_0 = subroutines::hash_to_prime(&utxo_0.encode());
let elem_1 = subroutines::hash_to_prime(&utxo_1.encode());
let elem_2 = subroutines::hash_to_prime(&utxo_2.encode());
let elems = vec![elem_0, elem_1, elem_2];
// 3. Produce witnesses for the added elements.
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// 4. Add elements to the accumulator.
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
State::put(state);
// 5. Construct new UTXOs and derive integer representations.
let utxo_3 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 0,
};
let utxo_4 = UTXO {
pub_key: H256 | {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
});
} | identifier_body |
worker.go | protobuf/types/known/timestamppb"
)
const (
// batchSize is the number of chunks to read from Spanner at a time.
batchSize = 10
// TargetTaskDuration is the desired duration of a re-clustering task.
// If a task completes before the reclustering run has completed, a
// continuation task will be scheduled.
//
// Longer durations will incur lower task queuing/re-queueing overhead,
// but limit the ability of autoscaling to move tasks between instances
// in response to load.
TargetTaskDuration = 2 * time.Second
// ProgressInterval is the amount of time between progress updates.
//
// Note that this is the frequency at which updates should
// be reported for a shard of work; individual tasks are usually
// much shorter lived and consequently most will not report any progress
// (unless it is time for the shard to report progress again).
ProgressInterval = 5 * time.Second
)
// ChunkStore is the interface for the blob store archiving chunks of test
// results for later re-clustering.
type ChunkStore interface {
// Get retrieves the chunk with the specified object ID and returns it.
Get(ctx context.Context, project, objectID string) (*cpb.Chunk, error)
}
// Worker provides methods to process re-clustering tasks. It is safe to be
// used by multiple threads concurrently.
type Worker struct {
chunkStore ChunkStore
analysis Analysis
}
// NewWorker initialises a new Worker.
func NewWorker(chunkStore ChunkStore, analysis Analysis) *Worker {
return &Worker{
chunkStore: chunkStore,
analysis: analysis,
}
}
// taskContext provides objects relevant to working on a particular
// re-clustering task.
type taskContext struct {
worker *Worker
task *taskspb.ReclusterChunks
// nextReportDue is the time at which the next progress update is
// due.
nextReportDue time.Time
// currentChunkID is the exclusive lower bound of the range
// of ChunkIds still to re-cluster.
currentChunkID string
}
// Do works on a re-clustering task for approximately duration, returning a
// continuation task (if the run end time has not been reached).
//
// Continuation tasks are used to better integrate with GAE autoscaling,
// autoscaling work best when tasks are relatively small (so that work
// can be moved between instances in real time).
func (w *Worker) Do(ctx context.Context, task *taskspb.ReclusterChunks, duration time.Duration) (*taskspb.ReclusterChunks, error) {
if task.State == nil {
return nil, errors.New("task does not have state")
}
if task.ShardNumber <= 0 {
return nil, errors.New("task must have valid shard number")
}
if task.AlgorithmsVersion <= 0 {
return nil, errors.New("task must have valid algorithms version")
}
runEndTime := task.AttemptTime.AsTime()
if task.AlgorithmsVersion > algorithms.AlgorithmsVersion {
return nil, fmt.Errorf("running out-of-date algorithms version (task requires %v, worker running %v)",
task.AlgorithmsVersion, algorithms.AlgorithmsVersion)
}
tctx := &taskContext{
worker: w, |
// softEndTime is the (soft) deadline for the run.
softEndTime := clock.Now(ctx).Add(duration)
if runEndTime.Before(softEndTime) {
// Stop by the run end time.
softEndTime = runEndTime
}
var done bool
for clock.Now(ctx).Before(softEndTime) && !done {
err := retry.Retry(ctx, transient.Only(retry.Default), func() error {
// Stop harder if retrying after the run end time, to avoid
// getting stuck in a retry loop if we are running in
// parallel with another worker.
if !clock.Now(ctx).Before(runEndTime) {
return nil
}
var err error
done, err = tctx.recluster(ctx)
return err
}, nil)
if err != nil {
return nil, err
}
}
var continuation *taskspb.ReclusterChunks
if softEndTime.Before(runEndTime) && !done {
continuation = &taskspb.ReclusterChunks{
ShardNumber: task.ShardNumber,
Project: task.Project,
AttemptTime: task.AttemptTime,
StartChunkId: task.StartChunkId,
EndChunkId: task.EndChunkId,
AlgorithmsVersion: task.AlgorithmsVersion,
RulesVersion: task.RulesVersion,
ConfigVersion: task.ConfigVersion,
State: &taskspb.ReclusterChunkState{
CurrentChunkId: tctx.currentChunkID,
NextReportDue: timestamppb.New(tctx.nextReportDue),
},
}
}
return continuation, nil
}
// recluster tries to reclusters some chunks, advancing currentChunkID
// as it succeeds. It returns 'true' if all chunks to be re-clustered by
// the reclustering task were completed.
func (t *taskContext) recluster(ctx context.Context) (done bool, err error) {
ctx, s := trace.StartSpan(ctx, "go.chromium.org/luci/analysis/internal/clustering/reclustering.recluster")
s.Attribute("project", t.task.Project)
s.Attribute("currentChunkID", t.currentChunkID)
defer func() { s.End(err) }()
readOpts := state.ReadNextOptions{
StartChunkID: t.currentChunkID,
EndChunkID: t.task.EndChunkId,
AlgorithmsVersion: t.task.AlgorithmsVersion,
ConfigVersion: t.task.ConfigVersion.AsTime(),
RulesVersion: t.task.RulesVersion.AsTime(),
}
entries, err := state.ReadNextN(span.Single(ctx), t.task.Project, readOpts, batchSize)
if err != nil {
return false, errors.Annotate(err, "read next chunk state").Err()
}
if len(entries) == 0 {
// We have finished re-clustering.
err = t.updateProgress(ctx, shards.MaxProgress)
if err != nil {
return true, err
}
return true, nil
}
pendingUpdates := NewPendingUpdates(ctx)
for i, entry := range entries {
// Read the test results from GCS.
chunk, err := t.worker.chunkStore.Get(ctx, t.task.Project, entry.ObjectID)
if err != nil {
return false, errors.Annotate(err, "read chunk").Err()
}
// Obtain a recent ruleset of at least RulesVersion.
ruleset, err := Ruleset(ctx, t.task.Project, t.task.RulesVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain ruleset").Err()
}
// Obtain a recent configuration of at least ConfigVersion.
cfg, err := compiledcfg.Project(ctx, t.task.Project, t.task.ConfigVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain config").Err()
}
// Re-cluster the test results in spanner, then export
// the re-clustering to BigQuery for analysis.
update, err := PrepareUpdate(ctx, ruleset, cfg, chunk, entry)
if err != nil {
return false, errors.Annotate(err, "re-cluster chunk").Err()
}
pendingUpdates.Add(update)
if pendingUpdates.ShouldApply(ctx) || (i == len(entries)-1) {
if err := pendingUpdates.Apply(ctx, t.worker.analysis); err != nil {
if err == UpdateRaceErr {
// Our update raced with another update.
// This is retriable if we re-read the chunk again.
err = transient.Tag.Apply(err)
}
return false, err
}
pendingUpdates = NewPendingUpdates(ctx)
// Advance our position only on successful commit.
t.currentChunkID = entry.ChunkID
if err := t.calculateAndReportProgress(ctx); err != nil {
return false, err
}
}
}
// More to do.
return false, nil
}
// calculateAndReportProgress reports progress on the shard, based on the current
// value of t.currentChunkID. It can only be used to report interim progress (it
// will never report a progress value of 1000).
func (t *taskContext) calculateAndReportProgress(ctx context.Context) (err error) {
// Manage contention on the ReclusteringRun row by only periodically
// reporting progress.
if clock.Now(ctx).After(t.nextReportDue) {
progress, err := calculateProgress(t.task, t.currentChunkID)
if err != nil {
return errors.Annotate(err, "calculate progress").Err()
}
err = t.updateProgress(ctx, progress)
if err != nil {
return err
}
t.nextReportDue = t.nextReportDue.Add(ProgressInterval)
}
return nil
}
// updateProgress sets progress on the shard.
func (t *taskContext) updateProgress(ctx context.Context, value int) (err error) {
ctx, s := trace.Start | task: task,
nextReportDue: task.State.NextReportDue.AsTime(),
currentChunkID: task.State.CurrentChunkId,
} | random_line_split |
worker.go | protobuf/types/known/timestamppb"
)
const (
// batchSize is the number of chunks to read from Spanner at a time.
batchSize = 10
// TargetTaskDuration is the desired duration of a re-clustering task.
// If a task completes before the reclustering run has completed, a
// continuation task will be scheduled.
//
// Longer durations will incur lower task queuing/re-queueing overhead,
// but limit the ability of autoscaling to move tasks between instances
// in response to load.
TargetTaskDuration = 2 * time.Second
// ProgressInterval is the amount of time between progress updates.
//
// Note that this is the frequency at which updates should
// be reported for a shard of work; individual tasks are usually
// much shorter lived and consequently most will not report any progress
// (unless it is time for the shard to report progress again).
ProgressInterval = 5 * time.Second
)
// ChunkStore is the interface for the blob store archiving chunks of test
// results for later re-clustering.
type ChunkStore interface {
// Get retrieves the chunk with the specified object ID and returns it.
Get(ctx context.Context, project, objectID string) (*cpb.Chunk, error)
}
// Worker provides methods to process re-clustering tasks. It is safe to be
// used by multiple threads concurrently.
type Worker struct {
chunkStore ChunkStore
analysis Analysis
}
// NewWorker initialises a new Worker.
func NewWorker(chunkStore ChunkStore, analysis Analysis) *Worker {
return &Worker{
chunkStore: chunkStore,
analysis: analysis,
}
}
// taskContext provides objects relevant to working on a particular
// re-clustering task.
type taskContext struct {
worker *Worker
task *taskspb.ReclusterChunks
// nextReportDue is the time at which the next progress update is
// due.
nextReportDue time.Time
// currentChunkID is the exclusive lower bound of the range
// of ChunkIds still to re-cluster.
currentChunkID string
}
// Do works on a re-clustering task for approximately duration, returning a
// continuation task (if the run end time has not been reached).
//
// Continuation tasks are used to better integrate with GAE autoscaling,
// autoscaling work best when tasks are relatively small (so that work
// can be moved between instances in real time).
func (w *Worker) Do(ctx context.Context, task *taskspb.ReclusterChunks, duration time.Duration) (*taskspb.ReclusterChunks, error) {
if task.State == nil {
return nil, errors.New("task does not have state")
}
if task.ShardNumber <= 0 {
return nil, errors.New("task must have valid shard number")
}
if task.AlgorithmsVersion <= 0 {
return nil, errors.New("task must have valid algorithms version")
}
runEndTime := task.AttemptTime.AsTime()
if task.AlgorithmsVersion > algorithms.AlgorithmsVersion {
return nil, fmt.Errorf("running out-of-date algorithms version (task requires %v, worker running %v)",
task.AlgorithmsVersion, algorithms.AlgorithmsVersion)
}
tctx := &taskContext{
worker: w,
task: task,
nextReportDue: task.State.NextReportDue.AsTime(),
currentChunkID: task.State.CurrentChunkId,
}
// softEndTime is the (soft) deadline for the run.
softEndTime := clock.Now(ctx).Add(duration)
if runEndTime.Before(softEndTime) {
// Stop by the run end time.
softEndTime = runEndTime
}
var done bool
for clock.Now(ctx).Before(softEndTime) && !done {
err := retry.Retry(ctx, transient.Only(retry.Default), func() error {
// Stop harder if retrying after the run end time, to avoid
// getting stuck in a retry loop if we are running in
// parallel with another worker.
if !clock.Now(ctx).Before(runEndTime) {
return nil
}
var err error
done, err = tctx.recluster(ctx)
return err
}, nil)
if err != nil {
return nil, err
}
}
var continuation *taskspb.ReclusterChunks
if softEndTime.Before(runEndTime) && !done {
continuation = &taskspb.ReclusterChunks{
ShardNumber: task.ShardNumber,
Project: task.Project,
AttemptTime: task.AttemptTime,
StartChunkId: task.StartChunkId,
EndChunkId: task.EndChunkId,
AlgorithmsVersion: task.AlgorithmsVersion,
RulesVersion: task.RulesVersion,
ConfigVersion: task.ConfigVersion,
State: &taskspb.ReclusterChunkState{
CurrentChunkId: tctx.currentChunkID,
NextReportDue: timestamppb.New(tctx.nextReportDue),
},
}
}
return continuation, nil
}
// recluster tries to reclusters some chunks, advancing currentChunkID
// as it succeeds. It returns 'true' if all chunks to be re-clustered by
// the reclustering task were completed.
func (t *taskContext) recluster(ctx context.Context) (done bool, err error) {
ctx, s := trace.StartSpan(ctx, "go.chromium.org/luci/analysis/internal/clustering/reclustering.recluster")
s.Attribute("project", t.task.Project)
s.Attribute("currentChunkID", t.currentChunkID)
defer func() { s.End(err) }()
readOpts := state.ReadNextOptions{
StartChunkID: t.currentChunkID,
EndChunkID: t.task.EndChunkId,
AlgorithmsVersion: t.task.AlgorithmsVersion,
ConfigVersion: t.task.ConfigVersion.AsTime(),
RulesVersion: t.task.RulesVersion.AsTime(),
}
entries, err := state.ReadNextN(span.Single(ctx), t.task.Project, readOpts, batchSize)
if err != nil {
return false, errors.Annotate(err, "read next chunk state").Err()
}
if len(entries) == 0 {
// We have finished re-clustering.
err = t.updateProgress(ctx, shards.MaxProgress)
if err != nil {
return true, err
}
return true, nil
}
pendingUpdates := NewPendingUpdates(ctx)
for i, entry := range entries {
// Read the test results from GCS.
chunk, err := t.worker.chunkStore.Get(ctx, t.task.Project, entry.ObjectID)
if err != nil {
return false, errors.Annotate(err, "read chunk").Err()
}
// Obtain a recent ruleset of at least RulesVersion.
ruleset, err := Ruleset(ctx, t.task.Project, t.task.RulesVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain ruleset").Err()
}
// Obtain a recent configuration of at least ConfigVersion.
cfg, err := compiledcfg.Project(ctx, t.task.Project, t.task.ConfigVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain config").Err()
}
// Re-cluster the test results in spanner, then export
// the re-clustering to BigQuery for analysis.
update, err := PrepareUpdate(ctx, ruleset, cfg, chunk, entry)
if err != nil {
return false, errors.Annotate(err, "re-cluster chunk").Err()
}
pendingUpdates.Add(update)
if pendingUpdates.ShouldApply(ctx) || (i == len(entries)-1) {
if err := pendingUpdates.Apply(ctx, t.worker.analysis); err != nil |
pendingUpdates = NewPendingUpdates(ctx)
// Advance our position only on successful commit.
t.currentChunkID = entry.ChunkID
if err := t.calculateAndReportProgress(ctx); err != nil {
return false, err
}
}
}
// More to do.
return false, nil
}
// calculateAndReportProgress reports progress on the shard, based on the current
// value of t.currentChunkID. It can only be used to report interim progress (it
// will never report a progress value of 1000).
func (t *taskContext) calculateAndReportProgress(ctx context.Context) (err error) {
// Manage contention on the ReclusteringRun row by only periodically
// reporting progress.
if clock.Now(ctx).After(t.nextReportDue) {
progress, err := calculateProgress(t.task, t.currentChunkID)
if err != nil {
return errors.Annotate(err, "calculate progress").Err()
}
err = t.updateProgress(ctx, progress)
if err != nil {
return err
}
t.nextReportDue = t.nextReportDue.Add(ProgressInterval)
}
return nil
}
// updateProgress sets progress on the shard.
func (t *taskContext) updateProgress(ctx context.Context, value int) (err error) {
ctx, s := trace | {
if err == UpdateRaceErr {
// Our update raced with another update.
// This is retriable if we re-read the chunk again.
err = transient.Tag.Apply(err)
}
return false, err
} | conditional_block |
worker.go | protobuf/types/known/timestamppb"
)
const (
// batchSize is the number of chunks to read from Spanner at a time.
batchSize = 10
// TargetTaskDuration is the desired duration of a re-clustering task.
// If a task completes before the reclustering run has completed, a
// continuation task will be scheduled.
//
// Longer durations will incur lower task queuing/re-queueing overhead,
// but limit the ability of autoscaling to move tasks between instances
// in response to load.
TargetTaskDuration = 2 * time.Second
// ProgressInterval is the amount of time between progress updates.
//
// Note that this is the frequency at which updates should
// be reported for a shard of work; individual tasks are usually
// much shorter lived and consequently most will not report any progress
// (unless it is time for the shard to report progress again).
ProgressInterval = 5 * time.Second
)
// ChunkStore is the interface for the blob store archiving chunks of test
// results for later re-clustering.
type ChunkStore interface {
// Get retrieves the chunk with the specified object ID and returns it.
Get(ctx context.Context, project, objectID string) (*cpb.Chunk, error)
}
// Worker provides methods to process re-clustering tasks. It is safe to be
// used by multiple threads concurrently.
type Worker struct {
chunkStore ChunkStore
analysis Analysis
}
// NewWorker initialises a new Worker.
func NewWorker(chunkStore ChunkStore, analysis Analysis) *Worker {
return &Worker{
chunkStore: chunkStore,
analysis: analysis,
}
}
// taskContext provides objects relevant to working on a particular
// re-clustering task.
type taskContext struct {
worker *Worker
task *taskspb.ReclusterChunks
// nextReportDue is the time at which the next progress update is
// due.
nextReportDue time.Time
// currentChunkID is the exclusive lower bound of the range
// of ChunkIds still to re-cluster.
currentChunkID string
}
// Do works on a re-clustering task for approximately duration, returning a
// continuation task (if the run end time has not been reached).
//
// Continuation tasks are used to better integrate with GAE autoscaling,
// autoscaling work best when tasks are relatively small (so that work
// can be moved between instances in real time).
func (w *Worker) Do(ctx context.Context, task *taskspb.ReclusterChunks, duration time.Duration) (*taskspb.ReclusterChunks, error) {
if task.State == nil {
return nil, errors.New("task does not have state")
}
if task.ShardNumber <= 0 {
return nil, errors.New("task must have valid shard number")
}
if task.AlgorithmsVersion <= 0 {
return nil, errors.New("task must have valid algorithms version")
}
runEndTime := task.AttemptTime.AsTime()
if task.AlgorithmsVersion > algorithms.AlgorithmsVersion {
return nil, fmt.Errorf("running out-of-date algorithms version (task requires %v, worker running %v)",
task.AlgorithmsVersion, algorithms.AlgorithmsVersion)
}
tctx := &taskContext{
worker: w,
task: task,
nextReportDue: task.State.NextReportDue.AsTime(),
currentChunkID: task.State.CurrentChunkId,
}
// softEndTime is the (soft) deadline for the run.
softEndTime := clock.Now(ctx).Add(duration)
if runEndTime.Before(softEndTime) {
// Stop by the run end time.
softEndTime = runEndTime
}
var done bool
for clock.Now(ctx).Before(softEndTime) && !done {
err := retry.Retry(ctx, transient.Only(retry.Default), func() error {
// Stop harder if retrying after the run end time, to avoid
// getting stuck in a retry loop if we are running in
// parallel with another worker.
if !clock.Now(ctx).Before(runEndTime) {
return nil
}
var err error
done, err = tctx.recluster(ctx)
return err
}, nil)
if err != nil {
return nil, err
}
}
var continuation *taskspb.ReclusterChunks
if softEndTime.Before(runEndTime) && !done {
continuation = &taskspb.ReclusterChunks{
ShardNumber: task.ShardNumber,
Project: task.Project,
AttemptTime: task.AttemptTime,
StartChunkId: task.StartChunkId,
EndChunkId: task.EndChunkId,
AlgorithmsVersion: task.AlgorithmsVersion,
RulesVersion: task.RulesVersion,
ConfigVersion: task.ConfigVersion,
State: &taskspb.ReclusterChunkState{
CurrentChunkId: tctx.currentChunkID,
NextReportDue: timestamppb.New(tctx.nextReportDue),
},
}
}
return continuation, nil
}
// recluster tries to reclusters some chunks, advancing currentChunkID
// as it succeeds. It returns 'true' if all chunks to be re-clustered by
// the reclustering task were completed.
func (t *taskContext) | (ctx context.Context) (done bool, err error) {
ctx, s := trace.StartSpan(ctx, "go.chromium.org/luci/analysis/internal/clustering/reclustering.recluster")
s.Attribute("project", t.task.Project)
s.Attribute("currentChunkID", t.currentChunkID)
defer func() { s.End(err) }()
readOpts := state.ReadNextOptions{
StartChunkID: t.currentChunkID,
EndChunkID: t.task.EndChunkId,
AlgorithmsVersion: t.task.AlgorithmsVersion,
ConfigVersion: t.task.ConfigVersion.AsTime(),
RulesVersion: t.task.RulesVersion.AsTime(),
}
entries, err := state.ReadNextN(span.Single(ctx), t.task.Project, readOpts, batchSize)
if err != nil {
return false, errors.Annotate(err, "read next chunk state").Err()
}
if len(entries) == 0 {
// We have finished re-clustering.
err = t.updateProgress(ctx, shards.MaxProgress)
if err != nil {
return true, err
}
return true, nil
}
pendingUpdates := NewPendingUpdates(ctx)
for i, entry := range entries {
// Read the test results from GCS.
chunk, err := t.worker.chunkStore.Get(ctx, t.task.Project, entry.ObjectID)
if err != nil {
return false, errors.Annotate(err, "read chunk").Err()
}
// Obtain a recent ruleset of at least RulesVersion.
ruleset, err := Ruleset(ctx, t.task.Project, t.task.RulesVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain ruleset").Err()
}
// Obtain a recent configuration of at least ConfigVersion.
cfg, err := compiledcfg.Project(ctx, t.task.Project, t.task.ConfigVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain config").Err()
}
// Re-cluster the test results in spanner, then export
// the re-clustering to BigQuery for analysis.
update, err := PrepareUpdate(ctx, ruleset, cfg, chunk, entry)
if err != nil {
return false, errors.Annotate(err, "re-cluster chunk").Err()
}
pendingUpdates.Add(update)
if pendingUpdates.ShouldApply(ctx) || (i == len(entries)-1) {
if err := pendingUpdates.Apply(ctx, t.worker.analysis); err != nil {
if err == UpdateRaceErr {
// Our update raced with another update.
// This is retriable if we re-read the chunk again.
err = transient.Tag.Apply(err)
}
return false, err
}
pendingUpdates = NewPendingUpdates(ctx)
// Advance our position only on successful commit.
t.currentChunkID = entry.ChunkID
if err := t.calculateAndReportProgress(ctx); err != nil {
return false, err
}
}
}
// More to do.
return false, nil
}
// calculateAndReportProgress reports progress on the shard, based on the current
// value of t.currentChunkID. It can only be used to report interim progress (it
// will never report a progress value of 1000).
func (t *taskContext) calculateAndReportProgress(ctx context.Context) (err error) {
// Manage contention on the ReclusteringRun row by only periodically
// reporting progress.
if clock.Now(ctx).After(t.nextReportDue) {
progress, err := calculateProgress(t.task, t.currentChunkID)
if err != nil {
return errors.Annotate(err, "calculate progress").Err()
}
err = t.updateProgress(ctx, progress)
if err != nil {
return err
}
t.nextReportDue = t.nextReportDue.Add(ProgressInterval)
}
return nil
}
// updateProgress sets progress on the shard.
func (t *taskContext) updateProgress(ctx context.Context, value int) (err error) {
ctx, s := trace.Start | recluster | identifier_name |
worker.go | GAE autoscaling,
// autoscaling work best when tasks are relatively small (so that work
// can be moved between instances in real time).
func (w *Worker) Do(ctx context.Context, task *taskspb.ReclusterChunks, duration time.Duration) (*taskspb.ReclusterChunks, error) {
if task.State == nil {
return nil, errors.New("task does not have state")
}
if task.ShardNumber <= 0 {
return nil, errors.New("task must have valid shard number")
}
if task.AlgorithmsVersion <= 0 {
return nil, errors.New("task must have valid algorithms version")
}
runEndTime := task.AttemptTime.AsTime()
if task.AlgorithmsVersion > algorithms.AlgorithmsVersion {
return nil, fmt.Errorf("running out-of-date algorithms version (task requires %v, worker running %v)",
task.AlgorithmsVersion, algorithms.AlgorithmsVersion)
}
tctx := &taskContext{
worker: w,
task: task,
nextReportDue: task.State.NextReportDue.AsTime(),
currentChunkID: task.State.CurrentChunkId,
}
// softEndTime is the (soft) deadline for the run.
softEndTime := clock.Now(ctx).Add(duration)
if runEndTime.Before(softEndTime) {
// Stop by the run end time.
softEndTime = runEndTime
}
var done bool
for clock.Now(ctx).Before(softEndTime) && !done {
err := retry.Retry(ctx, transient.Only(retry.Default), func() error {
// Stop harder if retrying after the run end time, to avoid
// getting stuck in a retry loop if we are running in
// parallel with another worker.
if !clock.Now(ctx).Before(runEndTime) {
return nil
}
var err error
done, err = tctx.recluster(ctx)
return err
}, nil)
if err != nil {
return nil, err
}
}
var continuation *taskspb.ReclusterChunks
if softEndTime.Before(runEndTime) && !done {
continuation = &taskspb.ReclusterChunks{
ShardNumber: task.ShardNumber,
Project: task.Project,
AttemptTime: task.AttemptTime,
StartChunkId: task.StartChunkId,
EndChunkId: task.EndChunkId,
AlgorithmsVersion: task.AlgorithmsVersion,
RulesVersion: task.RulesVersion,
ConfigVersion: task.ConfigVersion,
State: &taskspb.ReclusterChunkState{
CurrentChunkId: tctx.currentChunkID,
NextReportDue: timestamppb.New(tctx.nextReportDue),
},
}
}
return continuation, nil
}
// recluster tries to reclusters some chunks, advancing currentChunkID
// as it succeeds. It returns 'true' if all chunks to be re-clustered by
// the reclustering task were completed.
func (t *taskContext) recluster(ctx context.Context) (done bool, err error) {
ctx, s := trace.StartSpan(ctx, "go.chromium.org/luci/analysis/internal/clustering/reclustering.recluster")
s.Attribute("project", t.task.Project)
s.Attribute("currentChunkID", t.currentChunkID)
defer func() { s.End(err) }()
readOpts := state.ReadNextOptions{
StartChunkID: t.currentChunkID,
EndChunkID: t.task.EndChunkId,
AlgorithmsVersion: t.task.AlgorithmsVersion,
ConfigVersion: t.task.ConfigVersion.AsTime(),
RulesVersion: t.task.RulesVersion.AsTime(),
}
entries, err := state.ReadNextN(span.Single(ctx), t.task.Project, readOpts, batchSize)
if err != nil {
return false, errors.Annotate(err, "read next chunk state").Err()
}
if len(entries) == 0 {
// We have finished re-clustering.
err = t.updateProgress(ctx, shards.MaxProgress)
if err != nil {
return true, err
}
return true, nil
}
pendingUpdates := NewPendingUpdates(ctx)
for i, entry := range entries {
// Read the test results from GCS.
chunk, err := t.worker.chunkStore.Get(ctx, t.task.Project, entry.ObjectID)
if err != nil {
return false, errors.Annotate(err, "read chunk").Err()
}
// Obtain a recent ruleset of at least RulesVersion.
ruleset, err := Ruleset(ctx, t.task.Project, t.task.RulesVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain ruleset").Err()
}
// Obtain a recent configuration of at least ConfigVersion.
cfg, err := compiledcfg.Project(ctx, t.task.Project, t.task.ConfigVersion.AsTime())
if err != nil {
return false, errors.Annotate(err, "obtain config").Err()
}
// Re-cluster the test results in spanner, then export
// the re-clustering to BigQuery for analysis.
update, err := PrepareUpdate(ctx, ruleset, cfg, chunk, entry)
if err != nil {
return false, errors.Annotate(err, "re-cluster chunk").Err()
}
pendingUpdates.Add(update)
if pendingUpdates.ShouldApply(ctx) || (i == len(entries)-1) {
if err := pendingUpdates.Apply(ctx, t.worker.analysis); err != nil {
if err == UpdateRaceErr {
// Our update raced with another update.
// This is retriable if we re-read the chunk again.
err = transient.Tag.Apply(err)
}
return false, err
}
pendingUpdates = NewPendingUpdates(ctx)
// Advance our position only on successful commit.
t.currentChunkID = entry.ChunkID
if err := t.calculateAndReportProgress(ctx); err != nil {
return false, err
}
}
}
// More to do.
return false, nil
}
// calculateAndReportProgress reports progress on the shard, based on the current
// value of t.currentChunkID. It can only be used to report interim progress (it
// will never report a progress value of 1000).
func (t *taskContext) calculateAndReportProgress(ctx context.Context) (err error) {
// Manage contention on the ReclusteringRun row by only periodically
// reporting progress.
if clock.Now(ctx).After(t.nextReportDue) {
progress, err := calculateProgress(t.task, t.currentChunkID)
if err != nil {
return errors.Annotate(err, "calculate progress").Err()
}
err = t.updateProgress(ctx, progress)
if err != nil {
return err
}
t.nextReportDue = t.nextReportDue.Add(ProgressInterval)
}
return nil
}
// updateProgress sets progress on the shard.
func (t *taskContext) updateProgress(ctx context.Context, value int) (err error) {
ctx, s := trace.StartSpan(ctx, "go.chromium.org/luci/analysis/internal/clustering/reclustering.updateProgress")
defer func() { s.End(err) }()
_, err = span.ReadWriteTransaction(ctx, func(ctx context.Context) error {
err = shards.UpdateProgress(ctx, t.task.ShardNumber, t.task.AttemptTime.AsTime(), value)
if err != nil {
return errors.Annotate(err, "update progress").Err()
}
return nil
})
if err != nil {
if status.Code(err) == codes.NotFound {
// If the row for the shard has been deleted (i.e. because
// we have overrun the end of our reclustering run), drop
// the progress update.
return nil
}
return err
}
return nil
}
// calculateProgress calculates the progress of the worker through the task.
// Progress is the proportion of the keyspace re-clustered, as a value between
// 0 and 1000 (i.e. 0 = 0%, 1000 = 100.0%).
// 1000 is never returned by this method as the value passed is the nextChunkID
// (i.e. the next chunkID to re-cluster), not the last completed chunk ID,
// which implies progress is not complete.
func calculateProgress(task *taskspb.ReclusterChunks, nextChunkID string) (int, error) | {
nextID, err := chunkIDAsBigInt(nextChunkID)
if err != nil {
return 0, err
}
startID, err := chunkIDAsBigInt(task.StartChunkId)
if err != nil {
return 0, err
}
endID, err := chunkIDAsBigInt(task.EndChunkId)
if err != nil {
return 0, err
}
if startID.Cmp(endID) >= 0 {
return 0, fmt.Errorf("end chunk ID %q is before or equal to start %q", task.EndChunkId, task.StartChunkId)
}
if nextID.Cmp(startID) <= 0 {
// Start is exclusive, not inclusive.
return 0, fmt.Errorf("next chunk ID %q is before or equal to start %q", nextChunkID, task.StartChunkId)
} | identifier_body |
|
upgrade_region.go | Upgrade configuration by reading BOM file to get the providers versions
// b) Get the providers information from the management cluster
// c) Prepare the providers upgrade information
// d) Call the clusterctl ApplyUpgrade() to upgrade providers
// e) Wait for providers to be up and running
// 2. call the UpgradeCluster() for upgrading the k8s version of the Management cluster
func (c *TkgClient) UpgradeManagementCluster(options *UpgradeClusterOptions) error {
contexts, err := c.GetRegionContexts(options.ClusterName)
if err != nil || len(contexts) == 0 {
return errors.Errorf("management cluster %s not found", options.ClusterName)
}
currentRegion := contexts[0]
options.Kubeconfig = currentRegion.SourceFilePath
if currentRegion.Status == region.Failed {
return errors.Errorf("cannot upgrade since deployment failed for management cluster %s", currentRegion.ClusterName)
}
regionalClusterClient, err := clusterclient.NewClient(currentRegion.SourceFilePath, currentRegion.ContextName, clusterclient.Options{OperationTimeout: c.timeout})
if err != nil {
return errors.Wrap(err, "unable to get cluster client while upgrading management cluster")
}
isPacific, err := regionalClusterClient.IsPacificRegionalCluster()
if err != nil {
return errors.Wrap(err, "error determining 'Tanzu Kubernetes Cluster service for vSphere' management cluster")
}
if isPacific {
return errors.New("upgrading 'Tanzu Kubernetes Cluster service for vSphere' management cluster is not yet supported")
}
// Validate the compatibility before upgrading management cluster
err = c.validateCompatibilityBeforeManagementClusterUpgrade(options, regionalClusterClient)
if err != nil {
return err
}
if err := c.configureVariablesForProvidersInstallation(regionalClusterClient); err != nil {
return errors.Wrap(err, "unable to configure variables for provider installation")
}
log.Info("Upgrading management cluster providers...")
providersUpgradeClient := providersupgradeclient.New(c.clusterctlClient)
if err = c.DoProvidersUpgrade(regionalClusterClient, currentRegion.ContextName, providersUpgradeClient, options); err != nil {
return errors.Wrap(err, "failed to upgrade management cluster providers")
}
// Wait for installed providers to get up and running
// TODO: Currently tkg doesn't support TargetNamespace and WatchingNamespace as it's not supporting multi-tenency of providers
// If we support it in future we need to make these namespaces as command line options and use here
waitOptions := waitForProvidersOptions{
Kubeconfig: options.Kubeconfig,
TargetNamespace: "",
WatchingNamespace: "",
}
err = c.WaitForProviders(regionalClusterClient, waitOptions)
if err != nil {
return errors.Wrap(err, "error waiting for provider components to be up and running after upgrading them")
}
log.Info("Management cluster providers upgraded successfully...")
log.Info("Upgrading management cluster kubernetes version...")
err = c.UpgradeCluster(options)
if err != nil {
return errors.Wrap(err, "unable to upgrade management cluster")
}
// Patch management cluster with the TKG version
err = regionalClusterClient.PatchClusterObjectWithTKGVersion(options.ClusterName, options.Namespace, c.tkgBomClient.GetCurrentTKGVersion())
if err != nil {
return err
}
return nil
}
func (c *TkgClient) configureVariablesForProvidersInstallation(regionalClusterClient clusterclient.Client) error {
err := c.configureImageTagsForProviderInstallation()
if err != nil {
return errors.Wrap(err, "failed to configure image tags for provider installation")
}
// If region client is not specified nothing to configure based on existing management cluster
if regionalClusterClient == nil {
return nil
}
infraProvider, err := regionalClusterClient.GetRegionalClusterDefaultProviderName(clusterctlv1.InfrastructureProviderType)
if err != nil {
return errors.Wrap(err, "failed to get cluster provider information.")
}
infraProviderName, _, err := ParseProviderName(infraProvider)
if err != nil {
return errors.Wrap(err, "failed to parse provider name")
}
// retrieve required variables required for infrastructure component spec rendering
// set them to default values if they don't exist.
err = c.RetrieveRegionalClusterConfiguration(regionalClusterClient)
if err != nil {
return errors.Wrap(err, "failed to set configurations for upgrade")
}
switch infraProviderName {
case AzureProviderName:
// since the templates needs Base64 values of credentials, encode them
if _, err := c.EncodeAzureCredentialsAndGetClient(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to encode azure credentials")
}
case AWSProviderName:
if _, err := c.EncodeAWSCredentialsAndGetClient(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to encode AWS credentials")
}
case VSphereProviderName:
if err := c.configureVsphereCredentialsFromCluster(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to configure Vsphere credentials")
}
case DockerProviderName:
// no variable configuration is needed to deploy Docker provider as
// infrastructure-components.yaml for docker does not require any variable
}
return nil
}
// DoProvidersUpgrade upgrades the providers of the management cluster
func (c *TkgClient) DoProvidersUpgrade(regionalClusterClient clusterclient.Client, ctx string,
providersUpgradeClient providersupgradeclient.Client, options *UpgradeClusterOptions) error {
// read the BOM file for latest providers version information to upgrade to
bomConfiguration, err := c.tkgBomClient.GetDefaultTkgBOMConfiguration()
if err != nil {
return errors.Wrap(err, "unable to read in configuration from BOM file")
}
pUpgradeInfo, err := c.getProvidersUpgradeInfo(regionalClusterClient, bomConfiguration)
if err != nil {
return errors.Wrap(err, "failed to get providers upgrade information")
}
if len(pUpgradeInfo.providers) == 0 {
log.Infof("All providers are up to date...")
return nil
}
pUpgradeApplyOptions, err := c.GenerateProvidersUpgradeOptions(pUpgradeInfo)
if err != nil {
return errors.Wrap(err, "failed to generate providers upgrade apply options")
}
// update the kubeconfig
pUpgradeApplyOptions.Kubeconfig.Path = options.Kubeconfig
pUpgradeApplyOptions.Kubeconfig.Context = ctx
log.V(6).Infof("clusterctl upgrade apply options: %+v", *pUpgradeApplyOptions)
clusterctlUpgradeOptions := clusterctl.ApplyUpgradeOptions(*pUpgradeApplyOptions)
err = providersUpgradeClient.ApplyUpgrade(&clusterctlUpgradeOptions)
if err != nil {
return errors.Wrap(err, "failed to apply providers upgrade")
}
return nil
}
// GenerateProvidersUpgradeOptions generates provider upgrade options
func (c *TkgClient) GenerateProvidersUpgradeOptions(pUpgradeInfo *providersUpgradeInfo) (*ApplyProvidersUpgradeOptions, error) | }
// getProvidersUpgradeInfo prepares the upgrade information by comparing the provider current version with and the upgradable version
// obtained from the BOM file.
func (c *TkgClient) getProvidersUpgradeInfo(regionalClusterClient clusterclient.Client, bomConfig *tkgconfigbom.BOMConfiguration) (*providersUpgradeInfo, error) {
pUpgradeInfo := &providersUpgradeInfo{}
// Get all the installed providers info
installedProviders := &clusterctlv1.ProviderList{}
err := regionalClusterClient.ListResources(installedProviders, &crtclient.ListOptions{})
if err != nil {
return nil, errors.Wrap(err, "cannot get installed provider config")
}
// get the management group
pUpgradeInfo.managementGroup, err = parseManagementGroup(installedProviders)
if err != nil {
return nil, errors.Wrap(err, "failed to parse the management group")
}
// get the providers Info with the version updated with the upgrade version obtained from BOM file map
upgradeProviderVersionMap := bomConfig.ProvidersVersionMap
// make a list of providers eligible for upgrade
for i := range installedProviders.Items {
// Note: provider.Name has the manifest label (eg:control-plane-kubeadm) and provider.ProviderName would not be ideal(eg:kubeadm)
// here as both bootstrap-kubeadm and control-plane-kubeadm has the same ProviderName as 'kubeadm'
latestVersion, ok := upgradeProviderVersionMap[installedProviders.Items[i].Name]
if !ok || latestVersion == "" {
log.Warningf(" %s provider's version is missing in BOM file, so | {
puo := &ApplyProvidersUpgradeOptions{}
puo.ManagementGroup = pUpgradeInfo.managementGroup
for i := range pUpgradeInfo.providers {
instanceVersion := pUpgradeInfo.providers[i].Namespace + "/" + pUpgradeInfo.providers[i].ProviderName + ":" + pUpgradeInfo.providers[i].Version
switch clusterctlv1.ProviderType(pUpgradeInfo.providers[i].Type) {
case clusterctlv1.CoreProviderType:
puo.CoreProvider = instanceVersion
case clusterctlv1.BootstrapProviderType:
puo.BootstrapProviders = append(puo.BootstrapProviders, instanceVersion)
case clusterctlv1.ControlPlaneProviderType:
puo.ControlPlaneProviders = append(puo.ControlPlaneProviders, instanceVersion)
case clusterctlv1.InfrastructureProviderType:
puo.InfrastructureProviders = append(puo.InfrastructureProviders, instanceVersion)
default:
return nil, errors.Errorf("unknown provider type: %s", pUpgradeInfo.providers[i].Type)
}
}
return puo, nil | identifier_body |
upgrade_region.go | != nil {
return errors.Wrap(err, "failed to get providers upgrade information")
}
if len(pUpgradeInfo.providers) == 0 {
log.Infof("All providers are up to date...")
return nil
}
pUpgradeApplyOptions, err := c.GenerateProvidersUpgradeOptions(pUpgradeInfo)
if err != nil {
return errors.Wrap(err, "failed to generate providers upgrade apply options")
}
// update the kubeconfig
pUpgradeApplyOptions.Kubeconfig.Path = options.Kubeconfig
pUpgradeApplyOptions.Kubeconfig.Context = ctx
log.V(6).Infof("clusterctl upgrade apply options: %+v", *pUpgradeApplyOptions)
clusterctlUpgradeOptions := clusterctl.ApplyUpgradeOptions(*pUpgradeApplyOptions)
err = providersUpgradeClient.ApplyUpgrade(&clusterctlUpgradeOptions)
if err != nil {
return errors.Wrap(err, "failed to apply providers upgrade")
}
return nil
}
// GenerateProvidersUpgradeOptions generates provider upgrade options
func (c *TkgClient) GenerateProvidersUpgradeOptions(pUpgradeInfo *providersUpgradeInfo) (*ApplyProvidersUpgradeOptions, error) {
puo := &ApplyProvidersUpgradeOptions{}
puo.ManagementGroup = pUpgradeInfo.managementGroup
for i := range pUpgradeInfo.providers {
instanceVersion := pUpgradeInfo.providers[i].Namespace + "/" + pUpgradeInfo.providers[i].ProviderName + ":" + pUpgradeInfo.providers[i].Version
switch clusterctlv1.ProviderType(pUpgradeInfo.providers[i].Type) {
case clusterctlv1.CoreProviderType:
puo.CoreProvider = instanceVersion
case clusterctlv1.BootstrapProviderType:
puo.BootstrapProviders = append(puo.BootstrapProviders, instanceVersion)
case clusterctlv1.ControlPlaneProviderType:
puo.ControlPlaneProviders = append(puo.ControlPlaneProviders, instanceVersion)
case clusterctlv1.InfrastructureProviderType:
puo.InfrastructureProviders = append(puo.InfrastructureProviders, instanceVersion)
default:
return nil, errors.Errorf("unknown provider type: %s", pUpgradeInfo.providers[i].Type)
}
}
return puo, nil
}
// getProvidersUpgradeInfo prepares the upgrade information by comparing the provider current version with and the upgradable version
// obtained from the BOM file.
func (c *TkgClient) getProvidersUpgradeInfo(regionalClusterClient clusterclient.Client, bomConfig *tkgconfigbom.BOMConfiguration) (*providersUpgradeInfo, error) {
pUpgradeInfo := &providersUpgradeInfo{}
// Get all the installed providers info
installedProviders := &clusterctlv1.ProviderList{}
err := regionalClusterClient.ListResources(installedProviders, &crtclient.ListOptions{})
if err != nil {
return nil, errors.Wrap(err, "cannot get installed provider config")
}
// get the management group
pUpgradeInfo.managementGroup, err = parseManagementGroup(installedProviders)
if err != nil {
return nil, errors.Wrap(err, "failed to parse the management group")
}
// get the providers Info with the version updated with the upgrade version obtained from BOM file map
upgradeProviderVersionMap := bomConfig.ProvidersVersionMap
// make a list of providers eligible for upgrade
for i := range installedProviders.Items {
// Note: provider.Name has the manifest label (eg:control-plane-kubeadm) and provider.ProviderName would not be ideal(eg:kubeadm)
// here as both bootstrap-kubeadm and control-plane-kubeadm has the same ProviderName as 'kubeadm'
latestVersion, ok := upgradeProviderVersionMap[installedProviders.Items[i].Name]
if !ok || latestVersion == "" {
log.Warningf(" %s provider's version is missing in BOM file, so it would not be upgraded ", installedProviders.Items[i].Name)
continue
}
latestSemVersion, err := version.ParseSemantic(latestVersion)
if err != nil {
log.Warningf("failed to parse %s provider's upgrade version, so it would not be upgraded ", installedProviders.Items[i].Name)
continue
}
currentSemVersion, err := version.ParseSemantic(installedProviders.Items[i].Version)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse %s provider's current version", installedProviders.Items[i].Name)
}
if latestSemVersion.LessThan(currentSemVersion) {
log.V(1).Infof("%s provider's upgrade version %s is less than current version %s, so skipping it for upgrade ",
installedProviders.Items[i].ProviderName, latestVersion, installedProviders.Items[i].Version)
continue
}
// update the provider to the latest version to be upgraded
installedProviders.Items[i].Version = fmt.Sprintf("v%v.%v.%v", latestSemVersion.Major(), latestSemVersion.Minor(), latestSemVersion.Patch())
pUpgradeInfo.providers = append(pUpgradeInfo.providers, installedProviders.Items[i])
}
return pUpgradeInfo, nil
}
func parseManagementGroup(installedProviders *clusterctlv1.ProviderList) (string, error) {
for i := range installedProviders.Items {
if clusterctlv1.ProviderType(installedProviders.Items[i].Type) == clusterctlv1.CoreProviderType {
mgmtGroupName := installedProviders.Items[i].InstanceName()
return mgmtGroupName, nil
}
}
return "", errors.New("failed to find core provider from the current providers")
}
// WaitForAddonsDeployments wait for addons deployments
func (c *TkgClient) WaitForAddonsDeployments(clusterClient clusterclient.Client) error {
group, _ := errgroup.WithContext(context.Background())
group.Go(
func() error {
err := clusterClient.WaitForDeployment(constants.TkrControllerDeploymentName, constants.TkrNamespace)
if err != nil {
log.V(3).Warningf("Failed waiting for deployment %s", constants.TkrControllerDeploymentName)
}
return err
})
group.Go(
func() error {
err := clusterClient.WaitForDeployment(constants.KappControllerDeploymentName, constants.KappControllerNamespace)
if err != nil {
log.V(3).Warningf("Failed waiting for deployment %s", constants.KappControllerDeploymentName)
}
return err
})
group.Go(
func() error {
err := clusterClient.WaitForDeployment(constants.AddonsManagerDeploymentName, constants.KappControllerNamespace)
if err != nil {
log.V(3).Warningf("Failed waiting for deployment %s", constants.AddonsManagerDeploymentName)
}
return err
})
err := group.Wait()
if err != nil {
return errors.Wrap(err, "Failed waiting for at least one CRS deployment, check logs for more detail.")
}
return nil
}
// WaitForPackages wait for packages to be up and running
func (c *TkgClient) WaitForPackages(regionalClusterClient, currentClusterClient clusterclient.Client, clusterName, namespace string) error {
// Adding kapp-controller package to the exclude list
// For management cluster, kapp-controller is deployed using CRS and addon secret does not exist
// For workload cluster, kapp-controller is deployed by addons manager. Even though the
// addon secret for kapp-controller exists, it is not deployed using PackageInstall.
// Hence skipping it while waiting for packages.
ListExcludePackageInstallsFromWait := []string{constants.KappControllerPackageName}
// Get the list of addons secrets
secretList := &corev1.SecretList{}
err := regionalClusterClient.ListResources(secretList, &crtclient.ListOptions{Namespace: namespace})
if err != nil {
return errors.Wrap(err, "unable to get list of secrets")
}
// From the addons secret get the names of package installs for each addon secret
// This is determined from the "tkg.tanzu.vmware.com/addon-name" label on the secret
packageInstallNames := []string{}
for i := range secretList.Items {
if secretList.Items[i].Type == constants.AddonSecretType {
if cn, exists := secretList.Items[i].Labels[constants.ClusterNameLabel]; exists && cn == clusterName {
if addonName, exists := secretList.Items[i].Labels[constants.AddonNameLabel]; exists {
if !utils.ContainsString(ListExcludePackageInstallsFromWait, addonName) {
packageInstallNames = append(packageInstallNames, addonName)
}
}
}
}
}
// Start waiting for all packages in parallel using group.Wait
// Note: As PackageInstall resources are created in the cluster itself
// we are using currentClusterClient which will point to correct cluster
group, _ := errgroup.WithContext(context.Background())
for _, packageName := range packageInstallNames {
pn := packageName
log.V(3).Warningf("Waiting for package: %s", pn)
group.Go(
func() error {
err := currentClusterClient.WaitForPackageInstall(pn, constants.TkgNamespace, c.getPackageInstallTimeoutFromConfig())
if err != nil {
log.V(3).Warningf("Failure while waiting for package '%s'", pn)
} else {
log.V(3).Infof("Successfully reconciled package: %s", pn)
}
return err
})
}
err = group.Wait()
if err != nil {
return errors.Wrap(err, "Failure while waiting for packages to be installed")
}
return nil
}
func (c *TkgClient) | getPackageInstallTimeoutFromConfig | identifier_name |
|
upgrade_region.go | Upgrade configuration by reading BOM file to get the providers versions
// b) Get the providers information from the management cluster
// c) Prepare the providers upgrade information
// d) Call the clusterctl ApplyUpgrade() to upgrade providers
// e) Wait for providers to be up and running
// 2. call the UpgradeCluster() for upgrading the k8s version of the Management cluster
func (c *TkgClient) UpgradeManagementCluster(options *UpgradeClusterOptions) error {
contexts, err := c.GetRegionContexts(options.ClusterName)
if err != nil || len(contexts) == 0 {
return errors.Errorf("management cluster %s not found", options.ClusterName)
}
currentRegion := contexts[0]
options.Kubeconfig = currentRegion.SourceFilePath
if currentRegion.Status == region.Failed {
return errors.Errorf("cannot upgrade since deployment failed for management cluster %s", currentRegion.ClusterName)
}
regionalClusterClient, err := clusterclient.NewClient(currentRegion.SourceFilePath, currentRegion.ContextName, clusterclient.Options{OperationTimeout: c.timeout})
if err != nil {
return errors.Wrap(err, "unable to get cluster client while upgrading management cluster")
}
isPacific, err := regionalClusterClient.IsPacificRegionalCluster()
if err != nil |
if isPacific {
return errors.New("upgrading 'Tanzu Kubernetes Cluster service for vSphere' management cluster is not yet supported")
}
// Validate the compatibility before upgrading management cluster
err = c.validateCompatibilityBeforeManagementClusterUpgrade(options, regionalClusterClient)
if err != nil {
return err
}
if err := c.configureVariablesForProvidersInstallation(regionalClusterClient); err != nil {
return errors.Wrap(err, "unable to configure variables for provider installation")
}
log.Info("Upgrading management cluster providers...")
providersUpgradeClient := providersupgradeclient.New(c.clusterctlClient)
if err = c.DoProvidersUpgrade(regionalClusterClient, currentRegion.ContextName, providersUpgradeClient, options); err != nil {
return errors.Wrap(err, "failed to upgrade management cluster providers")
}
// Wait for installed providers to get up and running
// TODO: Currently tkg doesn't support TargetNamespace and WatchingNamespace as it's not supporting multi-tenency of providers
// If we support it in future we need to make these namespaces as command line options and use here
waitOptions := waitForProvidersOptions{
Kubeconfig: options.Kubeconfig,
TargetNamespace: "",
WatchingNamespace: "",
}
err = c.WaitForProviders(regionalClusterClient, waitOptions)
if err != nil {
return errors.Wrap(err, "error waiting for provider components to be up and running after upgrading them")
}
log.Info("Management cluster providers upgraded successfully...")
log.Info("Upgrading management cluster kubernetes version...")
err = c.UpgradeCluster(options)
if err != nil {
return errors.Wrap(err, "unable to upgrade management cluster")
}
// Patch management cluster with the TKG version
err = regionalClusterClient.PatchClusterObjectWithTKGVersion(options.ClusterName, options.Namespace, c.tkgBomClient.GetCurrentTKGVersion())
if err != nil {
return err
}
return nil
}
func (c *TkgClient) configureVariablesForProvidersInstallation(regionalClusterClient clusterclient.Client) error {
err := c.configureImageTagsForProviderInstallation()
if err != nil {
return errors.Wrap(err, "failed to configure image tags for provider installation")
}
// If region client is not specified nothing to configure based on existing management cluster
if regionalClusterClient == nil {
return nil
}
infraProvider, err := regionalClusterClient.GetRegionalClusterDefaultProviderName(clusterctlv1.InfrastructureProviderType)
if err != nil {
return errors.Wrap(err, "failed to get cluster provider information.")
}
infraProviderName, _, err := ParseProviderName(infraProvider)
if err != nil {
return errors.Wrap(err, "failed to parse provider name")
}
// retrieve required variables required for infrastructure component spec rendering
// set them to default values if they don't exist.
err = c.RetrieveRegionalClusterConfiguration(regionalClusterClient)
if err != nil {
return errors.Wrap(err, "failed to set configurations for upgrade")
}
switch infraProviderName {
case AzureProviderName:
// since the templates needs Base64 values of credentials, encode them
if _, err := c.EncodeAzureCredentialsAndGetClient(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to encode azure credentials")
}
case AWSProviderName:
if _, err := c.EncodeAWSCredentialsAndGetClient(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to encode AWS credentials")
}
case VSphereProviderName:
if err := c.configureVsphereCredentialsFromCluster(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to configure Vsphere credentials")
}
case DockerProviderName:
// no variable configuration is needed to deploy Docker provider as
// infrastructure-components.yaml for docker does not require any variable
}
return nil
}
// DoProvidersUpgrade upgrades the providers of the management cluster
func (c *TkgClient) DoProvidersUpgrade(regionalClusterClient clusterclient.Client, ctx string,
providersUpgradeClient providersupgradeclient.Client, options *UpgradeClusterOptions) error {
// read the BOM file for latest providers version information to upgrade to
bomConfiguration, err := c.tkgBomClient.GetDefaultTkgBOMConfiguration()
if err != nil {
return errors.Wrap(err, "unable to read in configuration from BOM file")
}
pUpgradeInfo, err := c.getProvidersUpgradeInfo(regionalClusterClient, bomConfiguration)
if err != nil {
return errors.Wrap(err, "failed to get providers upgrade information")
}
if len(pUpgradeInfo.providers) == 0 {
log.Infof("All providers are up to date...")
return nil
}
pUpgradeApplyOptions, err := c.GenerateProvidersUpgradeOptions(pUpgradeInfo)
if err != nil {
return errors.Wrap(err, "failed to generate providers upgrade apply options")
}
// update the kubeconfig
pUpgradeApplyOptions.Kubeconfig.Path = options.Kubeconfig
pUpgradeApplyOptions.Kubeconfig.Context = ctx
log.V(6).Infof("clusterctl upgrade apply options: %+v", *pUpgradeApplyOptions)
clusterctlUpgradeOptions := clusterctl.ApplyUpgradeOptions(*pUpgradeApplyOptions)
err = providersUpgradeClient.ApplyUpgrade(&clusterctlUpgradeOptions)
if err != nil {
return errors.Wrap(err, "failed to apply providers upgrade")
}
return nil
}
// GenerateProvidersUpgradeOptions generates provider upgrade options
func (c *TkgClient) GenerateProvidersUpgradeOptions(pUpgradeInfo *providersUpgradeInfo) (*ApplyProvidersUpgradeOptions, error) {
puo := &ApplyProvidersUpgradeOptions{}
puo.ManagementGroup = pUpgradeInfo.managementGroup
for i := range pUpgradeInfo.providers {
instanceVersion := pUpgradeInfo.providers[i].Namespace + "/" + pUpgradeInfo.providers[i].ProviderName + ":" + pUpgradeInfo.providers[i].Version
switch clusterctlv1.ProviderType(pUpgradeInfo.providers[i].Type) {
case clusterctlv1.CoreProviderType:
puo.CoreProvider = instanceVersion
case clusterctlv1.BootstrapProviderType:
puo.BootstrapProviders = append(puo.BootstrapProviders, instanceVersion)
case clusterctlv1.ControlPlaneProviderType:
puo.ControlPlaneProviders = append(puo.ControlPlaneProviders, instanceVersion)
case clusterctlv1.InfrastructureProviderType:
puo.InfrastructureProviders = append(puo.InfrastructureProviders, instanceVersion)
default:
return nil, errors.Errorf("unknown provider type: %s", pUpgradeInfo.providers[i].Type)
}
}
return puo, nil
}
// getProvidersUpgradeInfo prepares the upgrade information by comparing the provider current version with and the upgradable version
// obtained from the BOM file.
func (c *TkgClient) getProvidersUpgradeInfo(regionalClusterClient clusterclient.Client, bomConfig *tkgconfigbom.BOMConfiguration) (*providersUpgradeInfo, error) {
pUpgradeInfo := &providersUpgradeInfo{}
// Get all the installed providers info
installedProviders := &clusterctlv1.ProviderList{}
err := regionalClusterClient.ListResources(installedProviders, &crtclient.ListOptions{})
if err != nil {
return nil, errors.Wrap(err, "cannot get installed provider config")
}
// get the management group
pUpgradeInfo.managementGroup, err = parseManagementGroup(installedProviders)
if err != nil {
return nil, errors.Wrap(err, "failed to parse the management group")
}
// get the providers Info with the version updated with the upgrade version obtained from BOM file map
upgradeProviderVersionMap := bomConfig.ProvidersVersionMap
// make a list of providers eligible for upgrade
for i := range installedProviders.Items {
// Note: provider.Name has the manifest label (eg:control-plane-kubeadm) and provider.ProviderName would not be ideal(eg:kubeadm)
// here as both bootstrap-kubeadm and control-plane-kubeadm has the same ProviderName as 'kubeadm'
latestVersion, ok := upgradeProviderVersionMap[installedProviders.Items[i].Name]
if !ok || latestVersion == "" {
log.Warningf(" %s provider's version is missing in BOM file | {
return errors.Wrap(err, "error determining 'Tanzu Kubernetes Cluster service for vSphere' management cluster")
} | conditional_block |
upgrade_region.go | the Upgrade configuration by reading BOM file to get the providers versions
// b) Get the providers information from the management cluster
// c) Prepare the providers upgrade information
// d) Call the clusterctl ApplyUpgrade() to upgrade providers
// e) Wait for providers to be up and running
// 2. call the UpgradeCluster() for upgrading the k8s version of the Management cluster
func (c *TkgClient) UpgradeManagementCluster(options *UpgradeClusterOptions) error {
contexts, err := c.GetRegionContexts(options.ClusterName)
if err != nil || len(contexts) == 0 {
return errors.Errorf("management cluster %s not found", options.ClusterName)
}
currentRegion := contexts[0]
options.Kubeconfig = currentRegion.SourceFilePath
if currentRegion.Status == region.Failed {
return errors.Errorf("cannot upgrade since deployment failed for management cluster %s", currentRegion.ClusterName)
}
regionalClusterClient, err := clusterclient.NewClient(currentRegion.SourceFilePath, currentRegion.ContextName, clusterclient.Options{OperationTimeout: c.timeout})
if err != nil {
return errors.Wrap(err, "unable to get cluster client while upgrading management cluster")
}
isPacific, err := regionalClusterClient.IsPacificRegionalCluster()
if err != nil {
return errors.Wrap(err, "error determining 'Tanzu Kubernetes Cluster service for vSphere' management cluster")
}
if isPacific {
return errors.New("upgrading 'Tanzu Kubernetes Cluster service for vSphere' management cluster is not yet supported")
}
// Validate the compatibility before upgrading management cluster
err = c.validateCompatibilityBeforeManagementClusterUpgrade(options, regionalClusterClient)
if err != nil {
return err
}
if err := c.configureVariablesForProvidersInstallation(regionalClusterClient); err != nil {
return errors.Wrap(err, "unable to configure variables for provider installation")
}
log.Info("Upgrading management cluster providers...")
providersUpgradeClient := providersupgradeclient.New(c.clusterctlClient)
if err = c.DoProvidersUpgrade(regionalClusterClient, currentRegion.ContextName, providersUpgradeClient, options); err != nil {
return errors.Wrap(err, "failed to upgrade management cluster providers")
}
// Wait for installed providers to get up and running
// TODO: Currently tkg doesn't support TargetNamespace and WatchingNamespace as it's not supporting multi-tenency of providers
// If we support it in future we need to make these namespaces as command line options and use here
waitOptions := waitForProvidersOptions{
Kubeconfig: options.Kubeconfig,
TargetNamespace: "",
WatchingNamespace: "",
}
err = c.WaitForProviders(regionalClusterClient, waitOptions)
if err != nil {
return errors.Wrap(err, "error waiting for provider components to be up and running after upgrading them")
}
log.Info("Management cluster providers upgraded successfully...")
log.Info("Upgrading management cluster kubernetes version...")
err = c.UpgradeCluster(options)
if err != nil {
return errors.Wrap(err, "unable to upgrade management cluster")
}
// Patch management cluster with the TKG version
err = regionalClusterClient.PatchClusterObjectWithTKGVersion(options.ClusterName, options.Namespace, c.tkgBomClient.GetCurrentTKGVersion())
if err != nil {
return err
}
return nil
}
func (c *TkgClient) configureVariablesForProvidersInstallation(regionalClusterClient clusterclient.Client) error {
err := c.configureImageTagsForProviderInstallation()
if err != nil {
return errors.Wrap(err, "failed to configure image tags for provider installation")
}
// If region client is not specified nothing to configure based on existing management cluster
if regionalClusterClient == nil {
return nil
}
infraProvider, err := regionalClusterClient.GetRegionalClusterDefaultProviderName(clusterctlv1.InfrastructureProviderType)
if err != nil {
return errors.Wrap(err, "failed to get cluster provider information.")
}
infraProviderName, _, err := ParseProviderName(infraProvider)
if err != nil {
return errors.Wrap(err, "failed to parse provider name")
}
// retrieve required variables required for infrastructure component spec rendering
// set them to default values if they don't exist.
err = c.RetrieveRegionalClusterConfiguration(regionalClusterClient)
if err != nil {
return errors.Wrap(err, "failed to set configurations for upgrade")
}
switch infraProviderName {
case AzureProviderName:
// since the templates needs Base64 values of credentials, encode them
if _, err := c.EncodeAzureCredentialsAndGetClient(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to encode azure credentials")
}
case AWSProviderName:
if _, err := c.EncodeAWSCredentialsAndGetClient(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to encode AWS credentials")
}
case VSphereProviderName:
if err := c.configureVsphereCredentialsFromCluster(regionalClusterClient); err != nil {
return errors.Wrap(err, "failed to configure Vsphere credentials")
}
case DockerProviderName:
// no variable configuration is needed to deploy Docker provider as
// infrastructure-components.yaml for docker does not require any variable
}
return nil
}
// DoProvidersUpgrade upgrades the providers of the management cluster
func (c *TkgClient) DoProvidersUpgrade(regionalClusterClient clusterclient.Client, ctx string,
providersUpgradeClient providersupgradeclient.Client, options *UpgradeClusterOptions) error {
// read the BOM file for latest providers version information to upgrade to
bomConfiguration, err := c.tkgBomClient.GetDefaultTkgBOMConfiguration()
if err != nil {
return errors.Wrap(err, "unable to read in configuration from BOM file")
}
pUpgradeInfo, err := c.getProvidersUpgradeInfo(regionalClusterClient, bomConfiguration)
if err != nil {
return errors.Wrap(err, "failed to get providers upgrade information")
}
if len(pUpgradeInfo.providers) == 0 {
log.Infof("All providers are up to date...")
return nil
}
pUpgradeApplyOptions, err := c.GenerateProvidersUpgradeOptions(pUpgradeInfo)
if err != nil {
return errors.Wrap(err, "failed to generate providers upgrade apply options")
}
// update the kubeconfig
pUpgradeApplyOptions.Kubeconfig.Path = options.Kubeconfig
pUpgradeApplyOptions.Kubeconfig.Context = ctx
log.V(6).Infof("clusterctl upgrade apply options: %+v", *pUpgradeApplyOptions)
clusterctlUpgradeOptions := clusterctl.ApplyUpgradeOptions(*pUpgradeApplyOptions)
err = providersUpgradeClient.ApplyUpgrade(&clusterctlUpgradeOptions)
if err != nil {
return errors.Wrap(err, "failed to apply providers upgrade")
}
return nil
}
// GenerateProvidersUpgradeOptions generates provider upgrade options | instanceVersion := pUpgradeInfo.providers[i].Namespace + "/" + pUpgradeInfo.providers[i].ProviderName + ":" + pUpgradeInfo.providers[i].Version
switch clusterctlv1.ProviderType(pUpgradeInfo.providers[i].Type) {
case clusterctlv1.CoreProviderType:
puo.CoreProvider = instanceVersion
case clusterctlv1.BootstrapProviderType:
puo.BootstrapProviders = append(puo.BootstrapProviders, instanceVersion)
case clusterctlv1.ControlPlaneProviderType:
puo.ControlPlaneProviders = append(puo.ControlPlaneProviders, instanceVersion)
case clusterctlv1.InfrastructureProviderType:
puo.InfrastructureProviders = append(puo.InfrastructureProviders, instanceVersion)
default:
return nil, errors.Errorf("unknown provider type: %s", pUpgradeInfo.providers[i].Type)
}
}
return puo, nil
}
// getProvidersUpgradeInfo prepares the upgrade information by comparing the provider current version with and the upgradable version
// obtained from the BOM file.
func (c *TkgClient) getProvidersUpgradeInfo(regionalClusterClient clusterclient.Client, bomConfig *tkgconfigbom.BOMConfiguration) (*providersUpgradeInfo, error) {
pUpgradeInfo := &providersUpgradeInfo{}
// Get all the installed providers info
installedProviders := &clusterctlv1.ProviderList{}
err := regionalClusterClient.ListResources(installedProviders, &crtclient.ListOptions{})
if err != nil {
return nil, errors.Wrap(err, "cannot get installed provider config")
}
// get the management group
pUpgradeInfo.managementGroup, err = parseManagementGroup(installedProviders)
if err != nil {
return nil, errors.Wrap(err, "failed to parse the management group")
}
// get the providers Info with the version updated with the upgrade version obtained from BOM file map
upgradeProviderVersionMap := bomConfig.ProvidersVersionMap
// make a list of providers eligible for upgrade
for i := range installedProviders.Items {
// Note: provider.Name has the manifest label (eg:control-plane-kubeadm) and provider.ProviderName would not be ideal(eg:kubeadm)
// here as both bootstrap-kubeadm and control-plane-kubeadm has the same ProviderName as 'kubeadm'
latestVersion, ok := upgradeProviderVersionMap[installedProviders.Items[i].Name]
if !ok || latestVersion == "" {
log.Warningf(" %s provider's version is missing in BOM file, so | func (c *TkgClient) GenerateProvidersUpgradeOptions(pUpgradeInfo *providersUpgradeInfo) (*ApplyProvidersUpgradeOptions, error) {
puo := &ApplyProvidersUpgradeOptions{}
puo.ManagementGroup = pUpgradeInfo.managementGroup
for i := range pUpgradeInfo.providers { | random_line_split |
mod.rs | mod base;
mod breakpoints;
mod desc;
mod monitor;
mod resume;
mod thread;
mod traits;
mod utils;
use super::arch::RuntimeArch;
use crate::{BreakpointCause, CoreStatus, Error, HaltReason, Session};
use gdbstub::stub::state_machine::GdbStubStateMachine;
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::num::NonZeroUsize;
use std::sync::Mutex;
use std::time::Duration;
use gdbstub::common::Signal;
use gdbstub::conn::ConnectionExt;
use gdbstub::stub::{GdbStub, MultiThreadStopReason};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::breakpoints::BreakpointsOps;
use gdbstub::target::ext::memory_map::MemoryMapOps;
use gdbstub::target::ext::monitor_cmd::MonitorCmdOps;
use gdbstub::target::ext::target_description_xml_override::TargetDescriptionXmlOverrideOps;
use gdbstub::target::Target;
pub(crate) use traits::{GdbErrorExt, ProbeRsErrorExt};
use desc::TargetDescription;
/// Actions for resuming a core
#[derive(Debug, Copy, Clone)]
pub(crate) enum ResumeAction {
/// Don't change the state
Unchanged,
/// Resume core
Resume,
/// Single step core
Step,
}
/// The top level gdbstub target for a probe-rs debug session
pub(crate) struct RuntimeTarget<'a> {
/// The probe-rs session object
session: &'a Mutex<Session>,
/// A list of core IDs for this stub
cores: Vec<usize>,
/// TCP listener accepting incoming connections
listener: TcpListener,
/// The current GDB stub state machine
gdb: Option<GdbStubStateMachine<'a, RuntimeTarget<'a>, TcpStream>>,
/// Resume action to be used upon a continue request
resume_action: (usize, ResumeAction),
/// Description of target's architecture and registers
target_desc: TargetDescription,
}
impl<'a> RuntimeTarget<'a> {
/// Create a new RuntimeTarget and get ready to start processing GDB input
pub fn new(
session: &'a Mutex<Session>,
cores: Vec<usize>,
addrs: &[SocketAddr],
) -> Result<Self, Error> {
let listener = TcpListener::bind(addrs).into_error()?;
listener.set_nonblocking(true).into_error()?;
Ok(Self {
session,
cores,
listener,
gdb: None,
resume_action: (0, ResumeAction::Unchanged),
target_desc: TargetDescription::default(),
})
}
/// Process any pending work for this target
///
/// Returns: Duration to wait before processing this target again
pub fn process(&mut self) -> Result<Duration, Error> {
// State 1 - unconnected
if self.gdb.is_none() {
// See if we have a connection
match self.listener.accept() {
Ok((s, addr)) => {
tracing::info!("New connection from {:#?}", addr);
for i in 0..self.cores.len() {
let core_id = self.cores[i];
// When we first attach to the core, GDB expects us to halt the core, so we do this here when a new client connects.
// If the core is already halted, nothing happens if we issue a halt command again, so we always do this no matter of core state.
self.session
.lock()
.unwrap()
.core(core_id)?
.halt(Duration::from_millis(100))?;
self.load_target_desc()?;
}
// Start the GDB Stub state machine
let stub = GdbStub::<RuntimeTarget, _>::new(s);
match stub.run_state_machine(self) {
Ok(gdbstub) => {
self.gdb = Some(gdbstub);
}
Err(e) => {
// Any errors at this state are either IO errors or fatal config errors
return Err(anyhow::Error::from(e).into());
}
};
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// No connection yet
return Ok(Duration::from_millis(10));
}
Err(e) => {
// Fatal error
return Err(anyhow::Error::from(e).into());
}
};
}
// Stage 2 - connected
if self.gdb.is_some() {
let mut wait_time = Duration::ZERO;
let gdb = self.gdb.take().unwrap();
self.gdb = match gdb {
GdbStubStateMachine::Idle(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
GdbStubStateMachine::Running(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
// Check for break
let mut stop_reason: Option<MultiThreadStopReason<u64>> = None;
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
let status = core.status()?;
if let CoreStatus::Halted(reason) = status {
let tid = NonZeroUsize::new(i + 1).unwrap();
stop_reason = Some(match reason {
HaltReason::Breakpoint(BreakpointCause::Hardware)
| HaltReason::Breakpoint(BreakpointCause::Unknown) => {
// Some architectures do not allow us to distinguish between hardware and software breakpoints, so we just treat `Unknown` as hardware breakpoints.
MultiThreadStopReason::HwBreak(tid)
}
HaltReason::Step => MultiThreadStopReason::DoneStep,
_ => MultiThreadStopReason::SignalWithThread {
tid,
signal: Signal::SIGINT,
},
});
break;
}
}
// halt all remaining cores that are still running
// GDB expects all or nothing stops
if stop_reason.is_some() {
for i in &self.cores {
let mut core = session.core(*i)?;
if !core.core_halted()? {
core.halt(Duration::from_millis(100))?;
}
}
}
}
if let Some(reason) = stop_reason {
Some(state.report_stop(self, reason).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
}
GdbStubStateMachine::CtrlCInterrupt(state) => {
// Break core, handle interrupt
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
core.halt(Duration::from_millis(100))?;
}
}
Some(
state
.interrupt_handled(
self,
Some(MultiThreadStopReason::Signal(Signal::SIGINT)),
)
.into_error()?,
)
}
GdbStubStateMachine::Disconnected(state) => {
tracing::info!("GDB client disconnected: {:?}", state.get_reason());
None
}
};
return Ok(wait_time);
}
Ok(Duration::ZERO)
}
}
impl Target for RuntimeTarget<'_> { | }
fn support_target_description_xml_override(
&mut self,
) -> Option<TargetDescriptionXmlOverrideOps<'_, Self>> {
Some(self)
}
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<'_, Self>> {
Some(self)
}
fn support_memory_map(&mut self) -> Option<MemoryMapOps<'_, Self>> {
Some(self)
}
fn support_monitor_cmd(&mut self) -> Option<MonitorCmdOps<'_, Self>> {
Some(self)
}
fn guard_rail_implicit_sw_breakpoints(&self) -> bool {
true
}
}
/// Read a byte from a stream if available, otherwise return None
fn read_if_available(conn: &mut TcpStream) -> Result<Option<u8>, Error> {
match conn.peek() {
Ok(p) => {
// Unwrap is safe because peek already showed
// there's data in the buffer
match p {
Some(_) => conn.read().map(Some).into_error(),
None => Ok(None),
}
}
Err(e) => Err(anyhow::Error::from(e).into()),
}
} | type Arch = RuntimeArch;
type Error = Error;
fn base_ops(&mut self) -> BaseOps<'_, Self::Arch, Self::Error> {
BaseOps::MultiThread(self) | random_line_split |
mod.rs | mod base;
mod breakpoints;
mod desc;
mod monitor;
mod resume;
mod thread;
mod traits;
mod utils;
use super::arch::RuntimeArch;
use crate::{BreakpointCause, CoreStatus, Error, HaltReason, Session};
use gdbstub::stub::state_machine::GdbStubStateMachine;
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::num::NonZeroUsize;
use std::sync::Mutex;
use std::time::Duration;
use gdbstub::common::Signal;
use gdbstub::conn::ConnectionExt;
use gdbstub::stub::{GdbStub, MultiThreadStopReason};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::breakpoints::BreakpointsOps;
use gdbstub::target::ext::memory_map::MemoryMapOps;
use gdbstub::target::ext::monitor_cmd::MonitorCmdOps;
use gdbstub::target::ext::target_description_xml_override::TargetDescriptionXmlOverrideOps;
use gdbstub::target::Target;
pub(crate) use traits::{GdbErrorExt, ProbeRsErrorExt};
use desc::TargetDescription;
/// Actions for resuming a core
#[derive(Debug, Copy, Clone)]
pub(crate) enum ResumeAction {
/// Don't change the state
Unchanged,
/// Resume core
Resume,
/// Single step core
Step,
}
/// The top level gdbstub target for a probe-rs debug session
pub(crate) struct | <'a> {
/// The probe-rs session object
session: &'a Mutex<Session>,
/// A list of core IDs for this stub
cores: Vec<usize>,
/// TCP listener accepting incoming connections
listener: TcpListener,
/// The current GDB stub state machine
gdb: Option<GdbStubStateMachine<'a, RuntimeTarget<'a>, TcpStream>>,
/// Resume action to be used upon a continue request
resume_action: (usize, ResumeAction),
/// Description of target's architecture and registers
target_desc: TargetDescription,
}
impl<'a> RuntimeTarget<'a> {
/// Create a new RuntimeTarget and get ready to start processing GDB input
pub fn new(
session: &'a Mutex<Session>,
cores: Vec<usize>,
addrs: &[SocketAddr],
) -> Result<Self, Error> {
let listener = TcpListener::bind(addrs).into_error()?;
listener.set_nonblocking(true).into_error()?;
Ok(Self {
session,
cores,
listener,
gdb: None,
resume_action: (0, ResumeAction::Unchanged),
target_desc: TargetDescription::default(),
})
}
/// Process any pending work for this target
///
/// Returns: Duration to wait before processing this target again
pub fn process(&mut self) -> Result<Duration, Error> {
// State 1 - unconnected
if self.gdb.is_none() {
// See if we have a connection
match self.listener.accept() {
Ok((s, addr)) => {
tracing::info!("New connection from {:#?}", addr);
for i in 0..self.cores.len() {
let core_id = self.cores[i];
// When we first attach to the core, GDB expects us to halt the core, so we do this here when a new client connects.
// If the core is already halted, nothing happens if we issue a halt command again, so we always do this no matter of core state.
self.session
.lock()
.unwrap()
.core(core_id)?
.halt(Duration::from_millis(100))?;
self.load_target_desc()?;
}
// Start the GDB Stub state machine
let stub = GdbStub::<RuntimeTarget, _>::new(s);
match stub.run_state_machine(self) {
Ok(gdbstub) => {
self.gdb = Some(gdbstub);
}
Err(e) => {
// Any errors at this state are either IO errors or fatal config errors
return Err(anyhow::Error::from(e).into());
}
};
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// No connection yet
return Ok(Duration::from_millis(10));
}
Err(e) => {
// Fatal error
return Err(anyhow::Error::from(e).into());
}
};
}
// Stage 2 - connected
if self.gdb.is_some() {
let mut wait_time = Duration::ZERO;
let gdb = self.gdb.take().unwrap();
self.gdb = match gdb {
GdbStubStateMachine::Idle(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
GdbStubStateMachine::Running(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
// Check for break
let mut stop_reason: Option<MultiThreadStopReason<u64>> = None;
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
let status = core.status()?;
if let CoreStatus::Halted(reason) = status {
let tid = NonZeroUsize::new(i + 1).unwrap();
stop_reason = Some(match reason {
HaltReason::Breakpoint(BreakpointCause::Hardware)
| HaltReason::Breakpoint(BreakpointCause::Unknown) => {
// Some architectures do not allow us to distinguish between hardware and software breakpoints, so we just treat `Unknown` as hardware breakpoints.
MultiThreadStopReason::HwBreak(tid)
}
HaltReason::Step => MultiThreadStopReason::DoneStep,
_ => MultiThreadStopReason::SignalWithThread {
tid,
signal: Signal::SIGINT,
},
});
break;
}
}
// halt all remaining cores that are still running
// GDB expects all or nothing stops
if stop_reason.is_some() {
for i in &self.cores {
let mut core = session.core(*i)?;
if !core.core_halted()? {
core.halt(Duration::from_millis(100))?;
}
}
}
}
if let Some(reason) = stop_reason {
Some(state.report_stop(self, reason).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
}
GdbStubStateMachine::CtrlCInterrupt(state) => {
// Break core, handle interrupt
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
core.halt(Duration::from_millis(100))?;
}
}
Some(
state
.interrupt_handled(
self,
Some(MultiThreadStopReason::Signal(Signal::SIGINT)),
)
.into_error()?,
)
}
GdbStubStateMachine::Disconnected(state) => {
tracing::info!("GDB client disconnected: {:?}", state.get_reason());
None
}
};
return Ok(wait_time);
}
Ok(Duration::ZERO)
}
}
impl Target for RuntimeTarget<'_> {
type Arch = RuntimeArch;
type Error = Error;
fn base_ops(&mut self) -> BaseOps<'_, Self::Arch, Self::Error> {
BaseOps::MultiThread(self)
}
fn support_target_description_xml_override(
&mut self,
) -> Option<TargetDescriptionXmlOverrideOps<'_, Self>> {
Some(self)
}
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<'_, Self>> {
Some(self)
}
fn support_memory_map(&mut self) -> Option<MemoryMapOps<'_, Self>> {
Some(self)
}
fn support_monitor_cmd(&mut self) -> Option<MonitorCmdOps<'_, Self>> {
Some(self)
}
fn guard_rail_implicit_sw_breakpoints(&self) -> bool {
true
}
}
/// Read a byte from a stream if available, otherwise return None
fn read_if_available(conn: &mut TcpStream) -> Result<Option<u8>, Error> {
match conn.peek() {
Ok(p) => {
// Unwrap is safe because peek already showed
// there's data in the buffer
match p {
Some(_) => conn.read().map(Some).into_error(),
None => Ok(None),
}
}
Err(e) => Err(anyhow::Error::from(e).into()),
}
}
| RuntimeTarget | identifier_name |
mod.rs | mod base;
mod breakpoints;
mod desc;
mod monitor;
mod resume;
mod thread;
mod traits;
mod utils;
use super::arch::RuntimeArch;
use crate::{BreakpointCause, CoreStatus, Error, HaltReason, Session};
use gdbstub::stub::state_machine::GdbStubStateMachine;
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::num::NonZeroUsize;
use std::sync::Mutex;
use std::time::Duration;
use gdbstub::common::Signal;
use gdbstub::conn::ConnectionExt;
use gdbstub::stub::{GdbStub, MultiThreadStopReason};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::breakpoints::BreakpointsOps;
use gdbstub::target::ext::memory_map::MemoryMapOps;
use gdbstub::target::ext::monitor_cmd::MonitorCmdOps;
use gdbstub::target::ext::target_description_xml_override::TargetDescriptionXmlOverrideOps;
use gdbstub::target::Target;
pub(crate) use traits::{GdbErrorExt, ProbeRsErrorExt};
use desc::TargetDescription;
/// Actions for resuming a core
#[derive(Debug, Copy, Clone)]
pub(crate) enum ResumeAction {
/// Don't change the state
Unchanged,
/// Resume core
Resume,
/// Single step core
Step,
}
/// The top level gdbstub target for a probe-rs debug session
pub(crate) struct RuntimeTarget<'a> {
/// The probe-rs session object
session: &'a Mutex<Session>,
/// A list of core IDs for this stub
cores: Vec<usize>,
/// TCP listener accepting incoming connections
listener: TcpListener,
/// The current GDB stub state machine
gdb: Option<GdbStubStateMachine<'a, RuntimeTarget<'a>, TcpStream>>,
/// Resume action to be used upon a continue request
resume_action: (usize, ResumeAction),
/// Description of target's architecture and registers
target_desc: TargetDescription,
}
impl<'a> RuntimeTarget<'a> {
/// Create a new RuntimeTarget and get ready to start processing GDB input
pub fn new(
session: &'a Mutex<Session>,
cores: Vec<usize>,
addrs: &[SocketAddr],
) -> Result<Self, Error> {
let listener = TcpListener::bind(addrs).into_error()?;
listener.set_nonblocking(true).into_error()?;
Ok(Self {
session,
cores,
listener,
gdb: None,
resume_action: (0, ResumeAction::Unchanged),
target_desc: TargetDescription::default(),
})
}
/// Process any pending work for this target
///
/// Returns: Duration to wait before processing this target again
pub fn process(&mut self) -> Result<Duration, Error> {
// State 1 - unconnected
if self.gdb.is_none() {
// See if we have a connection
match self.listener.accept() {
Ok((s, addr)) => {
tracing::info!("New connection from {:#?}", addr);
for i in 0..self.cores.len() {
let core_id = self.cores[i];
// When we first attach to the core, GDB expects us to halt the core, so we do this here when a new client connects.
// If the core is already halted, nothing happens if we issue a halt command again, so we always do this no matter of core state.
self.session
.lock()
.unwrap()
.core(core_id)?
.halt(Duration::from_millis(100))?;
self.load_target_desc()?;
}
// Start the GDB Stub state machine
let stub = GdbStub::<RuntimeTarget, _>::new(s);
match stub.run_state_machine(self) {
Ok(gdbstub) => {
self.gdb = Some(gdbstub);
}
Err(e) => {
// Any errors at this state are either IO errors or fatal config errors
return Err(anyhow::Error::from(e).into());
}
};
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// No connection yet
return Ok(Duration::from_millis(10));
}
Err(e) => {
// Fatal error
return Err(anyhow::Error::from(e).into());
}
};
}
// Stage 2 - connected
if self.gdb.is_some() {
let mut wait_time = Duration::ZERO;
let gdb = self.gdb.take().unwrap();
self.gdb = match gdb {
GdbStubStateMachine::Idle(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
GdbStubStateMachine::Running(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
// Check for break
let mut stop_reason: Option<MultiThreadStopReason<u64>> = None;
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
let status = core.status()?;
if let CoreStatus::Halted(reason) = status {
let tid = NonZeroUsize::new(i + 1).unwrap();
stop_reason = Some(match reason {
HaltReason::Breakpoint(BreakpointCause::Hardware)
| HaltReason::Breakpoint(BreakpointCause::Unknown) => |
HaltReason::Step => MultiThreadStopReason::DoneStep,
_ => MultiThreadStopReason::SignalWithThread {
tid,
signal: Signal::SIGINT,
},
});
break;
}
}
// halt all remaining cores that are still running
// GDB expects all or nothing stops
if stop_reason.is_some() {
for i in &self.cores {
let mut core = session.core(*i)?;
if !core.core_halted()? {
core.halt(Duration::from_millis(100))?;
}
}
}
}
if let Some(reason) = stop_reason {
Some(state.report_stop(self, reason).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
}
GdbStubStateMachine::CtrlCInterrupt(state) => {
// Break core, handle interrupt
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
core.halt(Duration::from_millis(100))?;
}
}
Some(
state
.interrupt_handled(
self,
Some(MultiThreadStopReason::Signal(Signal::SIGINT)),
)
.into_error()?,
)
}
GdbStubStateMachine::Disconnected(state) => {
tracing::info!("GDB client disconnected: {:?}", state.get_reason());
None
}
};
return Ok(wait_time);
}
Ok(Duration::ZERO)
}
}
impl Target for RuntimeTarget<'_> {
type Arch = RuntimeArch;
type Error = Error;
fn base_ops(&mut self) -> BaseOps<'_, Self::Arch, Self::Error> {
BaseOps::MultiThread(self)
}
fn support_target_description_xml_override(
&mut self,
) -> Option<TargetDescriptionXmlOverrideOps<'_, Self>> {
Some(self)
}
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<'_, Self>> {
Some(self)
}
fn support_memory_map(&mut self) -> Option<MemoryMapOps<'_, Self>> {
Some(self)
}
fn support_monitor_cmd(&mut self) -> Option<MonitorCmdOps<'_, Self>> {
Some(self)
}
fn guard_rail_implicit_sw_breakpoints(&self) -> bool {
true
}
}
/// Read a byte from a stream if available, otherwise return None
fn read_if_available(conn: &mut TcpStream) -> Result<Option<u8>, Error> {
match conn.peek() {
Ok(p) => {
// Unwrap is safe because peek already showed
// there's data in the buffer
match p {
Some(_) => conn.read().map(Some).into_error(),
None => Ok(None),
}
}
Err(e) => Err(anyhow::Error::from(e).into()),
}
}
| {
// Some architectures do not allow us to distinguish between hardware and software breakpoints, so we just treat `Unknown` as hardware breakpoints.
MultiThreadStopReason::HwBreak(tid)
} | conditional_block |
packet_codec.rs | Data>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
¶ms.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" => ?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove command packet from send queue if the fitting ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
| con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if let Err(e) = con
.1
.c2s_init_sink
.unbounded_send(packet.into_c2sinit().map_err(|(_, e)| e)?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
}
}
Ok(())
}
/// Handle `Command` and `CommandLow` packets.
///
/// They have to be handled in the right order.
fn handle_command_packet(
logger: &Logger,
r_queue: &mut [Vec<InPacket>; 2],
frag_queue: &mut [Option<(InPacket, Vec<u8>)>; 2],
in_ids: &mut [(u32, u16); 8],
mut packet: InPacket,
) -> Result<Vec<InCommand>>
{
let header = packet.header();
let p_type = header.packet_type();
let mut id = header.packet_id();
let type_i = p_type.to_usize().unwrap();
let cmd_i = if p_type == PacketType::Command { 0 } else { 1 };
let r_queue = &mut r_queue[cmd_i];
let frag_queue = &mut frag_queue[cmd_i];
let in_ids = &mut in_ids[type_i];
let cur_next = in_ids.1;
if cur_next == id {
// In order
let mut packets = Vec::new();
loop {
// Update next packet id
let (next_id, next_gen) = id.overflowing_add(1);
if next_gen {
// Next packet generation
in_ids.0 = in_ids.0.wrapping_add(1);
}
in_ids.1 = next_id;
let flags = packet.header().flags();
let res_packet = if flags.contains(Flags::FRAGMENTED) {
if let Some((header, mut frag_queue)) = frag_queue.take() {
// Last fragmented packet
frag_queue.extend_from_slice(packet.content());
// Decompress
let decompressed = if header
.header()
.flags()
.contains(Flags::COMPRESSED)
{
//debug!(logger, "Compressed"; "data" => ?::utils::HexSlice(&frag_queue));
::quicklz::decompress(
&mut Cursor::new(frag_queue),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
frag_queue
};
/*if header.get_compressed() {
debug!(logger, "Decompressed";
| PacketType::CommandLow
};
| conditional_block |
packet_codec.rs | (
&mut self,
(addr, packet): (SocketAddr, InPacket),
) -> impl Future<Item = (), Error = Error>
{
// Find the right connection
let cons = self.connections.read();
if let Some(con) =
cons.get(&CM::get_connection_key(addr, &packet)).cloned()
{
// If we are a client and have only a single connection, we will do the
// work inside this future and not spawn a new one.
let logger = self.logger.new(o!("addr" => addr));
let in_packet_observer = self.in_packet_observer.clone();
let in_command_observer = self.in_command_observer.clone();
if self.is_client && cons.len() == 1 {
drop(cons);
Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
self.is_client,
&con,
addr,
packet,
)
.into_future()
} else {
drop(cons);
let is_client = self.is_client;
tokio::spawn(future::lazy(move || {
if let Err(e) = Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
is_client,
&con,
addr,
packet,
) {
error!(logger, "Error handling udp packet"; "error" => ?e);
}
Ok(())
}));
future::ok(())
}
} else {
drop(cons);
// Unknown connection
if let Some(sink) = &mut self.unknown_udp_packet_sink {
// Don't block if the queue is full
if sink.try_send((addr, packet)).is_err() {
warn!(self.logger, "Unknown connection handler overloaded \
– dropping udp packet");
}
} else {
warn!(
self.logger,
"Dropped packet without connection because no unknown \
packet handler is set"
);
}
future::ok(())
}
}
/// Handle a packet for a specific connection.
///
/// This part does the defragmentation, decryption and decompression.
fn connection_handle_udp_packet(
logger: &Logger,
in_packet_observer: LockedHashMap<
String,
Box<InPacketObserver<CM::AssociatedData>>,
>,
in_command_observer: LockedHashMap<
String,
Box<InCommandObserver<CM::AssociatedData>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
¶ms.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" => ?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove command packet from send queue if the fitting ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
PacketType::CommandLow
};
con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if let Err(e) = con
.1
.c2s | handle_udp_packet | identifier_name |
|
packet_codec.rs | packet,
)
.into_future()
} else {
drop(cons);
let is_client = self.is_client;
tokio::spawn(future::lazy(move || {
if let Err(e) = Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
is_client,
&con,
addr,
packet,
) {
error!(logger, "Error handling udp packet"; "error" => ?e);
}
Ok(())
}));
future::ok(())
}
} else {
drop(cons);
// Unknown connection
if let Some(sink) = &mut self.unknown_udp_packet_sink {
// Don't block if the queue is full
if sink.try_send((addr, packet)).is_err() {
warn!(self.logger, "Unknown connection handler overloaded \
– dropping udp packet");
}
} else {
warn!(
self.logger,
"Dropped packet without connection because no unknown \
packet handler is set"
);
}
future::ok(())
}
}
/// Handle a packet for a specific connection.
///
/// This part does the defragmentation, decryption and decompression.
fn connection_handle_udp_packet(
logger: &Logger,
in_packet_observer: LockedHashMap<
String,
Box<InPacketObserver<CM::AssociatedData>>,
>,
in_command_observer: LockedHashMap<
String,
Box<InCommandObserver<CM::AssociatedData>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
¶ms.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" => ?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove command packet from send queue if the fitting ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
PacketType::CommandLow
};
con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if let Err(e) = con
.1
.c2s_init_sink
.unbounded_send(packet.into_c2sinit().map_err(|(_, e)| e)?)
{
error!(logger, "Failed to send | {
// Find the right connection
let cons = self.connections.read();
if let Some(con) =
cons.get(&CM::get_connection_key(addr, &packet)).cloned()
{
// If we are a client and have only a single connection, we will do the
// work inside this future and not spawn a new one.
let logger = self.logger.new(o!("addr" => addr));
let in_packet_observer = self.in_packet_observer.clone();
let in_command_observer = self.in_command_observer.clone();
if self.is_client && cons.len() == 1 {
drop(cons);
Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
self.is_client,
&con,
addr, | identifier_body |
|
packet_codec.rs | logger: &Logger,
in_packet_observer: LockedHashMap<
String,
Box<InPacketObserver<CM::AssociatedData>>,
>,
in_command_observer: LockedHashMap<
String,
Box<InCommandObserver<CM::AssociatedData>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
¶ms.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" => ?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove command packet from send queue if the fitting ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
PacketType::CommandLow
};
con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
} else if let Err(e) = con
.1
.c2s_init_sink
.unbounded_send(packet.into_c2sinit().map_err(|(_, e)| e)?)
{
error!(logger, "Failed to send packet to handler"; "error" => ?e);
}
}
}
Ok(())
}
/// Handle `Command` and `CommandLow` packets.
///
/// They have to be handled in the right order.
fn handle_command_packet(
logger: &Logger,
r_queue: &mut [Vec<InPacket>; 2],
frag_queue: &mut [Option<(InPacket, Vec<u8>)>; 2],
in_ids: &mut [(u32, u16); 8],
mut packet: InPacket,
) -> Result<Vec<InCommand>>
{
let header = packet.header();
let p_type = header.packet_type();
let mut id = header.packet_id();
let type_i = p_type.to_usize().unwrap();
let cmd_i = if p_type == PacketType::Command { 0 } else { 1 };
let r_queue = &mut r_queue[cmd_i];
let frag_queue = &mut frag_queue[cmd_i];
let in_ids = &mut in_ids[type_i];
let cur_next = in_ids.1;
if cur_next == id {
// In order
let mut packets = Vec::new();
loop {
// Update next packet id
let (next_id, next_gen) = id.overflowing_add(1);
if next_gen {
// Next packet generation
in_ids.0 = in_ids.0.wrapping_add(1);
}
in_ids.1 = next_id;
let flags = packet.header().flags();
let res_packet = if flags.contains(Flags::FRAGMENTED) {
if let Some((header, mut frag_queue)) = frag_queue.take() {
// Last fragmented packet
frag_queue.extend_from_slice(packet.content());
// Decompress
let decompressed = if header
.header()
.flags()
.contains(Flags::COMPRESSED)
{
//debug!(logger, "Compressed"; "data" => ?::utils::HexSlice(&frag_queue));
::quicklz::decompress(
| random_line_split |
||
build_all.py | JAVA_HOME = '/Program Files/Eclipse Adoptium/jdk-8.0.362.9-hotspot/'
else:
CMAKE_BUILD_ENV_ROOT = os.path.join(os.environ['HOME'], 'cmake_build_env_root')
if sys.platform == 'linux':
CUDA_ROOT = '/usr/local/cuda-11'
JAVA_HOME = '/opt/jdk/8'
elif sys.platform == 'darwin':
JAVA_HOME = '/Library/Java/JavaVirtualMachines/jdk-8/Contents/Home/'
CUDA_ROOT = None
def need_to_build_cuda(platform_name: str):
system, _ = platform_name.split('-')
return system in ['linux', 'windows']
def get_primary_platform_name():
if sys.platform == 'darwin':
return 'darwin-universal2'
else:
return {
'win32': 'windows',
'linux': 'linux'
}[sys.platform] + '-x86_64'
def get_native_platform_name():
system_name = 'windows' if sys.platform == 'win32' else sys.platform
return system_name + '-x86_64'
# Unfortunately CMake's FindPython does not work reliably in all cases so we have to recreate similar logic here.
def get_python_root_dir(py_ver: Tuple[int, int])-> str:
# returns python_root_dir relative to CMAKE_FIND_ROOT_PATH
if sys.platform == 'win32':
# pyenv installs x.y.z versions but we've created x.y aliases for convinience
return os.path.join('.pyenv', 'pyenv-win', 'versions', f'{py_ver[0]}.{py_ver[1]}')
if sys.platform == 'darwin':
# pyenv installs x.y.z versions but we've created x.y aliases for convinience
return os.path.join('.pyenv', 'versions', f'{py_ver[0]}.{py_ver[1]}')
if sys.platform == 'linux':
py_ver_str = f'{py_ver[0]}{py_ver[1]}'
# manylinux2014 image conventions
return os.path.join('python', f'cp{py_ver_str}-cp{py_ver_str}m' if py_ver <= (3,7) else f'cp{py_ver_str}-cp{py_ver_str}')
def get_python_version_include_and_library_paths(py_ver: Tuple[int, int]) -> Tuple[str, str]:
# returns (python include path, python library path) relative to CMAKE_FIND_ROOT_PATH
base_path = get_python_root_dir(py_ver)
if sys.platform == 'win32':
return (os.path.join(base_path, 'include'), os.path.join(base_path, 'libs', f'python{py_ver[0]}{py_ver[1]}.lib'))
# for some reason python versions for python <=3.7 contain 'm' suffix
python_sub_name = f'python{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '')
if sys.platform == 'darwin':
lib_sub_path = os.path.join(
'lib',
f'python{py_ver[0]}.{py_ver[1]}',
f'config-{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '') + '-darwin'
)
elif sys.platform == 'linux':
lib_sub_path = 'lib'
return (
os.path.join(base_path, 'include', python_sub_name),
os.path.join(base_path, lib_sub_path, f'lib{python_sub_name}.a')
)
def run_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
commands: List[List[str]]):
os.chdir(os.path.join(src_root_dir, 'catboost', 'python-package'))
for cmd in commands:
if verbose:
logging.info(' '.join(cmd))
if not dry_run:
subprocess.check_call(cmd)
os.chdir(src_root_dir)
def run_with_native_python_with_version_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
py_ver: Tuple[int, int],
python_cmds_args: List[List[str]]):
base_path = os.path.join(CMAKE_BUILD_ENV_ROOT, get_native_platform_name(), get_python_root_dir(py_ver))
if sys.platform == 'win32':
python_bin_path = os.path.join(base_path, 'python.exe')
else:
python_bin_path = os.path.join(base_path, 'bin', 'python')
run_in_python_package_dir(
src_root_dir,
dry_run,
verbose,
[[python_bin_path] + cmd_args for cmd_args in python_cmds_args]
)
def patch_sources(src_root_dir: str, dry_run:bool = False, verbose:bool = False):
# TODO(akhropov): Remove when system cuda.cmake is updated for Linux cross-build
distutils.file_util.copy_file(
src=os.path.join(src_root_dir, 'ci', 'cmake', 'cuda.cmake'),
dst=os.path.join(src_root_dir, 'cmake', 'cuda.cmake'),
verbose=verbose,
dry_run=dry_run
)
def get_python_plat_name(platform_name: str):
system, arch = platform_name.split('-')
if system == 'windows':
return 'win_amd64'
elif system == 'darwin':
return 'macosx_11_0_universal2'
else: # linux
return 'manylinux2014_' + arch
def build_r_package(src_root_dir: str, build_native_root_dir: str, platform_name: str, dry_run: bool, verbose: bool):
system, _ = platform_name.split('-')
def get_catboostr_artifact_src_and_dst_name(system: str):
return {
'linux': ('libcatboostr.so', 'libcatboostr.so'),
'darwin': ('libcatboostr.dylib', 'libcatboostr.so'),
'windows': ('catboostr.dll', 'libcatboostr.dll')
}[system]
os.chdir(os.path.join(src_root_dir, 'catboost', 'R-package'))
if not dry_run:
os.makedirs('catboost', exist_ok=True)
entries = [
'DESCRIPTION',
'NAMESPACE',
'README.md',
'R',
'inst',
'man',
'tests'
]
for entry in entries:
if os.path.isdir(entry):
distutils.dir_util.copy_tree(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
else:
distutils.file_util.copy_file(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
binary_dst_dir = os.path.join('catboost', 'inst', 'libs')
if system == 'windows':
binary_dst_dir = os.path.join(binary_dst_dir, 'x64')
if not dry_run:
os.makedirs(binary_dst_dir, exist_ok=True)
src, dst = get_catboostr_artifact_src_and_dst_name(system)
full_src = os.path.join(build_native_root_dir, 'catboost', 'R-package', 'src', src)
full_dst = os.path.join(binary_dst_dir, dst)
if dry_run:
logging.info(f'copying {full_src} -> {full_dst}')
else:
distutils.file_util.copy_file(full_src, full_dst, verbose=verbose, dry_run=dry_run)
r_package_file_name = f'catboost-R-{platform_name}.tgz'
logging.info(f'creating {r_package_file_name}')
if not dry_run:
with tarfile.open(r_package_file_name, "w:gz") as tar:
tar.add('catboost', arcname=os.path.basename('catboost'))
os.chdir(src_root_dir)
def build_jvm_artifacts(
src_root_dir: str,
build_native_root_dir: str,
target_platform: str,
macos_universal_binaries:bool,
have_cuda: str,
dry_run: bool,
verbose: bool):
os.chdir(src_root_dir)
for base_dir, lib_name in [
(os.path.join('catboost', 'jvm-packages', 'catboost4j-prediction'), 'catboost4j-prediction'),
(os.path.join('catboost', 'spark', 'catboost4j-spark', 'core'), 'catboost4j-spark-impl'),
]:
cmd = [
'python3',
os.path.join('catboost', 'jvm-packages', 'tools', 'build_native_for_maven.py'),
'--only-postprocessing',
'--base-dir', base_dir,
'--lib-name', lib_name,
'--build-output-root-dir', build_native_root_dir,
]
if verbose:
cmd += ['--verbose']
if have_cuda:
cmd += ['--have-cuda', f'--cuda-root-dir="{CUDA_ROOT}"']
if macos_universal_binaries:
cmd += ['--macos-universal-binaries']
else:
cmd += ['--target-platform', target_platform]
if verbose:
logging.info(' '.join(cmd))
if not dry_run:
subprocess.check_call(cmd)
def get_exe_files(system:str, name:str) -> List[str]:
return [name + '.exe' if system == 'windows' else name]
def get_static | random_line_split |
||
build_all.py | win32':
return (os.path.join(base_path, 'include'), os.path.join(base_path, 'libs', f'python{py_ver[0]}{py_ver[1]}.lib'))
# for some reason python versions for python <=3.7 contain 'm' suffix
python_sub_name = f'python{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '')
if sys.platform == 'darwin':
lib_sub_path = os.path.join(
'lib',
f'python{py_ver[0]}.{py_ver[1]}',
f'config-{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '') + '-darwin'
)
elif sys.platform == 'linux':
lib_sub_path = 'lib'
return (
os.path.join(base_path, 'include', python_sub_name),
os.path.join(base_path, lib_sub_path, f'lib{python_sub_name}.a')
)
def run_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
commands: List[List[str]]):
os.chdir(os.path.join(src_root_dir, 'catboost', 'python-package'))
for cmd in commands:
if verbose:
logging.info(' '.join(cmd))
if not dry_run:
subprocess.check_call(cmd)
os.chdir(src_root_dir)
def run_with_native_python_with_version_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
py_ver: Tuple[int, int],
python_cmds_args: List[List[str]]):
base_path = os.path.join(CMAKE_BUILD_ENV_ROOT, get_native_platform_name(), get_python_root_dir(py_ver))
if sys.platform == 'win32':
python_bin_path = os.path.join(base_path, 'python.exe')
else:
python_bin_path = os.path.join(base_path, 'bin', 'python')
run_in_python_package_dir(
src_root_dir,
dry_run,
verbose,
[[python_bin_path] + cmd_args for cmd_args in python_cmds_args]
)
def patch_sources(src_root_dir: str, dry_run:bool = False, verbose:bool = False):
# TODO(akhropov): Remove when system cuda.cmake is updated for Linux cross-build
distutils.file_util.copy_file(
src=os.path.join(src_root_dir, 'ci', 'cmake', 'cuda.cmake'),
dst=os.path.join(src_root_dir, 'cmake', 'cuda.cmake'),
verbose=verbose,
dry_run=dry_run
)
def get_python_plat_name(platform_name: str):
system, arch = platform_name.split('-')
if system == 'windows':
return 'win_amd64'
elif system == 'darwin':
return 'macosx_11_0_universal2'
else: # linux
return 'manylinux2014_' + arch
def build_r_package(src_root_dir: str, build_native_root_dir: str, platform_name: str, dry_run: bool, verbose: bool):
system, _ = platform_name.split('-')
def get_catboostr_artifact_src_and_dst_name(system: str):
return {
'linux': ('libcatboostr.so', 'libcatboostr.so'),
'darwin': ('libcatboostr.dylib', 'libcatboostr.so'),
'windows': ('catboostr.dll', 'libcatboostr.dll')
}[system]
os.chdir(os.path.join(src_root_dir, 'catboost', 'R-package'))
if not dry_run:
os.makedirs('catboost', exist_ok=True)
entries = [
'DESCRIPTION',
'NAMESPACE',
'README.md',
'R',
'inst',
'man',
'tests'
]
for entry in entries:
if os.path.isdir(entry):
distutils.dir_util.copy_tree(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
else:
distutils.file_util.copy_file(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
binary_dst_dir = os.path.join('catboost', 'inst', 'libs')
if system == 'windows':
binary_dst_dir = os.path.join(binary_dst_dir, 'x64')
if not dry_run:
os.makedirs(binary_dst_dir, exist_ok=True)
src, dst = get_catboostr_artifact_src_and_dst_name(system)
full_src = os.path.join(build_native_root_dir, 'catboost', 'R-package', 'src', src)
full_dst = os.path.join(binary_dst_dir, dst)
if dry_run:
logging.info(f'copying {full_src} -> {full_dst}')
else:
distutils.file_util.copy_file(full_src, full_dst, verbose=verbose, dry_run=dry_run)
r_package_file_name = f'catboost-R-{platform_name}.tgz'
logging.info(f'creating {r_package_file_name}')
if not dry_run:
with tarfile.open(r_package_file_name, "w:gz") as tar:
tar.add('catboost', arcname=os.path.basename('catboost'))
os.chdir(src_root_dir)
def build_jvm_artifacts(
src_root_dir: str,
build_native_root_dir: str,
target_platform: str,
macos_universal_binaries:bool,
have_cuda: str,
dry_run: bool,
verbose: bool):
os.chdir(src_root_dir)
for base_dir, lib_name in [
(os.path.join('catboost', 'jvm-packages', 'catboost4j-prediction'), 'catboost4j-prediction'),
(os.path.join('catboost', 'spark', 'catboost4j-spark', 'core'), 'catboost4j-spark-impl'),
]:
cmd = [
'python3',
os.path.join('catboost', 'jvm-packages', 'tools', 'build_native_for_maven.py'),
'--only-postprocessing',
'--base-dir', base_dir,
'--lib-name', lib_name,
'--build-output-root-dir', build_native_root_dir,
]
if verbose:
cmd += ['--verbose']
if have_cuda:
cmd += ['--have-cuda', f'--cuda-root-dir="{CUDA_ROOT}"']
if macos_universal_binaries:
cmd += ['--macos-universal-binaries']
else:
cmd += ['--target-platform', target_platform]
if verbose:
|
if not dry_run:
subprocess.check_call(cmd)
def get_exe_files(system:str, name:str) -> List[str]:
return [name + '.exe' if system == 'windows' else name]
def get_static_lib_files(system:str, name:str) -> List[str]:
prefix = '' if system == 'windows' else 'lib'
suffix = '.lib' if system == 'windows' else '.a'
return [prefix + name + sub_suffix + suffix for sub_suffix in ['', '.global']]
def get_shared_lib_files(system:str, name:str) -> List[str]:
if system == 'windows':
return [name + '.lib', name + '.dll']
else:
suffix = '.so' if system == 'linux' else '.dylib'
return ['lib' + name + suffix]
def copy_built_artifacts_to_canonical_place(system: str, real_build_dir:str, build_native_platform_dir:str, dry_run:bool, verbose: bool):
"""
Copy only artifacts that are not copied already by postprocessing in building JVM, R and Python packages
"""
artifacts = [
(os.path.join('catboost', 'app'), get_exe_files(system, 'catboost')),
(os.path.join('catboost', 'libs', 'model_interface'), get_shared_lib_files(system, 'catboostmodel')),
(os.path.join('catboost', 'libs', 'model_interface', 'static'), get_static_lib_files(system, 'catboostmodel_static')),
(os.path.join('catboost', 'libs', 'train_interface'), get_shared_lib_files(system, 'catboost')),
]
for sub_path, files in artifacts:
for f in files:
src = os.path.join(real_build_dir, sub_path, f)
dst = os.path.join(build_native_platform_dir, sub_path, f)
if dry_run:
logging.info(f'copying {src} -> {dst}')
else:
distutils.dir_util.mkpath(os.path.dirname(dst), verbose=verbose, dry_run=dry_run)
distutils.file_util.copy_file(src, dst, verbose=verbose, dry_run=dry_run)
def get_real_build_root_dir(src_root_dir:str, platform_name:str, built_output_root_dir:str):
if os.environ.get('CMAKE_BUILD_CACHE_DIR'):
build_native_root_dir = os.path.join(
os.environ['CMAKE_BUILD_CACHE_DIR'],
hashlib.md5(os.path.abspath(src_root_dir).encode('utf-8')).hexdigest()[:10],
platform_name
)
os.makedirs(build_native_root_dir, exist_ok=True)
return build_native_root_dir
else:
return built_output_root_dir
def build_all_for_one_platform(
src_root_dir:str,
built_output_root_dir:str,
platform_name:str, # either "{system}-{arch}' of 'darwin-universal | logging.info(' '.join(cmd)) | conditional_block |
build_all.py | win32':
return (os.path.join(base_path, 'include'), os.path.join(base_path, 'libs', f'python{py_ver[0]}{py_ver[1]}.lib'))
# for some reason python versions for python <=3.7 contain 'm' suffix
python_sub_name = f'python{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '')
if sys.platform == 'darwin':
lib_sub_path = os.path.join(
'lib',
f'python{py_ver[0]}.{py_ver[1]}',
f'config-{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '') + '-darwin'
)
elif sys.platform == 'linux':
lib_sub_path = 'lib'
return (
os.path.join(base_path, 'include', python_sub_name),
os.path.join(base_path, lib_sub_path, f'lib{python_sub_name}.a')
)
def run_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
commands: List[List[str]]):
os.chdir(os.path.join(src_root_dir, 'catboost', 'python-package'))
for cmd in commands:
if verbose:
logging.info(' '.join(cmd))
if not dry_run:
subprocess.check_call(cmd)
os.chdir(src_root_dir)
def run_with_native_python_with_version_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
py_ver: Tuple[int, int],
python_cmds_args: List[List[str]]):
base_path = os.path.join(CMAKE_BUILD_ENV_ROOT, get_native_platform_name(), get_python_root_dir(py_ver))
if sys.platform == 'win32':
python_bin_path = os.path.join(base_path, 'python.exe')
else:
python_bin_path = os.path.join(base_path, 'bin', 'python')
run_in_python_package_dir(
src_root_dir,
dry_run,
verbose,
[[python_bin_path] + cmd_args for cmd_args in python_cmds_args]
)
def patch_sources(src_root_dir: str, dry_run:bool = False, verbose:bool = False):
# TODO(akhropov): Remove when system cuda.cmake is updated for Linux cross-build
distutils.file_util.copy_file(
src=os.path.join(src_root_dir, 'ci', 'cmake', 'cuda.cmake'),
dst=os.path.join(src_root_dir, 'cmake', 'cuda.cmake'),
verbose=verbose,
dry_run=dry_run
)
def get_python_plat_name(platform_name: str):
system, arch = platform_name.split('-')
if system == 'windows':
return 'win_amd64'
elif system == 'darwin':
return 'macosx_11_0_universal2'
else: # linux
return 'manylinux2014_' + arch
def build_r_package(src_root_dir: str, build_native_root_dir: str, platform_name: str, dry_run: bool, verbose: bool):
system, _ = platform_name.split('-')
def get_catboostr_artifact_src_and_dst_name(system: str):
return {
'linux': ('libcatboostr.so', 'libcatboostr.so'),
'darwin': ('libcatboostr.dylib', 'libcatboostr.so'),
'windows': ('catboostr.dll', 'libcatboostr.dll')
}[system]
os.chdir(os.path.join(src_root_dir, 'catboost', 'R-package'))
if not dry_run:
os.makedirs('catboost', exist_ok=True)
entries = [
'DESCRIPTION',
'NAMESPACE',
'README.md',
'R',
'inst',
'man',
'tests'
]
for entry in entries:
if os.path.isdir(entry):
distutils.dir_util.copy_tree(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
else:
distutils.file_util.copy_file(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
binary_dst_dir = os.path.join('catboost', 'inst', 'libs')
if system == 'windows':
binary_dst_dir = os.path.join(binary_dst_dir, 'x64')
if not dry_run:
os.makedirs(binary_dst_dir, exist_ok=True)
src, dst = get_catboostr_artifact_src_and_dst_name(system)
full_src = os.path.join(build_native_root_dir, 'catboost', 'R-package', 'src', src)
full_dst = os.path.join(binary_dst_dir, dst)
if dry_run:
logging.info(f'copying {full_src} -> {full_dst}')
else:
distutils.file_util.copy_file(full_src, full_dst, verbose=verbose, dry_run=dry_run)
r_package_file_name = f'catboost-R-{platform_name}.tgz'
logging.info(f'creating {r_package_file_name}')
if not dry_run:
with tarfile.open(r_package_file_name, "w:gz") as tar:
tar.add('catboost', arcname=os.path.basename('catboost'))
os.chdir(src_root_dir)
def build_jvm_artifacts(
src_root_dir: str,
build_native_root_dir: str,
target_platform: str,
macos_universal_binaries:bool,
have_cuda: str,
dry_run: bool,
verbose: bool):
os.chdir(src_root_dir)
for base_dir, lib_name in [
(os.path.join('catboost', 'jvm-packages', 'catboost4j-prediction'), 'catboost4j-prediction'),
(os.path.join('catboost', 'spark', 'catboost4j-spark', 'core'), 'catboost4j-spark-impl'),
]:
cmd = [
'python3',
os.path.join('catboost', 'jvm-packages', 'tools', 'build_native_for_maven.py'),
'--only-postprocessing',
'--base-dir', base_dir,
'--lib-name', lib_name,
'--build-output-root-dir', build_native_root_dir,
]
if verbose:
cmd += ['--verbose']
if have_cuda:
cmd += ['--have-cuda', f'--cuda-root-dir="{CUDA_ROOT}"']
if macos_universal_binaries:
cmd += ['--macos-universal-binaries']
else:
cmd += ['--target-platform', target_platform]
if verbose:
logging.info(' '.join(cmd))
if not dry_run:
subprocess.check_call(cmd)
def | (system:str, name:str) -> List[str]:
return [name + '.exe' if system == 'windows' else name]
def get_static_lib_files(system:str, name:str) -> List[str]:
prefix = '' if system == 'windows' else 'lib'
suffix = '.lib' if system == 'windows' else '.a'
return [prefix + name + sub_suffix + suffix for sub_suffix in ['', '.global']]
def get_shared_lib_files(system:str, name:str) -> List[str]:
if system == 'windows':
return [name + '.lib', name + '.dll']
else:
suffix = '.so' if system == 'linux' else '.dylib'
return ['lib' + name + suffix]
def copy_built_artifacts_to_canonical_place(system: str, real_build_dir:str, build_native_platform_dir:str, dry_run:bool, verbose: bool):
"""
Copy only artifacts that are not copied already by postprocessing in building JVM, R and Python packages
"""
artifacts = [
(os.path.join('catboost', 'app'), get_exe_files(system, 'catboost')),
(os.path.join('catboost', 'libs', 'model_interface'), get_shared_lib_files(system, 'catboostmodel')),
(os.path.join('catboost', 'libs', 'model_interface', 'static'), get_static_lib_files(system, 'catboostmodel_static')),
(os.path.join('catboost', 'libs', 'train_interface'), get_shared_lib_files(system, 'catboost')),
]
for sub_path, files in artifacts:
for f in files:
src = os.path.join(real_build_dir, sub_path, f)
dst = os.path.join(build_native_platform_dir, sub_path, f)
if dry_run:
logging.info(f'copying {src} -> {dst}')
else:
distutils.dir_util.mkpath(os.path.dirname(dst), verbose=verbose, dry_run=dry_run)
distutils.file_util.copy_file(src, dst, verbose=verbose, dry_run=dry_run)
def get_real_build_root_dir(src_root_dir:str, platform_name:str, built_output_root_dir:str):
if os.environ.get('CMAKE_BUILD_CACHE_DIR'):
build_native_root_dir = os.path.join(
os.environ['CMAKE_BUILD_CACHE_DIR'],
hashlib.md5(os.path.abspath(src_root_dir).encode('utf-8')).hexdigest()[:10],
platform_name
)
os.makedirs(build_native_root_dir, exist_ok=True)
return build_native_root_dir
else:
return built_output_root_dir
def build_all_for_one_platform(
src_root_dir:str,
built_output_root_dir:str,
platform_name:str, # either "{system}-{arch}' of 'darwin-universal | get_exe_files | identifier_name |
build_all.py | win32':
return (os.path.join(base_path, 'include'), os.path.join(base_path, 'libs', f'python{py_ver[0]}{py_ver[1]}.lib'))
# for some reason python versions for python <=3.7 contain 'm' suffix
python_sub_name = f'python{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '')
if sys.platform == 'darwin':
lib_sub_path = os.path.join(
'lib',
f'python{py_ver[0]}.{py_ver[1]}',
f'config-{py_ver[0]}.{py_ver[1]}' + ('m' if py_ver <= (3,7) else '') + '-darwin'
)
elif sys.platform == 'linux':
lib_sub_path = 'lib'
return (
os.path.join(base_path, 'include', python_sub_name),
os.path.join(base_path, lib_sub_path, f'lib{python_sub_name}.a')
)
def run_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
commands: List[List[str]]):
os.chdir(os.path.join(src_root_dir, 'catboost', 'python-package'))
for cmd in commands:
if verbose:
logging.info(' '.join(cmd))
if not dry_run:
subprocess.check_call(cmd)
os.chdir(src_root_dir)
def run_with_native_python_with_version_in_python_package_dir(
src_root_dir:str,
dry_run:bool,
verbose:bool,
py_ver: Tuple[int, int],
python_cmds_args: List[List[str]]):
base_path = os.path.join(CMAKE_BUILD_ENV_ROOT, get_native_platform_name(), get_python_root_dir(py_ver))
if sys.platform == 'win32':
python_bin_path = os.path.join(base_path, 'python.exe')
else:
python_bin_path = os.path.join(base_path, 'bin', 'python')
run_in_python_package_dir(
src_root_dir,
dry_run,
verbose,
[[python_bin_path] + cmd_args for cmd_args in python_cmds_args]
)
def patch_sources(src_root_dir: str, dry_run:bool = False, verbose:bool = False):
# TODO(akhropov): Remove when system cuda.cmake is updated for Linux cross-build
|
def get_python_plat_name(platform_name: str):
system, arch = platform_name.split('-')
if system == 'windows':
return 'win_amd64'
elif system == 'darwin':
return 'macosx_11_0_universal2'
else: # linux
return 'manylinux2014_' + arch
def build_r_package(src_root_dir: str, build_native_root_dir: str, platform_name: str, dry_run: bool, verbose: bool):
system, _ = platform_name.split('-')
def get_catboostr_artifact_src_and_dst_name(system: str):
return {
'linux': ('libcatboostr.so', 'libcatboostr.so'),
'darwin': ('libcatboostr.dylib', 'libcatboostr.so'),
'windows': ('catboostr.dll', 'libcatboostr.dll')
}[system]
os.chdir(os.path.join(src_root_dir, 'catboost', 'R-package'))
if not dry_run:
os.makedirs('catboost', exist_ok=True)
entries = [
'DESCRIPTION',
'NAMESPACE',
'README.md',
'R',
'inst',
'man',
'tests'
]
for entry in entries:
if os.path.isdir(entry):
distutils.dir_util.copy_tree(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
else:
distutils.file_util.copy_file(entry, os.path.join('catboost', entry), verbose=verbose, dry_run=dry_run)
binary_dst_dir = os.path.join('catboost', 'inst', 'libs')
if system == 'windows':
binary_dst_dir = os.path.join(binary_dst_dir, 'x64')
if not dry_run:
os.makedirs(binary_dst_dir, exist_ok=True)
src, dst = get_catboostr_artifact_src_and_dst_name(system)
full_src = os.path.join(build_native_root_dir, 'catboost', 'R-package', 'src', src)
full_dst = os.path.join(binary_dst_dir, dst)
if dry_run:
logging.info(f'copying {full_src} -> {full_dst}')
else:
distutils.file_util.copy_file(full_src, full_dst, verbose=verbose, dry_run=dry_run)
r_package_file_name = f'catboost-R-{platform_name}.tgz'
logging.info(f'creating {r_package_file_name}')
if not dry_run:
with tarfile.open(r_package_file_name, "w:gz") as tar:
tar.add('catboost', arcname=os.path.basename('catboost'))
os.chdir(src_root_dir)
def build_jvm_artifacts(
src_root_dir: str,
build_native_root_dir: str,
target_platform: str,
macos_universal_binaries:bool,
have_cuda: str,
dry_run: bool,
verbose: bool):
os.chdir(src_root_dir)
for base_dir, lib_name in [
(os.path.join('catboost', 'jvm-packages', 'catboost4j-prediction'), 'catboost4j-prediction'),
(os.path.join('catboost', 'spark', 'catboost4j-spark', 'core'), 'catboost4j-spark-impl'),
]:
cmd = [
'python3',
os.path.join('catboost', 'jvm-packages', 'tools', 'build_native_for_maven.py'),
'--only-postprocessing',
'--base-dir', base_dir,
'--lib-name', lib_name,
'--build-output-root-dir', build_native_root_dir,
]
if verbose:
cmd += ['--verbose']
if have_cuda:
cmd += ['--have-cuda', f'--cuda-root-dir="{CUDA_ROOT}"']
if macos_universal_binaries:
cmd += ['--macos-universal-binaries']
else:
cmd += ['--target-platform', target_platform]
if verbose:
logging.info(' '.join(cmd))
if not dry_run:
subprocess.check_call(cmd)
def get_exe_files(system:str, name:str) -> List[str]:
return [name + '.exe' if system == 'windows' else name]
def get_static_lib_files(system:str, name:str) -> List[str]:
prefix = '' if system == 'windows' else 'lib'
suffix = '.lib' if system == 'windows' else '.a'
return [prefix + name + sub_suffix + suffix for sub_suffix in ['', '.global']]
def get_shared_lib_files(system:str, name:str) -> List[str]:
if system == 'windows':
return [name + '.lib', name + '.dll']
else:
suffix = '.so' if system == 'linux' else '.dylib'
return ['lib' + name + suffix]
def copy_built_artifacts_to_canonical_place(system: str, real_build_dir:str, build_native_platform_dir:str, dry_run:bool, verbose: bool):
"""
Copy only artifacts that are not copied already by postprocessing in building JVM, R and Python packages
"""
artifacts = [
(os.path.join('catboost', 'app'), get_exe_files(system, 'catboost')),
(os.path.join('catboost', 'libs', 'model_interface'), get_shared_lib_files(system, 'catboostmodel')),
(os.path.join('catboost', 'libs', 'model_interface', 'static'), get_static_lib_files(system, 'catboostmodel_static')),
(os.path.join('catboost', 'libs', 'train_interface'), get_shared_lib_files(system, 'catboost')),
]
for sub_path, files in artifacts:
for f in files:
src = os.path.join(real_build_dir, sub_path, f)
dst = os.path.join(build_native_platform_dir, sub_path, f)
if dry_run:
logging.info(f'copying {src} -> {dst}')
else:
distutils.dir_util.mkpath(os.path.dirname(dst), verbose=verbose, dry_run=dry_run)
distutils.file_util.copy_file(src, dst, verbose=verbose, dry_run=dry_run)
def get_real_build_root_dir(src_root_dir:str, platform_name:str, built_output_root_dir:str):
if os.environ.get('CMAKE_BUILD_CACHE_DIR'):
build_native_root_dir = os.path.join(
os.environ['CMAKE_BUILD_CACHE_DIR'],
hashlib.md5(os.path.abspath(src_root_dir).encode('utf-8')).hexdigest()[:10],
platform_name
)
os.makedirs(build_native_root_dir, exist_ok=True)
return build_native_root_dir
else:
return built_output_root_dir
def build_all_for_one_platform(
src_root_dir:str,
built_output_root_dir:str,
platform_name:str, # either "{system}-{arch}' of 'darwin-universal | distutils.file_util.copy_file(
src=os.path.join(src_root_dir, 'ci', 'cmake', 'cuda.cmake'),
dst=os.path.join(src_root_dir, 'cmake', 'cuda.cmake'),
verbose=verbose,
dry_run=dry_run
) | identifier_body |
greyhound.go | ev3.CheckDriver(devs.OutA, ev3.DriverRcxMotor, ev3.OutA)
ev3.CheckDriver(devs.OutB, ev3.DriverRcxMotor, ev3.OutB)
ev3.CheckDriver(devs.OutC, ev3.DriverRcxMotor, ev3.OutC)
ev3.CheckDriver(devs.OutD, ev3.DriverRcxMotor, ev3.OutD)
// Check sensors
ev3.CheckDriver(devs.In1, ev3.DriverColor, ev3.In1)
ev3.CheckDriver(devs.In2, ev3.DriverColor, ev3.In2)
ev3.CheckDriver(devs.In3, ev3.DriverColor, ev3.In3)
ev3.CheckDriver(devs.In4, ev3.DriverColor, ev3.In4)
// Set sensors mode
setSensorsMode()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Open motors
motorL1 = ev3.OpenTextW(devs.OutA, ev3.DutyCycleSp)
motorL2 = ev3.OpenTextW(devs.OutB, ev3.DutyCycleSp)
motorR1 = ev3.OpenTextW(devs.OutC, ev3.DutyCycleSp)
motorR2 = ev3.OpenTextW(devs.OutD, ev3.DutyCycleSp)
// Reset motor speed
motorL1.Value = 0
motorL2.Value = 0
motorR1.Value = 0
motorR2.Value = 0
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
// Put motors in direct mode
ev3.RunCommand(devs.OutA, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutB, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutC, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutD, ev3.CmdRunDirect)
}
func close() {
// Close buttons
buttons.Close()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Close motors
motorL1.Close()
motorL2.Close()
motorR1.Close()
motorR2.Close()
// Close sensor values
closeSensors()
}
var lastMoveTicks int
var lastSpeedLeft int
var lastSpeedRight int
const accelPerTicks int = 5
const accelSpeedFactor int = 10000
func move(left int, right int, now int) {
ticks := now - lastMoveTicks
lastMoveTicks = now
right *= accelSpeedFactor
left *= accelSpeedFactor
nextSpeedLeft := lastSpeedLeft
nextSpeedRight := lastSpeedRight
delta := ticks * accelPerTicks
// delta := ticks * ticks * accelPerTicks
if left > nextSpeedLeft {
nextSpeedLeft += delta
if nextSpeedLeft > left {
nextSpeedLeft = left
}
} else if left < nextSpeedLeft {
nextSpeedLeft -= delta
if nextSpeedLeft < left {
nextSpeedLeft = left
}
}
if right > nextSpeedRight {
nextSpeedRight += delta
if nextSpeedRight > right {
nextSpeedRight = right
}
} else if right < nextSpeedRight {
nextSpeedRight -= delta
if nextSpeedRight < right {
nextSpeedRight = right
}
}
lastSpeedLeft = nextSpeedLeft
lastSpeedRight = nextSpeedRight
motorL1.Value = nextSpeedLeft / accelSpeedFactor
motorL2.Value = nextSpeedLeft / accelSpeedFactor
motorR1.Value = -nextSpeedRight / accelSpeedFactor
motorR2.Value = -nextSpeedRight / accelSpeedFactor
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
}
func read() {
cF.Sync()
cL.Sync()
cR.Sync()
cB.Sync()
}
func durationToTicks(d time.Duration) int {
return int(d / 1000)
}
func timespanAsTicks(start time.Time, end time.Time) int {
return durationToTicks(end.Sub(start))
}
func currentTicks() int {
return timespanAsTicks(initializationTime, time.Now())
}
func ticksToMillis(ticks int) int {
return ticks / 1000
}
func print(data ...interface{}) {
fmt.Fprintln(os.Stderr, data...)
}
func quit(data ...interface{}) {
close()
log.Fatalln(data...)
}
func waitEnter() {
// Let the button be released if needed
if buttons.Enter == true {
print("wait enter release")
for buttons.Enter == true {
now := currentTicks()
move(0, 0, now)
}
}
// Wait for it to be pressed
print("wait enter")
for buttons.Enter == false {
now := currentTicks()
move(0, 0, now)
if buttons.Back {
newConf, err := config.FromFile("greyhound.toml")
if err != nil {
print("Error reading conf:", err)
} else {
conf = newConf
print("Configuration reloaded:", conf)
}
}
}
}
func waitOneSecond() int {
initializeTime()
print("wait one second")
start := currentTicks()
for {
now := currentTicks()
elapsed := now - start
move(0, 0, now)
if buttons.Enter && buttons.Back {
quit("Done")
}
if elapsed >= 1000000 {
return now
}
}
}
func trimSensor(attr *ev3.Attribute) int {
value := attr.Value + attr.Value1 + attr.Value2
if value < conf.SensorMin {
value = conf.SensorMin
}
value -= conf.SensorMin
if value > conf.SensorSpan {
value = conf.SensorSpan
}
return value
}
func isOnTrack(value int) bool {
return value < conf.SensorSpan
}
func distanceFromSensor(value int) int {
return value * conf.SensorRadius / conf.SensorSpan
}
func positionBetweenSensors(value1 int, value2 int) int {
return (value1 - value2) * conf.SensorRadius / conf.SensorSpan
}
func sign(value int) int {
if value > 0 {
return 1
} else if value < 0 {
return -1
} else {
return 0
}
}
type sensorReadType int
const (
bitB sensorReadType = 1 << iota
bitR
bitL
bitF
)
const (
sensorReadZero sensorReadType = iota
sensorReadB
sensorReadR
sensorReadRB
sensorReadL
sensorReadLB
sensorReadLR
sensorReadLRB
sensorReadF
sensorReadFB
sensorReadFR
sensorReadFRB
sensorReadFL
sensorReadFLB
sensorReadFLR
sensorReadFLRB
)
var sensorReadNames = [16]string{
"---",
"-v-",
"-->",
"-v>",
"<--",
"<v-",
"<->",
"<v>",
"-^-",
"-X-",
"-^>",
"-X>",
"<^-",
"<X-",
"<^>",
"<X>",
}
// const lineStatusStraight = "-|-"
// const lineStatusStraightLeft = "<|-"
// const lineStatusLeft = "<--"
// const lineStatusStraightRight = "-|>"
// const lineStatusRight = "-->"
// const lineStatusFrontLeft = "<^-"
// const lineStatusFrontRight = "-^>"
// const lineStatusBackLeft = "<v-"
// const lineStatusBackRight = "-v>"
// const lineStatusOut = "---"
// const lineStatusCross = "-+-"
func processSensorData() (sensorRead sensorReadType, pos int, hint int, cross bool, out bool) {
read()
f, l, r, b := trimSensor(cB), trimSensor(cL), trimSensor(cR), trimSensor(cB)
sensorRead = sensorReadZero
if isOnTrack(b) {
sensorRead |= bitB
}
if isOnTrack(r) {
sensorRead |= bitR
}
if isOnTrack(l) {
sensorRead |= bitL
}
if isOnTrack(f) { | switch sensorRead {
case sensorReadZero:
case sensorReadB:
case sensorReadF:
// Out
out = true
pos, hint, cross = 0, 0, false
case sensorReadR:
pos = conf.SensorRadius*2 + distanceFromSensor(r)
hint = 0
cross, out = false, false
case sensorReadRB:
pos = conf.SensorRadius + positionBetweenSensors(b, r)
hint = 1
c | sensorRead |= bitF
}
| random_line_split |
greyhound.go | ev3.CheckDriver(devs.OutA, ev3.DriverRcxMotor, ev3.OutA)
ev3.CheckDriver(devs.OutB, ev3.DriverRcxMotor, ev3.OutB)
ev3.CheckDriver(devs.OutC, ev3.DriverRcxMotor, ev3.OutC)
ev3.CheckDriver(devs.OutD, ev3.DriverRcxMotor, ev3.OutD)
// Check sensors
ev3.CheckDriver(devs.In1, ev3.DriverColor, ev3.In1)
ev3.CheckDriver(devs.In2, ev3.DriverColor, ev3.In2)
ev3.CheckDriver(devs.In3, ev3.DriverColor, ev3.In3)
ev3.CheckDriver(devs.In4, ev3.DriverColor, ev3.In4)
// Set sensors mode
setSensorsMode()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Open motors
motorL1 = ev3.OpenTextW(devs.OutA, ev3.DutyCycleSp)
motorL2 = ev3.OpenTextW(devs.OutB, ev3.DutyCycleSp)
motorR1 = ev3.OpenTextW(devs.OutC, ev3.DutyCycleSp)
motorR2 = ev3.OpenTextW(devs.OutD, ev3.DutyCycleSp)
// Reset motor speed
motorL1.Value = 0
motorL2.Value = 0
motorR1.Value = 0
motorR2.Value = 0
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
// Put motors in direct mode
ev3.RunCommand(devs.OutA, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutB, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutC, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutD, ev3.CmdRunDirect)
}
func close() {
// Close buttons
buttons.Close()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Close motors
motorL1.Close()
motorL2.Close()
motorR1.Close()
motorR2.Close()
// Close sensor values
closeSensors()
}
var lastMoveTicks int
var lastSpeedLeft int
var lastSpeedRight int
const accelPerTicks int = 5
const accelSpeedFactor int = 10000
func move(left int, right int, now int) {
ticks := now - lastMoveTicks
lastMoveTicks = now
right *= accelSpeedFactor
left *= accelSpeedFactor
nextSpeedLeft := lastSpeedLeft
nextSpeedRight := lastSpeedRight
delta := ticks * accelPerTicks
// delta := ticks * ticks * accelPerTicks
if left > nextSpeedLeft {
nextSpeedLeft += delta
if nextSpeedLeft > left {
nextSpeedLeft = left
}
} else if left < nextSpeedLeft {
nextSpeedLeft -= delta
if nextSpeedLeft < left {
nextSpeedLeft = left
}
}
if right > nextSpeedRight {
nextSpeedRight += delta
if nextSpeedRight > right {
nextSpeedRight = right
}
} else if right < nextSpeedRight {
nextSpeedRight -= delta
if nextSpeedRight < right {
nextSpeedRight = right
}
}
lastSpeedLeft = nextSpeedLeft
lastSpeedRight = nextSpeedRight
motorL1.Value = nextSpeedLeft / accelSpeedFactor
motorL2.Value = nextSpeedLeft / accelSpeedFactor
motorR1.Value = -nextSpeedRight / accelSpeedFactor
motorR2.Value = -nextSpeedRight / accelSpeedFactor
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
}
func read() {
cF.Sync()
cL.Sync()
cR.Sync()
cB.Sync()
}
func durationToTicks(d time.Duration) int {
return int(d / 1000)
}
func timespanAsTicks(start time.Time, end time.Time) int {
return durationToTicks(end.Sub(start))
}
func | () int {
return timespanAsTicks(initializationTime, time.Now())
}
func ticksToMillis(ticks int) int {
return ticks / 1000
}
func print(data ...interface{}) {
fmt.Fprintln(os.Stderr, data...)
}
func quit(data ...interface{}) {
close()
log.Fatalln(data...)
}
func waitEnter() {
// Let the button be released if needed
if buttons.Enter == true {
print("wait enter release")
for buttons.Enter == true {
now := currentTicks()
move(0, 0, now)
}
}
// Wait for it to be pressed
print("wait enter")
for buttons.Enter == false {
now := currentTicks()
move(0, 0, now)
if buttons.Back {
newConf, err := config.FromFile("greyhound.toml")
if err != nil {
print("Error reading conf:", err)
} else {
conf = newConf
print("Configuration reloaded:", conf)
}
}
}
}
func waitOneSecond() int {
initializeTime()
print("wait one second")
start := currentTicks()
for {
now := currentTicks()
elapsed := now - start
move(0, 0, now)
if buttons.Enter && buttons.Back {
quit("Done")
}
if elapsed >= 1000000 {
return now
}
}
}
func trimSensor(attr *ev3.Attribute) int {
value := attr.Value + attr.Value1 + attr.Value2
if value < conf.SensorMin {
value = conf.SensorMin
}
value -= conf.SensorMin
if value > conf.SensorSpan {
value = conf.SensorSpan
}
return value
}
func isOnTrack(value int) bool {
return value < conf.SensorSpan
}
func distanceFromSensor(value int) int {
return value * conf.SensorRadius / conf.SensorSpan
}
func positionBetweenSensors(value1 int, value2 int) int {
return (value1 - value2) * conf.SensorRadius / conf.SensorSpan
}
func sign(value int) int {
if value > 0 {
return 1
} else if value < 0 {
return -1
} else {
return 0
}
}
type sensorReadType int
const (
bitB sensorReadType = 1 << iota
bitR
bitL
bitF
)
const (
sensorReadZero sensorReadType = iota
sensorReadB
sensorReadR
sensorReadRB
sensorReadL
sensorReadLB
sensorReadLR
sensorReadLRB
sensorReadF
sensorReadFB
sensorReadFR
sensorReadFRB
sensorReadFL
sensorReadFLB
sensorReadFLR
sensorReadFLRB
)
var sensorReadNames = [16]string{
"---",
"-v-",
"-->",
"-v>",
"<--",
"<v-",
"<->",
"<v>",
"-^-",
"-X-",
"-^>",
"-X>",
"<^-",
"<X-",
"<^>",
"<X>",
}
// const lineStatusStraight = "-|-"
// const lineStatusStraightLeft = "<|-"
// const lineStatusLeft = "<--"
// const lineStatusStraightRight = "-|>"
// const lineStatusRight = "-->"
// const lineStatusFrontLeft = "<^-"
// const lineStatusFrontRight = "-^>"
// const lineStatusBackLeft = "<v-"
// const lineStatusBackRight = "-v>"
// const lineStatusOut = "---"
// const lineStatusCross = "-+-"
func processSensorData() (sensorRead sensorReadType, pos int, hint int, cross bool, out bool) {
read()
f, l, r, b := trimSensor(cB), trimSensor(cL), trimSensor(cR), trimSensor(cB)
sensorRead = sensorReadZero
if isOnTrack(b) {
sensorRead |= bitB
}
if isOnTrack(r) {
sensorRead |= bitR
}
if isOnTrack(l) {
sensorRead |= bitL
}
if isOnTrack(f) {
sensorRead |= bitF
}
switch sensorRead {
case sensorReadZero:
case sensorReadB:
case sensorReadF:
// Out
out = true
pos, hint, cross = 0, 0, false
case sensorReadR:
pos = conf.SensorRadius*2 + distanceFromSensor(r)
hint = 0
cross, out = false, false
case sensorReadRB:
pos = conf.SensorRadius + positionBetweenSensors(b, r)
hint = 1
| currentTicks | identifier_name |
greyhound.go | 3.CheckDriver(devs.OutA, ev3.DriverRcxMotor, ev3.OutA)
ev3.CheckDriver(devs.OutB, ev3.DriverRcxMotor, ev3.OutB)
ev3.CheckDriver(devs.OutC, ev3.DriverRcxMotor, ev3.OutC)
ev3.CheckDriver(devs.OutD, ev3.DriverRcxMotor, ev3.OutD)
// Check sensors
ev3.CheckDriver(devs.In1, ev3.DriverColor, ev3.In1)
ev3.CheckDriver(devs.In2, ev3.DriverColor, ev3.In2)
ev3.CheckDriver(devs.In3, ev3.DriverColor, ev3.In3)
ev3.CheckDriver(devs.In4, ev3.DriverColor, ev3.In4)
// Set sensors mode
setSensorsMode()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Open motors
motorL1 = ev3.OpenTextW(devs.OutA, ev3.DutyCycleSp)
motorL2 = ev3.OpenTextW(devs.OutB, ev3.DutyCycleSp)
motorR1 = ev3.OpenTextW(devs.OutC, ev3.DutyCycleSp)
motorR2 = ev3.OpenTextW(devs.OutD, ev3.DutyCycleSp)
// Reset motor speed
motorL1.Value = 0
motorL2.Value = 0
motorR1.Value = 0
motorR2.Value = 0
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
// Put motors in direct mode
ev3.RunCommand(devs.OutA, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutB, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutC, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutD, ev3.CmdRunDirect)
}
func close() {
// Close buttons
buttons.Close()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Close motors
motorL1.Close()
motorL2.Close()
motorR1.Close()
motorR2.Close()
// Close sensor values
closeSensors()
}
var lastMoveTicks int
var lastSpeedLeft int
var lastSpeedRight int
const accelPerTicks int = 5
const accelSpeedFactor int = 10000
func move(left int, right int, now int) {
ticks := now - lastMoveTicks
lastMoveTicks = now
right *= accelSpeedFactor
left *= accelSpeedFactor
nextSpeedLeft := lastSpeedLeft
nextSpeedRight := lastSpeedRight
delta := ticks * accelPerTicks
// delta := ticks * ticks * accelPerTicks
if left > nextSpeedLeft {
nextSpeedLeft += delta
if nextSpeedLeft > left {
nextSpeedLeft = left
}
} else if left < nextSpeedLeft {
nextSpeedLeft -= delta
if nextSpeedLeft < left {
nextSpeedLeft = left
}
}
if right > nextSpeedRight {
nextSpeedRight += delta
if nextSpeedRight > right {
nextSpeedRight = right
}
} else if right < nextSpeedRight {
nextSpeedRight -= delta
if nextSpeedRight < right {
nextSpeedRight = right
}
}
lastSpeedLeft = nextSpeedLeft
lastSpeedRight = nextSpeedRight
motorL1.Value = nextSpeedLeft / accelSpeedFactor
motorL2.Value = nextSpeedLeft / accelSpeedFactor
motorR1.Value = -nextSpeedRight / accelSpeedFactor
motorR2.Value = -nextSpeedRight / accelSpeedFactor
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
}
func read() {
cF.Sync()
cL.Sync()
cR.Sync()
cB.Sync()
}
func durationToTicks(d time.Duration) int {
return int(d / 1000)
}
func timespanAsTicks(start time.Time, end time.Time) int {
return durationToTicks(end.Sub(start))
}
func currentTicks() int {
return timespanAsTicks(initializationTime, time.Now())
}
func ticksToMillis(ticks int) int {
return ticks / 1000
}
func print(data ...interface{}) {
fmt.Fprintln(os.Stderr, data...)
}
func quit(data ...interface{}) {
close()
log.Fatalln(data...)
}
func waitEnter() {
// Let the button be released if needed
if buttons.Enter == true {
print("wait enter release")
for buttons.Enter == true {
now := currentTicks()
move(0, 0, now)
}
}
// Wait for it to be pressed
print("wait enter")
for buttons.Enter == false {
now := currentTicks()
move(0, 0, now)
if buttons.Back {
newConf, err := config.FromFile("greyhound.toml")
if err != nil {
print("Error reading conf:", err)
} else {
conf = newConf
print("Configuration reloaded:", conf)
}
}
}
}
func waitOneSecond() int {
initializeTime()
print("wait one second")
start := currentTicks()
for {
now := currentTicks()
elapsed := now - start
move(0, 0, now)
if buttons.Enter && buttons.Back {
quit("Done")
}
if elapsed >= 1000000 {
return now
}
}
}
func trimSensor(attr *ev3.Attribute) int {
value := attr.Value + attr.Value1 + attr.Value2
if value < conf.SensorMin {
value = conf.SensorMin
}
value -= conf.SensorMin
if value > conf.SensorSpan {
value = conf.SensorSpan
}
return value
}
func isOnTrack(value int) bool |
func distanceFromSensor(value int) int {
return value * conf.SensorRadius / conf.SensorSpan
}
func positionBetweenSensors(value1 int, value2 int) int {
return (value1 - value2) * conf.SensorRadius / conf.SensorSpan
}
func sign(value int) int {
if value > 0 {
return 1
} else if value < 0 {
return -1
} else {
return 0
}
}
type sensorReadType int
const (
bitB sensorReadType = 1 << iota
bitR
bitL
bitF
)
const (
sensorReadZero sensorReadType = iota
sensorReadB
sensorReadR
sensorReadRB
sensorReadL
sensorReadLB
sensorReadLR
sensorReadLRB
sensorReadF
sensorReadFB
sensorReadFR
sensorReadFRB
sensorReadFL
sensorReadFLB
sensorReadFLR
sensorReadFLRB
)
var sensorReadNames = [16]string{
"---",
"-v-",
"-->",
"-v>",
"<--",
"<v-",
"<->",
"<v>",
"-^-",
"-X-",
"-^>",
"-X>",
"<^-",
"<X-",
"<^>",
"<X>",
}
// const lineStatusStraight = "-|-"
// const lineStatusStraightLeft = "<|-"
// const lineStatusLeft = "<--"
// const lineStatusStraightRight = "-|>"
// const lineStatusRight = "-->"
// const lineStatusFrontLeft = "<^-"
// const lineStatusFrontRight = "-^>"
// const lineStatusBackLeft = "<v-"
// const lineStatusBackRight = "-v>"
// const lineStatusOut = "---"
// const lineStatusCross = "-+-"
func processSensorData() (sensorRead sensorReadType, pos int, hint int, cross bool, out bool) {
read()
f, l, r, b := trimSensor(cB), trimSensor(cL), trimSensor(cR), trimSensor(cB)
sensorRead = sensorReadZero
if isOnTrack(b) {
sensorRead |= bitB
}
if isOnTrack(r) {
sensorRead |= bitR
}
if isOnTrack(l) {
sensorRead |= bitL
}
if isOnTrack(f) {
sensorRead |= bitF
}
switch sensorRead {
case sensorReadZero:
case sensorReadB:
case sensorReadF:
// Out
out = true
pos, hint, cross = 0, 0, false
case sensorReadR:
pos = conf.SensorRadius*2 + distanceFromSensor(r)
hint = 0
cross, out = false, false
case sensorReadRB:
pos = conf.SensorRadius + positionBetweenSensors(b, r)
hint = 1
| {
return value < conf.SensorSpan
} | identifier_body |
greyhound.go | ev3.CheckDriver(devs.OutA, ev3.DriverRcxMotor, ev3.OutA)
ev3.CheckDriver(devs.OutB, ev3.DriverRcxMotor, ev3.OutB)
ev3.CheckDriver(devs.OutC, ev3.DriverRcxMotor, ev3.OutC)
ev3.CheckDriver(devs.OutD, ev3.DriverRcxMotor, ev3.OutD)
// Check sensors
ev3.CheckDriver(devs.In1, ev3.DriverColor, ev3.In1)
ev3.CheckDriver(devs.In2, ev3.DriverColor, ev3.In2)
ev3.CheckDriver(devs.In3, ev3.DriverColor, ev3.In3)
ev3.CheckDriver(devs.In4, ev3.DriverColor, ev3.In4)
// Set sensors mode
setSensorsMode()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Open motors
motorL1 = ev3.OpenTextW(devs.OutA, ev3.DutyCycleSp)
motorL2 = ev3.OpenTextW(devs.OutB, ev3.DutyCycleSp)
motorR1 = ev3.OpenTextW(devs.OutC, ev3.DutyCycleSp)
motorR2 = ev3.OpenTextW(devs.OutD, ev3.DutyCycleSp)
// Reset motor speed
motorL1.Value = 0
motorL2.Value = 0
motorR1.Value = 0
motorR2.Value = 0
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
// Put motors in direct mode
ev3.RunCommand(devs.OutA, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutB, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutC, ev3.CmdRunDirect)
ev3.RunCommand(devs.OutD, ev3.CmdRunDirect)
}
func close() {
// Close buttons
buttons.Close()
// Stop motors
ev3.RunCommand(devs.OutA, ev3.CmdStop)
ev3.RunCommand(devs.OutB, ev3.CmdStop)
ev3.RunCommand(devs.OutC, ev3.CmdStop)
ev3.RunCommand(devs.OutD, ev3.CmdStop)
// Close motors
motorL1.Close()
motorL2.Close()
motorR1.Close()
motorR2.Close()
// Close sensor values
closeSensors()
}
var lastMoveTicks int
var lastSpeedLeft int
var lastSpeedRight int
const accelPerTicks int = 5
const accelSpeedFactor int = 10000
func move(left int, right int, now int) {
ticks := now - lastMoveTicks
lastMoveTicks = now
right *= accelSpeedFactor
left *= accelSpeedFactor
nextSpeedLeft := lastSpeedLeft
nextSpeedRight := lastSpeedRight
delta := ticks * accelPerTicks
// delta := ticks * ticks * accelPerTicks
if left > nextSpeedLeft {
nextSpeedLeft += delta
if nextSpeedLeft > left {
nextSpeedLeft = left
}
} else if left < nextSpeedLeft {
nextSpeedLeft -= delta
if nextSpeedLeft < left {
nextSpeedLeft = left
}
}
if right > nextSpeedRight | else if right < nextSpeedRight {
nextSpeedRight -= delta
if nextSpeedRight < right {
nextSpeedRight = right
}
}
lastSpeedLeft = nextSpeedLeft
lastSpeedRight = nextSpeedRight
motorL1.Value = nextSpeedLeft / accelSpeedFactor
motorL2.Value = nextSpeedLeft / accelSpeedFactor
motorR1.Value = -nextSpeedRight / accelSpeedFactor
motorR2.Value = -nextSpeedRight / accelSpeedFactor
motorL1.Sync()
motorL2.Sync()
motorR1.Sync()
motorR2.Sync()
}
func read() {
cF.Sync()
cL.Sync()
cR.Sync()
cB.Sync()
}
func durationToTicks(d time.Duration) int {
return int(d / 1000)
}
func timespanAsTicks(start time.Time, end time.Time) int {
return durationToTicks(end.Sub(start))
}
func currentTicks() int {
return timespanAsTicks(initializationTime, time.Now())
}
func ticksToMillis(ticks int) int {
return ticks / 1000
}
func print(data ...interface{}) {
fmt.Fprintln(os.Stderr, data...)
}
func quit(data ...interface{}) {
close()
log.Fatalln(data...)
}
func waitEnter() {
// Let the button be released if needed
if buttons.Enter == true {
print("wait enter release")
for buttons.Enter == true {
now := currentTicks()
move(0, 0, now)
}
}
// Wait for it to be pressed
print("wait enter")
for buttons.Enter == false {
now := currentTicks()
move(0, 0, now)
if buttons.Back {
newConf, err := config.FromFile("greyhound.toml")
if err != nil {
print("Error reading conf:", err)
} else {
conf = newConf
print("Configuration reloaded:", conf)
}
}
}
}
func waitOneSecond() int {
initializeTime()
print("wait one second")
start := currentTicks()
for {
now := currentTicks()
elapsed := now - start
move(0, 0, now)
if buttons.Enter && buttons.Back {
quit("Done")
}
if elapsed >= 1000000 {
return now
}
}
}
func trimSensor(attr *ev3.Attribute) int {
value := attr.Value + attr.Value1 + attr.Value2
if value < conf.SensorMin {
value = conf.SensorMin
}
value -= conf.SensorMin
if value > conf.SensorSpan {
value = conf.SensorSpan
}
return value
}
func isOnTrack(value int) bool {
return value < conf.SensorSpan
}
func distanceFromSensor(value int) int {
return value * conf.SensorRadius / conf.SensorSpan
}
func positionBetweenSensors(value1 int, value2 int) int {
return (value1 - value2) * conf.SensorRadius / conf.SensorSpan
}
func sign(value int) int {
if value > 0 {
return 1
} else if value < 0 {
return -1
} else {
return 0
}
}
type sensorReadType int
const (
bitB sensorReadType = 1 << iota
bitR
bitL
bitF
)
const (
sensorReadZero sensorReadType = iota
sensorReadB
sensorReadR
sensorReadRB
sensorReadL
sensorReadLB
sensorReadLR
sensorReadLRB
sensorReadF
sensorReadFB
sensorReadFR
sensorReadFRB
sensorReadFL
sensorReadFLB
sensorReadFLR
sensorReadFLRB
)
var sensorReadNames = [16]string{
"---",
"-v-",
"-->",
"-v>",
"<--",
"<v-",
"<->",
"<v>",
"-^-",
"-X-",
"-^>",
"-X>",
"<^-",
"<X-",
"<^>",
"<X>",
}
// const lineStatusStraight = "-|-"
// const lineStatusStraightLeft = "<|-"
// const lineStatusLeft = "<--"
// const lineStatusStraightRight = "-|>"
// const lineStatusRight = "-->"
// const lineStatusFrontLeft = "<^-"
// const lineStatusFrontRight = "-^>"
// const lineStatusBackLeft = "<v-"
// const lineStatusBackRight = "-v>"
// const lineStatusOut = "---"
// const lineStatusCross = "-+-"
func processSensorData() (sensorRead sensorReadType, pos int, hint int, cross bool, out bool) {
read()
f, l, r, b := trimSensor(cB), trimSensor(cL), trimSensor(cR), trimSensor(cB)
sensorRead = sensorReadZero
if isOnTrack(b) {
sensorRead |= bitB
}
if isOnTrack(r) {
sensorRead |= bitR
}
if isOnTrack(l) {
sensorRead |= bitL
}
if isOnTrack(f) {
sensorRead |= bitF
}
switch sensorRead {
case sensorReadZero:
case sensorReadB:
case sensorReadF:
// Out
out = true
pos, hint, cross = 0, 0, false
case sensorReadR:
pos = conf.SensorRadius*2 + distanceFromSensor(r)
hint = 0
cross, out = false, false
case sensorReadRB:
pos = conf.SensorRadius + positionBetweenSensors(b, r)
hint = 1
| {
nextSpeedRight += delta
if nextSpeedRight > right {
nextSpeedRight = right
}
} | conditional_block |
offline.py | 1 列
# 时间戳用长整型
showPoetryDetails = showPoetryDetails. \
withColumn("userId", showPoetryDetails.userId.cast(LongType())). \
withColumn("itemId", showPoetryDetails.itemId.cast(IntegerType()))
# 点击行为评分为 1分
showPoetryDetails = showPoetryDetails. \
withColumn("rating", showPoetryDetails.itemId * 0 + 1)
return showPoetryDetails
def process_collectPoem(self):
# 提取字符串里面的数据
def reg_extract2(string):
ans = re.findall("poemId=(.*?)&collection=(.+)", string)
rating = None
if ans[0][1] == '0':
rating = '-2'
else:
rating = '2'
return [ans[0][0], rating]
get_reg_info2 = F.udf(reg_extract2, ArrayType(StringType()))
collectPoem = self.collectPoem
collectPoem = collectPoem.select(collectPoem.userId, \
get_reg_info2(collectPoem.parameter).alias("info"))
# 从某列的 array 中取出指定下标的值
def get_array_element(row):
return Row(userId=row.userId, \
itemId=row.info[0], rating=row.info[1])
collectPoem = collectPoem.rdd.map(get_array_element).toDF()
# userId 转 LongType(),保持每个分组的字段类型一致
collectPoem = collectPoem. \
withColumn("userId", collectPoem.userId.cast(LongType())). \
withColumn("itemId", collectPoem.itemId.cast(IntegerType())). \
withColumn("rating", collectPoem.rating.cast(IntegerType()))
return collectPoem
def process_evaluatePoem(self):
def reg_extract3(string):
ans = re.findall("like=(.+)&poemId=(.+)", string)
rating = None
if ans[0][0] == '0':
rating = '-2'
else:
rating = '2'
return [rating, ans[0][1]]
get_reg_info3 = F.udf(reg_extract3, ArrayType(StringType()))
evaluatePoem = self.evaluatePoem
evaluatePoem = evaluatePoem.select(evaluatePoem.userId, \
get_reg_info3(evaluatePoem.parameter).alias("info"))
def get_array_element(row):
return Row(userId=row.userId, \
itemId=row.info[1], rating=row.info[0])
evaluatePoem = evaluatePoem.rdd.map(get_array_element).toDF()
evaluatePoem = evaluatePoem. \
withColumn("userId", evaluatePoem.userId.cast(LongType())). \
withColumn("itemId", evaluatePoem.itemId.cast(IntegerType())). \
withColumn("rating", evaluatePoem.rating.cast(IntegerType()))
return evaluatePoem
def processAndUnion(self):
# 分组处理用户行为,并为不同的用户行为打分
showPoetryDetails = self.process_showPoetryDetails()
collectPoem = self.process_collectPoem()
evaluatePoem = self.process_evaluatePoem()
toPoemDetails = self.process_toPoemDetails()
# 纵向合并分组处理的数据,保持列的顺序对齐
user_log = evaluatePoem.union(showPoetryDetails. \
select(showPoetryDetails.itemId, showPoetryDetails.rating,
showPoetryDetails.userId)).union(collectPoem)
user_log = user_log.union(toPoemDetails.select(toPoemDetails.itemId,\
toPoemDetails.rating, toPoemDetails.userId))
# 根据 每个用户-物品对进行分组,求行为分数的总和
user_log = user_log.groupBy(["userId", "itemId"]).sum("rating")
# 根据评分打上标签
user_log = user_log.select(user_log.userId, user_log.itemId, user_log["sum(rating)"].alias("rating"),
F.when(user_log["sum(rating)"] > 0, 1).otherwise(0).alias("label"))
return user_log
def add_negative_sample(self, user_log):
# 算出要填补的 负样本 的数量
label_count = user_log.groupBy("label").count()
row1, row2 = label_count.collect()
diff = row1["count"] - row2["count"]
# 从实时推荐处收集负样本,使得正负样本的比例为 2:1
# TODO(laoliang):换到本地的时候记得修改 ip 和 collection 的名字
client = pymongo.MongoClient("120.26.89.198", 27017)
db = client["Recommendation"]
mycol = db["rs_fucaiyang"]
# 从负样本所在的集合随机选出文档
# 使用 python 操作 mongodb 一定要保证传入的 json 里面的数据类型用的是 python 内置类型
random_document = mycol.aggregate([{"$sample": {"size": int(diff // 10)}}])
# TODO(laoliang):加入通过文本相似度计算得到的每首古诗 Top-100
# 要向 user_log 里面添加负样本,构造数据结构 [{}, {}, {}],后面转换为 dataframe
# 为保证训练集和测试集要独立同分布,从实时推荐的列表里面随机选出制定数量的 user-item对
temp_list = list()
for document in random_document:
userId = document.get("userId")
fuyangben = document.get("fuyangben")
for poemId in fuyangben:
temp_list.append({"userId": int(userId), "itemId": poemId, \
"rating": 0, "label": 0})
# 要保证 python 数据的内置类型和 spark指定的类型相同
schema = StructType([
StructField("userId", LongType()),
StructField("itemId", IntegerType()),
StructField("rating", LongType()),
StructField("label", IntegerType()),
]
)
# 将负样本转换为 spark dataframe
negative_sample = self.spark.createDataFrame(temp_list, schema=schema)
# 纵向合并原始样本集和负样本集
user_log = user_log.union(negative_sample)
# 此处合并了负样本和正样本,但是负样本是从实时推荐列表中保存下来的
# 所有可能有的是用户点击了的样本,找出这些样本并去除
user_log = user_log.groupBy(["userId", "itemId"]).sum("rating")
user_log = user_log.select(user_log.userId, user_log.itemId, user_log["sum(rating)"].alias("rating"),
F.when(user_log["sum(rating)"] > 0, 1).otherwise(0).alias("label"))
return user_log
def merge_userInfo_itemData(self, user_log):
'''将行为数据与用户信息、物品信息进行合并'''
one_hot_df = self.spark.read.format("csv") \
.option("header", True) \
.load("/home/liang/Desktop/python_file/one_hot_df.csv")
one_hot_df = one_hot_df.drop("poemDynasty")
# 因为要根据主键进行合并,要保证主键的数据类型一致
one_hot_df = one_hot_df \
.withColumn("poemId", one_hot_df.poemId.cast(IntegerType())) \
.withColumn("poemStar", one_hot_df.poemStar.cast(FloatType()))
# 相当于 inner join,但是 inner 会报错,所以用这个方法吧
train_sort_model_df = user_log.join(one_hot_df, user_log.itemId \
== one_hot_df.poemId, "outer").dropna()
user_info = self.user_info
user_info = user_info \
.withColumn("userId", user_info.userId.cast(LongType()))
# 合并用户数据
train_sort_model_df = train_sort_model_df.join(user_info.select(user_info.userId, user_info.userSex),\
user_info.userId == train_sort_model_df.userId)
# 经过处理后的数据往往就小很多了,转为pandas处理
# 将 spark dataframe 转换为 pandas dataframe |
dataset = train_sort_model_df.toPandas()
# 从 MySql 读取 用户喜欢的诗人数据,构造特征
engine = create_engine('mysql+pymysql://root:[email protected]:3306/huamanxi')
userandauthor = pd.read_sql_table('userandauthor', engine)
# 为了区分诗人id和古诗id,诗人id加20w
userandauthor.userId = userandauthor.userId.astype("int64")
userandauthor["authorId"] += 200000
temp_list = []
# 对比两个表看当前操作的古诗的诗人是否是用户收藏了的诗人
for row in dataset.iterrows():
temp = row[1].tolist()
userId = temp[0]
authorId = int(temp[5])
if authorId in userandauthor[userandauthor["userId"] \
== userId]["authorId"].values:
temp.append(1) | identifier_body |
|
offline.py | collectPoem.rating.cast(IntegerType()))
return collectPoem
def process_evaluatePoem(self):
def reg_extract3(string):
ans = re.findall("like=(.+)&poemId=(.+)", string)
rating = None
if ans[0][0] == '0':
rating = '-2'
else:
rating = '2'
return [rating, ans[0][1]]
get_reg_info3 = F.udf(reg_extract3, ArrayType(StringType()))
evaluatePoem = self.evaluatePoem
evaluatePoem = evaluatePoem.select(evaluatePoem.userId, \
get_reg_info3(evaluatePoem.parameter).alias("info"))
def get_array_element(row):
return Row(userId=row.userId, \
itemId=row.info[1], rating=row.info[0])
evaluatePoem = evaluatePoem.rdd.map(get_array_element).toDF()
evaluatePoem = evaluatePoem. \
withColumn("userId", evaluatePoem.userId.cast(LongType())). \
withColumn("itemId", evaluatePoem.itemId.cast(IntegerType())). \
withColumn("rating", evaluatePoem.rating.cast(IntegerType()))
return evaluatePoem
def processAndUnion(self):
# 分组处理用户行为,并为不同的用户行为打分
showPoetryDetails = self.process_showPoetryDetails()
collectPoem = self.process_collectPoem()
evaluatePoem = self.process_evaluatePoem()
toPoemDetails = self.process_toPoemDetails()
# 纵向合并分组处理的数据,保持列的顺序对齐
user_log = evaluatePoem.union(showPoetryDetails. \
select(showPoetryDetails.itemId, showPoetryDetails.rating,
showPoetryDetails.userId)).union(collectPoem)
user_log = user_log.union(toPoemDetails.select(toPoemDetails.itemId,\
toPoemDetails.rating, toPoemDetails.userId))
# 根据 每个用户-物品对进行分组,求行为分数的总和
user_log = user_log.groupBy(["userId", "itemId"]).sum("rating")
# 根据评分打上标签
user_log = user_log.select(user_log.userId, user_log.itemId, user_log["sum(rating)"].alias("rating"),
F.when(user_log["sum(rating)"] > 0, 1).otherwise(0).alias("label"))
return user_log
def add_negative_sample(self, user_log):
# 算出要填补的 负样本 的数量
label_count = user_log.groupBy("label").count()
row1, row2 = label_count.collect()
diff = row1["count"] - row2["count"]
# 从实时推荐处收集负样本,使得正负样本的比例为 2:1
# TODO(laoliang):换到本地的时候记得修改 ip 和 collection 的名字
client = pymongo.MongoClient("120.26.89.198", 27017)
db = client["Recommendation"]
mycol = db["rs_fucaiyang"]
# 从负样本所在的集合随机选出文档
# 使用 python 操作 mongodb 一定要保证传入的 json 里面的数据类型用的是 python 内置类型
random_document = mycol.aggregate([{"$sample": {"size": int(diff // 10)}}])
# TODO(laoliang):加入通过文本相似度计算得到的每首古诗 Top-100
# 要向 user_log 里面添加负样本,构造数据结构 [{}, {}, {}],后面转换为 dataframe
# 为保证训练集和测试集要独立同分布,从实时推荐的列表里面随机选出制定数量的 user-item对
temp_list = list()
for document in random_document:
userId = document.get("userId")
fuyangben = document.get("fuyangben")
for poemId in fuyangben:
temp_list.append({"userId": int(userId), "itemId": poemId, \
"rating": 0, "label": 0})
# 要保证 python 数据的内置类型和 spark指定的类型相同
schema = StructType([
StructField("userId", LongType()),
StructField("itemId", IntegerType()),
StructField("rating", LongType()),
StructField("label", IntegerType()),
]
)
# 将负样本转换为 spark dataframe
negative_sample = self.spark.createDataFrame(temp_list, schema=schema)
# 纵向合并原始样本集和负样本集
user_log = user_log.union(negative_sample)
# 此处合并了负样本和正样本,但是负样本是从实时推荐列表中保存下来的
# 所有可能有的是用户点击了的样本,找出这些样本并去除
user_log = user_log.groupBy(["userId", "itemId"]).sum("rating")
user_log = user_log.select(user_log.userId, user_log.itemId, user_log["sum(rating)"].alias("rating"),
F.when(user_log["sum(rating)"] > 0, 1).otherwise(0).alias("label"))
return user_log
def merge_userInfo_itemData(self, user_log):
'''将行为数据与用户信息、物品信息进行合并'''
one_hot_df = self.spark.read.format("csv") \
.option("header", True) \
.load("/home/liang/Desktop/python_file/one_hot_df.csv")
one_hot_df = one_hot_df.drop("poemDynasty")
# 因为要根据主键进行合并,要保证主键的数据类型一致
one_hot_df = one_hot_df \
.withColumn("poemId", one_hot_df.poemId.cast(IntegerType())) \
.withColumn("poemStar", one_hot_df.poemStar.cast(FloatType()))
# 相当于 inner join,但是 inner 会报错,所以用这个方法吧
train_sort_model_df = user_log.join(one_hot_df, user_log.itemId \
== one_hot_df.poemId, "outer").dropna()
user_info = self.user_info
user_info = user_info \
.withColumn("userId", user_info.userId.cast(LongType()))
# 合并用户数据
train_sort_model_df = train_sort_model_df.join(user_info.select(user_info.userId, user_info.userSex),\
user_info.userId == train_sort_model_df.userId)
# 经过处理后的数据往往就小很多了,转为pandas处理
# 将 spark dataframe 转换为 pandas dataframe
dataset = train_sort_model_df.toPandas()
# 从 MySql 读取 用户喜欢的诗人数据,构造特征
engine = create_engine('mysql+pymysql://root:[email protected]:3306/huamanxi')
userandauthor = pd.read_sql_table('userandauthor', engine)
# 为了区分诗人id和古诗id,诗人id加20w
userandauthor.userId = userandauthor.userId.astype("int64")
userandauthor["authorId"] += 200000
temp_list = []
# 对比两个表看当前操作的古诗的诗人是否是用户收藏了的诗人
for row in dataset.iterrows():
temp = row[1].tolist()
userId = temp[0]
authorId = int(temp[5])
if authorId in userandauthor[userandauthor["userId"] \
== userId]["authorId"].values:
temp.append(1)
else:
temp.append(0)
temp_list.append(temp)
columns = dataset.columns.tolist()
columns.append('collectAuthor')
df = pd.DataFrame(temp_list, columns=columns)
# 去除排序模型建模不需要的列
df.drop(columns=["userId", "itemId", "poemAuthorId", "poemId"], inplace=True)
# 将 one-hot编码的特征转换为 int,为 Xgboost 建模需要
# print(list(df.columns)[3:-2])
# print(df.info())
for i in df.columns[3:-2]:
df[i] = df[i].astype("int64")
# sys.exit("yundan")
return df
def preprocessing(self):
self.preparation()
user_log = self.processAndUnion() # pyspark.sql.dataframe
user_log = self.add_negative_sample(user_log)
train_sort_model_dataset = self.merge_userInfo_itemData(user_log) # pandas.dataframe
print(train_sort_model_dataset)
return user_log, train_sort_model_dataset
class CollaborativeFilteringBaseModel(object):
'''基于模型的协同过滤'''
def __init__(self, user_log):
self.user_log = user_log.toPandas()
print(self.user_log.shape)
def train_model(self) -> dict:
reader = Reader(rating_scale=(-2, 5))
data = Dataset.load_from_df(self.user_log[["userId", "itemId", \
"rating"]], reader)
# 搜索的参数范围
param_grid = {'n_factors': list(range(20, 110, 10)), 'n_epochs': list(range(10 | , 30, 2)),
| conditional_block |
|
offline.py | 类型一致
collectPoem = collectPoem. \
withColumn("userId", collectPoem.userId.cast(LongType())). \
withColumn("itemId", collectPoem.itemId.cast(IntegerType())). \
withColumn("rating", collectPoem.rating.cast(IntegerType()))
return collectPoem
def process_evaluatePoem(self):
def reg_extract3(string):
ans = re.findall("like=(.+)&poemId=(.+)", string)
rating = None
if ans[0][0] == '0':
rating = '-2'
else:
rating = '2'
return [rating, ans[0][1]]
get_reg_info3 = F.udf(reg_extract3, ArrayType(StringType()))
evaluatePoem = self.evaluatePoem
evaluatePoem = evaluatePoem.select(evaluatePoem.userId, \
get_reg_info3(evaluatePoem.parameter).alias("info"))
def get_array_element(row):
return Row(userId=row.userId, \
itemId=row.info[1], rating=row.info[0])
evaluatePoem = evaluatePoem.rdd.map(get_array_element).toDF()
evaluatePoem = evaluatePoem. \
withColumn("userId", evaluatePoem.userId.cast(LongType())). \
withColumn("itemId", evaluatePoem.itemId.cast(IntegerType())). \
withColumn("rating", evaluatePoem.rating.cast(IntegerType()))
return evaluatePoem
def processAndUnion(self):
# 分组处理用户行为,并为不同的用户行为打分
showPoetryDetails = self.process_showPoetryDetails()
collectPoem = self.process_collectPoem()
evaluatePoem = self.process_evaluatePoem()
toPoemDetails = self.process_toPoemDetails()
# 纵向合并分组处理的数据,保持列的顺序对齐
user_log = evaluatePoem.union(showPoetryDetails. \
select(showPoetryDetails.itemId, showPoetryDetails.rating,
showPoetryDetails.userId)).union(collectPoem)
user_log = user_log.union(toPoemDetails.select(toPoemDetails.itemId,\
toPoemDetails.rating, toPoemDetails.userId))
# 根据 每个用户-物品对进行分组,求行为分数的总和
user_log = user_log.groupBy(["userId", "itemId"]).sum("rating")
# 根据评分打上标签
user_log = user_log.select(user_log.userId, user_log.itemId, user_log["sum(rating)"].alias("rating"),
F.when(user_log["sum(rating)"] > 0, 1).otherwise(0).alias("label"))
return user_log
def add_negative_sample(self, user_log):
# 算出要填补的 负样本 的数量
label_count = user_log.groupBy("label").count()
row1, row2 = label_count.collect()
diff = row1["count"] - row2["count"]
# 从实时推荐处收集负样本,使得正负样本的比例为 2:1
# TODO(laoliang):换到本地的时候记得修改 ip 和 collection 的名字
client = pymongo.MongoClient("120.26.89.198", 27017)
db = client["Recommendation"]
mycol = db["rs_fucaiyang"]
# 从负样本所在的集合随机选出文档
# 使用 python 操作 mongodb 一定要保证传入的 json 里面的数据类型用的是 python 内置类型
random_document = mycol.aggregate([{"$sample": {"size": int(diff // 10)}}])
# TODO(laoliang):加入通过文本相似度计算得到的每首古诗 Top-100
# 要向 user_log 里面添加负样本,构造数据结构 [{}, {}, {}],后面转换为 dataframe
# 为保证训练集和测试集要独立同分布,从实时推荐的列表里面随机选出制定数量的 user-item对
temp_list = list()
for document in random_document:
userId = document.get("userId")
fuyangben = document.get("fuyangben")
for poemId in fuyangben:
temp_list.append({"userId": int(userId), "itemId": poemId, \
"rating": 0, "label": 0})
# 要保证 python 数据的内置类型和 spark指定的类型相同
schema = StructType([
StructField("userId", LongType()),
StructField("itemId", IntegerType()),
StructField("rating", LongType()),
StructField("label", IntegerType()),
]
)
# 将负样本转换为 spark dataframe
negative_sample = self.spark.createDataFrame(temp_list, schema=schema)
# 纵向合并原始样本集和负样本集
user_log = user_log.union(negative_sample)
# 此处合并了负样本和正样本,但是负样本是从实时推荐列表中保存下来的
# 所有可能有的是用户点击了的样本,找出这些样本并去除
user_log = user_log.groupBy(["userId", "itemId"]).sum("rating")
user_log = user_log.select(user_log.userId, user_log.itemId, user_log["sum(rating)"].alias("rating"),
F.when(user_log["sum(rating)"] > 0, 1).otherwise(0).alias("label"))
return user_log
def merge_userInfo_itemData(self, user_log):
'''将行为数据与用户信息、物品信息进行合并'''
one_hot_df = self.spark.read.format("csv") \
.option("header", True) \
.load("/home/liang/Desktop/python_file/one_hot_df.csv")
one_hot_df = one_hot_df.drop("poemDynasty")
# 因为要根据主键进行合并,要保证主键的数据类型一致
one_hot_df = one_hot_df \
.withColumn("poemId", one_hot_df.poemId.cast(IntegerType())) \
.withColumn("poemStar", one_hot_df.poemStar.cast(FloatType()))
# 相当于 inner join,但是 inner 会报错,所以用这个方法吧
train_sort_model_df = user_log.join(one_hot_df, user_log.itemId \
== one_hot_df.poemId, "outer").dropna()
user_info = self.user_info
user_info = user_info \
.withColumn("userId", user_info.userId.cast(LongType()))
# 合并用户数据
train_sort_model_df = train_sort_model_df.join(user_info.select(user_info.userId, user_info.userSex),\
user_info.userId == train_sort_model_df.userId)
# 经过处理后的数据往往就小很多了,转为pandas处理
# 将 spark dataframe 转换为 pandas dataframe
dataset = train_sort_model_df.toPandas()
# 从 MySql 读取 用户喜欢的诗人数据,构造特征
engine = create_engine('mysql+pymysql://root:[email protected]:3306/huamanxi')
userandauthor = pd.read_sql_table('userandauthor', engine)
# 为了区分诗人id和古诗id,诗人id加20w
userandauthor.userId = userandauthor.userId.astype("int64")
userandauthor["authorId"] += 200000
temp_list = []
# 对比两个表看当前操作的古诗的诗人是否是用户收藏了的诗人
for row in dataset.iterrows():
temp = row[1].tolist()
userId = temp[0]
authorId = int(temp[5])
if authorId in userandauthor[userandauthor["userId"] \
== userId]["authorId"].values:
temp.append(1)
else:
temp.append(0)
temp_list.append(temp)
columns = dataset.columns.tolist()
columns.append('collectAuthor')
df = pd.DataFrame(temp_list, columns=columns)
# 去除排序模型建模不需要的列
df.drop(columns=["userId", "itemId", "poemAuthorId", "poemId"], inplace=True)
# 将 one-hot编码的特征转换为 int,为 Xgboost 建模需要
# print(list(df.columns)[3:-2])
# print(df.info())
for i in df.columns[3:-2]:
df[i] = df[i].astype("int64")
# sys.exit("yundan")
return df
def preprocessing(self):
self.preparation()
user_log = self.processAndUnion() # pyspark.sql.dataframe
user_log = self.add_negative_sample(user_log)
train_sort_model_dataset = self.merge_userInfo_itemData(user_log) # pandas.dataframe
print(train_sort_model_dataset)
return user_log, train_sort_model_dataset
class CollaborativeFilteringBaseModel(object):
'''基于模型的协同过滤'''
def __init__(self, user_log):
self.user_log = user_log.toPandas()
print(self.user_log.shape)
def train_model(self) -> dict:
reader = Reader(rating_scale=(-2, 5)) | data = Dataset.load_from_df(self.user_log[["userId", "itemId", \
"rating"]], reader)
| random_line_split |
|
offline.py | self.spark = SparkSession.builder.appName("ddd").getOrCreate()
sc = self.spark.sparkContext
sqlContext = SQLContext(sc)
hive_context = HiveContext(sc)
# hive_context.setConf("hive.metastore.uris", "thrift://localhost:9083")
# 读取用户信息表
self.user_info = sqlContext.read.format("jdbc"). \
option("url", "jdbc:mysql://39.96.165.58:3306/huamanxi"). \
option("driver", "com.mysql.cj.jdbc.Driver"). \
option("dbtable", "user"). \
option("user", "root"). \
option("password", "12345678").load()
# 用户行为数据表,后面改为从 HDFS 读取
self.accesslog = sqlContext.read.format("jdbc"). \
option("url", "jdbc:mysql://39.96.165.58:3306/huamanxi"). \
option("driver", "com.mysql.cj.jdbc.Driver"). \
option("dbtable", "accesslog"). \
option("user", "root"). \
option("password", "12345678").load()
# self.accesslog = hive_context.sql("SELECT * FROM flume_sink.useraccesslog")
# # print(self.accesslog.count())
# self.accesslog.show()
# print(self.accesslog.columns)
# sys.exit("憨憨")
self.item_info = self.spark.read.format("csv") \
.option("header", True) \
.load("/home/liang/Desktop/python_file/source.csv")
def read_accesslog_from_hdfs(self):
# 实时日志流的存储是每5个点击数据存储一次
client = Client("http://localhost:50070")
file_names = client.list("/hadoop_file")
ss = ""
for file_name in file_names:
with client.read("/hadoop_file/" + file_name, encoding="utf-8") as reader:
for line in reader:
# 去除测试数据
if line.startswith("filed1"):
continue
ss += line
def preparation(self):
# 去除暂时用不上的字段
accesslog = self.accesslog.drop('logId', 'username', \
'accessIP', 'executionTime', "visitTime")
# 去除 关于search 的 row
accesslog = accesslog.filter(~accesslog.url.like("search%"))
def get_last(row):
'''
只知道要处理的那一列的另外两列的名字,
要处理那一列的名字不知道干啥了,所以用此方法
'''
no_idea = row.asDict()
temp = None
for k, v in no_idea.items():
if k != "userId" and k != "paramter":
temp = v
ans = temp[2]
return Row(userId=row.userId, parameter=row.parameter, real_method=ans)
# 截取 spring-aop 记录的方法名
accesslog = accesslog.select(accesslog.userId, accesslog.parameter, \
functions.split(accesslog.method, "]")).rdd.map(get_last).toDF()
poem_related = ["showPoetryDetails", "collectPoem", "evaluatePoem", "toPoemDetails"]
poet_related = ["collectAuthor", "showAuthorDetails"]
# 自定义函数 udf
def should_remove(method):
if method in poem_related:
return method
return '-1'
# 定义返回值类型为 pyspark.sql.Type.StringType
check = F.udf(should_remove, StringType())
# 注意使用sql筛选,String类型要加 ""
accesslog = accesslog.withColumn('poem_related', \
check(accesslog['real_method'])). \
filter("poem_related <> '-1'")
accesslog = accesslog.drop("real_method")
self.accesslog = accesslog
self.showPoetryDetails = accesslog.filter(accesslog.poem_related \
== "showPoetryDetails")
self.collectPoem = accesslog.filter(accesslog.poem_related \
== "collectPoem")
self.evaluatePoem = accesslog.filter(accesslog.poem_related \
== "evaluatePoem")
self.toPoemDetails = accesslog.filter(accesslog.poem_related \
== "toPoemDetails")
def process_toPoemDetails(self):
def reg_extract(string):
ans = re.findall("poemId=(.+)", string)[0]
return ans
use_reg = F.udf(reg_extract, StringType())
toPoemDetails = self.toPoemDetails
toPoemDetails = toPoemDetails.select(toPoemDetails.userId, \
use_reg(toPoemDetails.parameter).alias("itemId"))
toPoemDetails = toPoemDetails. \
withColumn("userId", toPoemDetails.userId.cast(LongType())). \
withColumn("itemId", toPoemDetails.itemId.cast(IntegerType()))
toPoemDetails = toPoemDetails.withColumn("rating", toPoemDetails.itemId*0+1)
return toPoemDetails
def process_showPoetryDetails(self):
# 提取 parameter 字符串里边的 itemId
def reg_extract(string):
ans = re.findall("d=(.*?)&", string)[0]
return ans
get_reg_info = F.udf(reg_extract, StringType())
showPoetryDetails = self.showPoetryDetails
showPoetryDetails = showPoetryDetails.select(showPoetryDetails.userId, \
get_reg_info(showPoetryDetails.parameter).alias("itemId"))
# 修改 schema 使得某个字段为 int,方便造出一个值全为 1 列
# 时间戳用长整型
showPoetryDetails = showPoetryDetails. \
withColumn("userId", showPoetryDetails.userId.cast(LongType())). \
withColumn("itemId", showPoetryDetails.itemId.cast(IntegerType()))
# 点击行为评分为 1分
showPoetryDetails = showPoetryDetails. \
withColumn("rating", showPoetryDetails.itemId * 0 + 1)
return showPoetryDetails
def process_collectPoem(self):
# 提取字符串里面的数据
def reg_extract2(string):
ans = re.findall("poemId=(.*?)&collection=(.+)", string)
rating = None
if ans[0][1] == '0':
rating = '-2'
else:
rating = '2'
return [ans[0][0], rating]
get_reg_info2 = F.udf(reg_extract2, ArrayType(StringType()))
collectPoem = self.collectPoem
collectPoem = collectPoem.select(collectPoem.userId, \
get_reg_info2(collectPoem.parameter).alias("info"))
# 从某列的 array 中取出指定下标的值
def get_array_element(row):
return Row(userId=row.userId, \
itemId=row.info[0], rating=row.info[1])
collectPoem = collectPoem.rdd.map(get_array_element).toDF()
# userId 转 LongType(),保持每个分组的字段类型一致
collectPoem = collectPoem. \
withColumn("userId", collectPoem.userId.cast(LongType())). \
withColumn("itemId", collectPoem.itemId.cast(IntegerType())). \
withColumn("rating", collectPoem.rating.cast(IntegerType()))
return collectPoem
def process_evaluatePoem(self):
def reg_extract3(string):
ans = re.findall("like=(.+)&poemId=(.+)", string)
rating = None
if ans[0][0] == '0':
rating = '-2'
else:
rating = '2'
return [rating, ans[0][1]]
get_reg_info3 = F.udf(reg_extract3, ArrayType(StringType()))
evaluatePoem = self.evaluatePoem
evaluatePoem = evaluatePoem.select(evaluatePoem.userId, \
get_reg_info3(evaluatePoem.parameter).alias("info"))
def get_array_element(row):
return Row(userId=row.userId, \
itemId=row.info[1], rating=row.info[0])
evaluatePoem = evaluatePoem.rdd.map(get_array_element).toDF()
evaluatePoem = evaluatePoem. \
withColumn("userId", evaluatePoem.userId.cast(LongType())). \
withColumn("itemId", evaluatePoem.itemId.cast(IntegerType())). \
withColumn("rating", evaluatePoem.rating.cast(IntegerType()))
return evaluatePoem
def processAndUnion(self):
# 分组处理用户行为,并为不同的用户行为打分
showPoetryDetails = self.process_showPoetryDetails()
collectPoem = self.process_collectPoem()
evaluatePoem = self.process_evaluatePoem()
toPoemDetails = self.process_toPoemDetails()
# 纵向合并分组处理的数据,保持列的顺序对齐
user_log = evaluatePoem.union(showPoetryDetails. \
select(showPoetryDetails.itemId, showPoetryDetails.rating,
showPoetryDetails.userId)).union(collectPoem)
user_log = user_log.union(toPoemDetails.select(toPoemDetails.itemId,\
toPoemDetails.rating, | identifier_name |
||
json_rpc.py | -values are not allowed for Service Methods
- handles only HTTP POST
- JSON-RPC Version < 2.0 (same as 1.2) not supported
TODOs:
- more Comments
- Examples (doctest?)
- Factor out handler methods to reuse in other frameworks
"""
from google.appengine.ext import webapp
from inspect import getargspec
import cgi
import logging
import simplejson
import sys
import traceback
JSON_RPC_KEYS = frozenset(['method', 'jsonrpc', 'params', 'id'])
def ServiceMethod(fn):
"""Decorator to mark a method of a JsonRpcHandler as ServiceMethod.
This exposes methods to the RPC interface.
:param function fn: A function.
:returns: A function.
TODO:
- Warn when applied to underscore methods
"""
fn.IsServiceMethod = True
return fn
class JsonRpcError(Exception):
"""Baseclass for all JSON-RPC Errors.
Errors are described in the JSON-RPC 2.0 specs, related HTTP Status
Codes are described in the json-rpc-over-http proposal.
"""
code = 0
message = None
status = 500
def __init__(self, message=None):
if message is not None:
self.message = message
def __str__(self):
return(self.message)
def __repr__(self):
return '%s("%s")' % (str(self.__class__.__name__), self.message)
def getJsonData(self):
error = {
'code' : self.code ,
'message' : '%s: %s' %
(str(self.__class__.__name__),
str(self.message))}
return error
class ParseError(JsonRpcError):
"""Invalid JSON was received by the server.
An error occurred on the server while parsing the JSON text.
"""
code = -32700
message = 'Parse error'
class InvalidRequestError(JsonRpcError):
"""The JSON sent is not a valid Request object."""
code = -32600
message = 'Invalid Request'
status = 400
class MethodNotFoundError(JsonRpcError):
"""The method does not exist / is not available."""
code = -32601
message = 'Method not found'
status = 404
class InvalidParamsError(JsonRpcError):
"""Invalid method parameter(s)."""
code = -32602
message = 'Invalid params'
class InternalError(JsonRpcError):
"""Internal JSON-RPC error."""
code = -32603
message = 'Internal error'
class ServerError(JsonRpcError):
"""Base Class for implementation-defined Server Errors.
The Error Code must be between -32099..-32000
"""
code = -32000
message = 'Server Error'
class JsonRpcMessage(object):
"""A single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
def __init__(self, json=None):
super(JsonRpcMessage, self).__init__()
self.message_id = None
self.notification = False
self.error = None
self.result = None
if json is not None:
self.from_json(json)
def from_json(self, json):
"""Parses a single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
try:
if not isinstance(json, dict):
raise InvalidRequestError(
'Invalid JSON-RPC Message; must be an object')
if not set(json.keys()) <= JSON_RPC_KEYS:
raise InvalidRequestError('Invalid members in request object')
if not ('jsonrpc' in json and json['jsonrpc'] == '2.0'):
raise InvalidRequestError('Server supports JSON-RPC 2.0 only')
if 'method' not in json:
raise InvalidRequestError('No method specified')
if not isinstance(json['method'], basestring):
raise InvalidRequestError('Method must be a string')
self.method_name = json['method']
if 'params' in json:
params = json['params']
if not isinstance(params, (dict, list, tuple)):
raise InvalidRequestError(
"'params' must be an array or object")
self.params = params
if 'id' not in json:
self.notification = True
else:
self.message_id = json['id']
except InvalidRequestError, ex:
self.error = ex
logging.error('Encountered invalid json message')
class JsonRpcHandler(webapp.RequestHandler):
"""Subclass this handler to implement a JSON-RPC handler.
Annotate methods with @ServiceMethod to expose them and make them callable
via JSON-RPC. Currently methods with *args or **kwargs are not supported
as service-methods. All parameters have to be named explicitly.
"""
def __init__(self):
webapp.RequestHandler.__init__(self)
def post(self):
self.handle_request()
def handle_request(self):
"""Handles POST request."""
self.response.headers['Content-Type'] = 'application/json-rpc'
try:
logging.debug("Raw JSON-RPC: %s", self.request.body)
messages, batch_request = self.parse_body(self.request.body)
except (InvalidRequestError, ParseError), ex:
logging.error(ex)
self.error(ex.status)
body = self._build_error(ex)
self.response.out.write(simplejson.dumps(body))
else:
for msg in messages:
self.handle_message(msg)
responses = self.get_responses(messages)
if len(responses) == 0:
# Only notifications were sent
self.error(204)
return
if batch_request:
#TODO Which http_status to set for batches?
self.error(200)
body = [r[1] for r in responses]
self.response.out.write(simplejson.dumps(body))
else:
if len(responses) != 1:
# This should never happen
raise InternalError() # pragma: no cover
status, body = responses[0]
self.error(status)
self.response.out.write(simplejson.dumps(body))
def get_responses(self, messages):
"""Gets a list of responses from all 'messages'.
Responses are a tuple of HTTP-status and body.
A response may be None if the message was a notification and will be
excluded from the returned list.
:param list messages: JSON messages.
:returns: List of responses.
"""
responses = []
for msg in messages:
resp = self.get_response(msg)
if resp is not None:
responses.append(resp)
return responses
def handle_message(self, msg):
"""Executes a message.
The method of the message is executed.
Errors and/or results are written back to the message.
:param dict msg: A JSON-RPC message.
"""
if msg.error != None:
return
else:
try:
method = self.get_service_method(msg.method_name)
params = getattr(msg, 'params', None)
msg.result = self.execute_method(method, params)
except (MethodNotFoundError, InvalidParamsError, ServerError), ex:
logging.error(ex)
msg.error = ex
except Exception, ex:
logging.error(ex)
ex = InternalError("Error executing service method")
ex.data = ''.join(traceback.format_exception(*sys.exc_info()))
msg.error = ex
def parse_body(self, body):
"""Parses the body of POST request.
Validates for correct JSON and returns a tuple with a list of JSON-RPC
messages and wether the request was a batch-request.
Raises ParseError and InvalidRequestError.
:param string body: The HTTP body.
"""
try:
json = simplejson.loads(body)
except ValueError:
raise ParseError()
messages = []
if isinstance(json, (list, tuple)):
if len(json) == 0:
raise InvalidRequestError('Recieved an empty batch message')
batch_request = True
for obj in json:
msg = JsonRpcMessage(obj)
messages.append(msg)
if isinstance(json, (dict)):
batch_request = False
msg = JsonRpcMessage(json)
messages.append(msg)
return messages, batch_request
def get_response(self, msg):
"""Gets the response object for a message.
Returns a tuple of a HTTP-status and a json object or None.
The JSON object may be a JSON-RPC error object or a result object.
None is returned if the message was a notification.
:param dict msg: A JSON-RPC message.
:returns: Tuple with status and result.
"""
if msg.notification:
r | elif msg.error:
return (msg.error.status,
self._build_error(msg.error, msg.message_id))
elif msg.result:
return (200, self._build_result(msg))
else: # pragma: no cover
# Should never be reached
logging.warn('Message neither contains an error nor a result')
def _build_error(self, err, message_id=None):
return {'jsonrpc':'2.0',
'error':err.getJsonData(),
'id':message_id}
def _build_result(self, msg):
return {'jsonrpc':'2.0 | eturn None
| conditional_block |
json_rpc.py | json-rpc-over-http proposal.
"""
code = 0
message = None
status = 500
def __init__(self, message=None):
if message is not None:
self.message = message
def __str__(self):
return(self.message)
def __repr__(self):
return '%s("%s")' % (str(self.__class__.__name__), self.message)
def getJsonData(self):
error = {
'code' : self.code ,
'message' : '%s: %s' %
(str(self.__class__.__name__),
str(self.message))}
return error
class ParseError(JsonRpcError):
"""Invalid JSON was received by the server.
An error occurred on the server while parsing the JSON text.
"""
code = -32700
message = 'Parse error'
class InvalidRequestError(JsonRpcError):
"""The JSON sent is not a valid Request object."""
code = -32600
message = 'Invalid Request'
status = 400
class MethodNotFoundError(JsonRpcError):
"""The method does not exist / is not available."""
code = -32601
message = 'Method not found'
status = 404
class InvalidParamsError(JsonRpcError):
"""Invalid method parameter(s)."""
code = -32602
message = 'Invalid params'
class InternalError(JsonRpcError):
"""Internal JSON-RPC error."""
code = -32603
message = 'Internal error'
class ServerError(JsonRpcError):
"""Base Class for implementation-defined Server Errors.
The Error Code must be between -32099..-32000
"""
code = -32000
message = 'Server Error'
class JsonRpcMessage(object):
"""A single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
def __init__(self, json=None):
super(JsonRpcMessage, self).__init__()
self.message_id = None
self.notification = False
self.error = None
self.result = None
if json is not None:
self.from_json(json)
def from_json(self, json):
"""Parses a single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
try:
if not isinstance(json, dict):
raise InvalidRequestError(
'Invalid JSON-RPC Message; must be an object')
if not set(json.keys()) <= JSON_RPC_KEYS:
raise InvalidRequestError('Invalid members in request object')
if not ('jsonrpc' in json and json['jsonrpc'] == '2.0'):
raise InvalidRequestError('Server supports JSON-RPC 2.0 only')
if 'method' not in json:
raise InvalidRequestError('No method specified')
if not isinstance(json['method'], basestring):
raise InvalidRequestError('Method must be a string')
self.method_name = json['method']
if 'params' in json:
params = json['params']
if not isinstance(params, (dict, list, tuple)):
raise InvalidRequestError(
"'params' must be an array or object")
self.params = params
if 'id' not in json:
self.notification = True
else:
self.message_id = json['id']
except InvalidRequestError, ex:
self.error = ex
logging.error('Encountered invalid json message')
class JsonRpcHandler(webapp.RequestHandler):
"""Subclass this handler to implement a JSON-RPC handler.
Annotate methods with @ServiceMethod to expose them and make them callable
via JSON-RPC. Currently methods with *args or **kwargs are not supported
as service-methods. All parameters have to be named explicitly.
"""
def __init__(self):
webapp.RequestHandler.__init__(self)
def post(self):
self.handle_request()
def handle_request(self):
"""Handles POST request."""
self.response.headers['Content-Type'] = 'application/json-rpc'
try:
logging.debug("Raw JSON-RPC: %s", self.request.body)
messages, batch_request = self.parse_body(self.request.body)
except (InvalidRequestError, ParseError), ex:
logging.error(ex)
self.error(ex.status)
body = self._build_error(ex)
self.response.out.write(simplejson.dumps(body))
else:
for msg in messages:
self.handle_message(msg)
responses = self.get_responses(messages)
if len(responses) == 0:
# Only notifications were sent
self.error(204)
return
if batch_request:
#TODO Which http_status to set for batches?
self.error(200)
body = [r[1] for r in responses]
self.response.out.write(simplejson.dumps(body))
else:
if len(responses) != 1:
# This should never happen
raise InternalError() # pragma: no cover
status, body = responses[0]
self.error(status)
self.response.out.write(simplejson.dumps(body))
def get_responses(self, messages):
"""Gets a list of responses from all 'messages'.
Responses are a tuple of HTTP-status and body.
A response may be None if the message was a notification and will be
excluded from the returned list.
:param list messages: JSON messages.
:returns: List of responses.
"""
responses = []
for msg in messages:
resp = self.get_response(msg)
if resp is not None:
responses.append(resp)
return responses
def handle_message(self, msg):
"""Executes a message.
The method of the message is executed.
Errors and/or results are written back to the message.
:param dict msg: A JSON-RPC message.
"""
if msg.error != None:
return
else:
try:
method = self.get_service_method(msg.method_name)
params = getattr(msg, 'params', None)
msg.result = self.execute_method(method, params)
except (MethodNotFoundError, InvalidParamsError, ServerError), ex:
logging.error(ex)
msg.error = ex
except Exception, ex:
logging.error(ex)
ex = InternalError("Error executing service method")
ex.data = ''.join(traceback.format_exception(*sys.exc_info()))
msg.error = ex
def parse_body(self, body):
"""Parses the body of POST request.
Validates for correct JSON and returns a tuple with a list of JSON-RPC
messages and wether the request was a batch-request.
Raises ParseError and InvalidRequestError.
:param string body: The HTTP body.
"""
try:
json = simplejson.loads(body)
except ValueError:
raise ParseError()
messages = []
if isinstance(json, (list, tuple)):
if len(json) == 0:
raise InvalidRequestError('Recieved an empty batch message')
batch_request = True
for obj in json:
msg = JsonRpcMessage(obj)
messages.append(msg)
if isinstance(json, (dict)):
batch_request = False
msg = JsonRpcMessage(json)
messages.append(msg)
return messages, batch_request
def get_response(self, msg):
"""Gets the response object for a message.
Returns a tuple of a HTTP-status and a json object or None.
The JSON object may be a JSON-RPC error object or a result object.
None is returned if the message was a notification.
:param dict msg: A JSON-RPC message.
:returns: Tuple with status and result.
"""
if msg.notification:
return None
elif msg.error:
return (msg.error.status,
self._build_error(msg.error, msg.message_id))
elif msg.result:
return (200, self._build_result(msg))
else: # pragma: no cover
# Should never be reached
logging.warn('Message neither contains an error nor a result')
def _build_error(self, err, message_id=None):
return {'jsonrpc':'2.0',
'error':err.getJsonData(),
'id':message_id}
def _build_result(self, msg):
return {'jsonrpc':'2.0',
'result':msg.result,
'id':msg.message_id}
def execute_method(self, method, params):
"""Executes the RPC method.
:param function method: A method object.
:param params: List, tuple or dictionary with JSON-RPC parameters.
"""
args, varargs, varkw, defaults = getargspec(method)
if varargs or varkw:
raise InvalidParamsError(
"Service method definition must not have variable parameters")
args_set = set(args[1:])
if params is None:
if not len(args_set) == 0:
raise InvalidParamsError(
"Wrong number of parameters; "
"expected %i but 'params' was omitted "
"from JSON-RPC message" % (len(args_set)))
return method()
elif isinstance(params, (list, tuple)):
if not len(args_set) == len(params):
raise InvalidParamsError( | "Wrong number of parameters; "
"expected %i got %i" % (len(args_set),len(params)))
return method(*params) | random_line_split |
|
json_rpc.py | 602
message = 'Invalid params'
class InternalError(JsonRpcError):
"""Internal JSON-RPC error."""
code = -32603
message = 'Internal error'
class ServerError(JsonRpcError):
"""Base Class for implementation-defined Server Errors.
The Error Code must be between -32099..-32000
"""
code = -32000
message = 'Server Error'
class JsonRpcMessage(object):
"""A single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
def __init__(self, json=None):
super(JsonRpcMessage, self).__init__()
self.message_id = None
self.notification = False
self.error = None
self.result = None
if json is not None:
self.from_json(json)
def from_json(self, json):
"""Parses a single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
try:
if not isinstance(json, dict):
raise InvalidRequestError(
'Invalid JSON-RPC Message; must be an object')
if not set(json.keys()) <= JSON_RPC_KEYS:
raise InvalidRequestError('Invalid members in request object')
if not ('jsonrpc' in json and json['jsonrpc'] == '2.0'):
raise InvalidRequestError('Server supports JSON-RPC 2.0 only')
if 'method' not in json:
raise InvalidRequestError('No method specified')
if not isinstance(json['method'], basestring):
raise InvalidRequestError('Method must be a string')
self.method_name = json['method']
if 'params' in json:
params = json['params']
if not isinstance(params, (dict, list, tuple)):
raise InvalidRequestError(
"'params' must be an array or object")
self.params = params
if 'id' not in json:
self.notification = True
else:
self.message_id = json['id']
except InvalidRequestError, ex:
self.error = ex
logging.error('Encountered invalid json message')
class JsonRpcHandler(webapp.RequestHandler):
"""Subclass this handler to implement a JSON-RPC handler.
Annotate methods with @ServiceMethod to expose them and make them callable
via JSON-RPC. Currently methods with *args or **kwargs are not supported
as service-methods. All parameters have to be named explicitly.
"""
def __init__(self):
webapp.RequestHandler.__init__(self)
def post(self):
self.handle_request()
def handle_request(self):
"""Handles POST request."""
self.response.headers['Content-Type'] = 'application/json-rpc'
try:
logging.debug("Raw JSON-RPC: %s", self.request.body)
messages, batch_request = self.parse_body(self.request.body)
except (InvalidRequestError, ParseError), ex:
logging.error(ex)
self.error(ex.status)
body = self._build_error(ex)
self.response.out.write(simplejson.dumps(body))
else:
for msg in messages:
self.handle_message(msg)
responses = self.get_responses(messages)
if len(responses) == 0:
# Only notifications were sent
self.error(204)
return
if batch_request:
#TODO Which http_status to set for batches?
self.error(200)
body = [r[1] for r in responses]
self.response.out.write(simplejson.dumps(body))
else:
if len(responses) != 1:
# This should never happen
raise InternalError() # pragma: no cover
status, body = responses[0]
self.error(status)
self.response.out.write(simplejson.dumps(body))
def get_responses(self, messages):
"""Gets a list of responses from all 'messages'.
Responses are a tuple of HTTP-status and body.
A response may be None if the message was a notification and will be
excluded from the returned list.
:param list messages: JSON messages.
:returns: List of responses.
"""
responses = []
for msg in messages:
resp = self.get_response(msg)
if resp is not None:
responses.append(resp)
return responses
def handle_message(self, msg):
"""Executes a message.
The method of the message is executed.
Errors and/or results are written back to the message.
:param dict msg: A JSON-RPC message.
"""
if msg.error != None:
return
else:
try:
method = self.get_service_method(msg.method_name)
params = getattr(msg, 'params', None)
msg.result = self.execute_method(method, params)
except (MethodNotFoundError, InvalidParamsError, ServerError), ex:
logging.error(ex)
msg.error = ex
except Exception, ex:
logging.error(ex)
ex = InternalError("Error executing service method")
ex.data = ''.join(traceback.format_exception(*sys.exc_info()))
msg.error = ex
def parse_body(self, body):
"""Parses the body of POST request.
Validates for correct JSON and returns a tuple with a list of JSON-RPC
messages and wether the request was a batch-request.
Raises ParseError and InvalidRequestError.
:param string body: The HTTP body.
"""
try:
json = simplejson.loads(body)
except ValueError:
raise ParseError()
messages = []
if isinstance(json, (list, tuple)):
if len(json) == 0:
raise InvalidRequestError('Recieved an empty batch message')
batch_request = True
for obj in json:
msg = JsonRpcMessage(obj)
messages.append(msg)
if isinstance(json, (dict)):
batch_request = False
msg = JsonRpcMessage(json)
messages.append(msg)
return messages, batch_request
def get_response(self, msg):
"""Gets the response object for a message.
Returns a tuple of a HTTP-status and a json object or None.
The JSON object may be a JSON-RPC error object or a result object.
None is returned if the message was a notification.
:param dict msg: A JSON-RPC message.
:returns: Tuple with status and result.
"""
if msg.notification:
return None
elif msg.error:
return (msg.error.status,
self._build_error(msg.error, msg.message_id))
elif msg.result:
return (200, self._build_result(msg))
else: # pragma: no cover
# Should never be reached
logging.warn('Message neither contains an error nor a result')
def _build_error(self, err, message_id=None):
return {'jsonrpc':'2.0',
'error':err.getJsonData(),
'id':message_id}
def _build_result(self, msg):
return {'jsonrpc':'2.0',
'result':msg.result,
'id':msg.message_id}
def execute_method(self, method, params):
"""Executes the RPC method.
:param function method: A method object.
:param params: List, tuple or dictionary with JSON-RPC parameters.
"""
args, varargs, varkw, defaults = getargspec(method)
if varargs or varkw:
raise InvalidParamsError(
"Service method definition must not have variable parameters")
args_set = set(args[1:])
if params is None:
if not len(args_set) == 0:
raise InvalidParamsError(
"Wrong number of parameters; "
"expected %i but 'params' was omitted "
"from JSON-RPC message" % (len(args_set)))
return method()
elif isinstance(params, (list, tuple)):
if not len(args_set) == len(params):
raise InvalidParamsError(
"Wrong number of parameters; "
"expected %i got %i" % (len(args_set),len(params)))
return method(*params)
elif isinstance(params, dict):
paramset = set(params)
if not args_set == paramset:
raise InvalidParamsError(
"Named parameters do not "
"match method; expected %s" % (str(args_set)))
params = self.decode_dict_keys(params)
return method(**params)
def get_service_method(self, meth_name):
# TODO use inspect.getmembers()?
f = getattr(self, meth_name, None)
if (f == None or not hasattr(f, 'IsServiceMethod')
or not getattr(f, 'IsServiceMethod') == True):
raise MethodNotFoundError('Method %s not found' % meth_name)
return f
def decode_dict_keys(self, d):
" | ""Convert all keys in dict d to str.
Python does not allow unicode keys in dictionaries.
:param dict d: A JSON-RPC message.
"""
try:
r = {}
for (k, v) in d.iteritems():
r[str(k)] = v
return r
except UnicodeEncodeError: # pragma: no cover
# Unsure which error is the correct to raise here.
# Actually this code will probably never be reached
# because "wrong" parameters will be filtered out
# and returned as InvalidParamsError() and methods cant
# have non-ascii parameter names.
raise InvalidRequestError("Parameter-names must be ASCII")
| identifier_body |
|
json_rpc.py | -values are not allowed for Service Methods
- handles only HTTP POST
- JSON-RPC Version < 2.0 (same as 1.2) not supported
TODOs:
- more Comments
- Examples (doctest?)
- Factor out handler methods to reuse in other frameworks
"""
from google.appengine.ext import webapp
from inspect import getargspec
import cgi
import logging
import simplejson
import sys
import traceback
JSON_RPC_KEYS = frozenset(['method', 'jsonrpc', 'params', 'id'])
def ServiceMethod(fn):
"""Decorator to mark a method of a JsonRpcHandler as ServiceMethod.
This exposes methods to the RPC interface.
:param function fn: A function.
:returns: A function.
TODO:
- Warn when applied to underscore methods
"""
fn.IsServiceMethod = True
return fn
class JsonRpcError(Exception):
"""Baseclass for all JSON-RPC Errors.
Errors are described in the JSON-RPC 2.0 specs, related HTTP Status
Codes are described in the json-rpc-over-http proposal.
"""
code = 0
message = None
status = 500
def __init__(self, message=None):
if message is not None:
self.message = message
def __str__(self):
return(self.message)
def __repr__(self):
return '%s("%s")' % (str(self.__class__.__name__), self.message)
def getJsonData(self):
error = {
'code' : self.code ,
'message' : '%s: %s' %
(str(self.__class__.__name__),
str(self.message))}
return error
class ParseError(JsonRpcError):
"""Invalid JSON was received by the server.
An error occurred on the server while parsing the JSON text.
"""
code = -32700
message = 'Parse error'
class InvalidRequestError(JsonRpcError):
"""The JSON sent is not a valid Request object."""
code = -32600
message = 'Invalid Request'
status = 400
class MethodNotFoundError(JsonRpcError):
"""The method does not exist / is not available."""
code = -32601
message = 'Method not found'
status = 404
class I | JsonRpcError):
"""Invalid method parameter(s)."""
code = -32602
message = 'Invalid params'
class InternalError(JsonRpcError):
"""Internal JSON-RPC error."""
code = -32603
message = 'Internal error'
class ServerError(JsonRpcError):
"""Base Class for implementation-defined Server Errors.
The Error Code must be between -32099..-32000
"""
code = -32000
message = 'Server Error'
class JsonRpcMessage(object):
"""A single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
def __init__(self, json=None):
super(JsonRpcMessage, self).__init__()
self.message_id = None
self.notification = False
self.error = None
self.result = None
if json is not None:
self.from_json(json)
def from_json(self, json):
"""Parses a single JSON-RPC message.
:param dict json: The JSON-RPC message Python representation.
"""
try:
if not isinstance(json, dict):
raise InvalidRequestError(
'Invalid JSON-RPC Message; must be an object')
if not set(json.keys()) <= JSON_RPC_KEYS:
raise InvalidRequestError('Invalid members in request object')
if not ('jsonrpc' in json and json['jsonrpc'] == '2.0'):
raise InvalidRequestError('Server supports JSON-RPC 2.0 only')
if 'method' not in json:
raise InvalidRequestError('No method specified')
if not isinstance(json['method'], basestring):
raise InvalidRequestError('Method must be a string')
self.method_name = json['method']
if 'params' in json:
params = json['params']
if not isinstance(params, (dict, list, tuple)):
raise InvalidRequestError(
"'params' must be an array or object")
self.params = params
if 'id' not in json:
self.notification = True
else:
self.message_id = json['id']
except InvalidRequestError, ex:
self.error = ex
logging.error('Encountered invalid json message')
class JsonRpcHandler(webapp.RequestHandler):
"""Subclass this handler to implement a JSON-RPC handler.
Annotate methods with @ServiceMethod to expose them and make them callable
via JSON-RPC. Currently methods with *args or **kwargs are not supported
as service-methods. All parameters have to be named explicitly.
"""
def __init__(self):
webapp.RequestHandler.__init__(self)
def post(self):
self.handle_request()
def handle_request(self):
"""Handles POST request."""
self.response.headers['Content-Type'] = 'application/json-rpc'
try:
logging.debug("Raw JSON-RPC: %s", self.request.body)
messages, batch_request = self.parse_body(self.request.body)
except (InvalidRequestError, ParseError), ex:
logging.error(ex)
self.error(ex.status)
body = self._build_error(ex)
self.response.out.write(simplejson.dumps(body))
else:
for msg in messages:
self.handle_message(msg)
responses = self.get_responses(messages)
if len(responses) == 0:
# Only notifications were sent
self.error(204)
return
if batch_request:
#TODO Which http_status to set for batches?
self.error(200)
body = [r[1] for r in responses]
self.response.out.write(simplejson.dumps(body))
else:
if len(responses) != 1:
# This should never happen
raise InternalError() # pragma: no cover
status, body = responses[0]
self.error(status)
self.response.out.write(simplejson.dumps(body))
def get_responses(self, messages):
"""Gets a list of responses from all 'messages'.
Responses are a tuple of HTTP-status and body.
A response may be None if the message was a notification and will be
excluded from the returned list.
:param list messages: JSON messages.
:returns: List of responses.
"""
responses = []
for msg in messages:
resp = self.get_response(msg)
if resp is not None:
responses.append(resp)
return responses
def handle_message(self, msg):
"""Executes a message.
The method of the message is executed.
Errors and/or results are written back to the message.
:param dict msg: A JSON-RPC message.
"""
if msg.error != None:
return
else:
try:
method = self.get_service_method(msg.method_name)
params = getattr(msg, 'params', None)
msg.result = self.execute_method(method, params)
except (MethodNotFoundError, InvalidParamsError, ServerError), ex:
logging.error(ex)
msg.error = ex
except Exception, ex:
logging.error(ex)
ex = InternalError("Error executing service method")
ex.data = ''.join(traceback.format_exception(*sys.exc_info()))
msg.error = ex
def parse_body(self, body):
"""Parses the body of POST request.
Validates for correct JSON and returns a tuple with a list of JSON-RPC
messages and wether the request was a batch-request.
Raises ParseError and InvalidRequestError.
:param string body: The HTTP body.
"""
try:
json = simplejson.loads(body)
except ValueError:
raise ParseError()
messages = []
if isinstance(json, (list, tuple)):
if len(json) == 0:
raise InvalidRequestError('Recieved an empty batch message')
batch_request = True
for obj in json:
msg = JsonRpcMessage(obj)
messages.append(msg)
if isinstance(json, (dict)):
batch_request = False
msg = JsonRpcMessage(json)
messages.append(msg)
return messages, batch_request
def get_response(self, msg):
"""Gets the response object for a message.
Returns a tuple of a HTTP-status and a json object or None.
The JSON object may be a JSON-RPC error object or a result object.
None is returned if the message was a notification.
:param dict msg: A JSON-RPC message.
:returns: Tuple with status and result.
"""
if msg.notification:
return None
elif msg.error:
return (msg.error.status,
self._build_error(msg.error, msg.message_id))
elif msg.result:
return (200, self._build_result(msg))
else: # pragma: no cover
# Should never be reached
logging.warn('Message neither contains an error nor a result')
def _build_error(self, err, message_id=None):
return {'jsonrpc':'2.0',
'error':err.getJsonData(),
'id':message_id}
def _build_result(self, msg):
return {'jsonrpc':'2 | nvalidParamsError( | identifier_name |
inside.py | classification loss is computed (Cross-Entropy).
"""
return_dict = self.config.use_return_dict
outputs = self.bert(
span_inside_input_ids,
attention_mask=span_inside_attention_mask,
# token_type_ids=token_type_ids,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@gin.configurable
@dataclass
class SpanInsideClassificationDataTrainingArguments(
SpanClassifierDataTrainingArguments
):
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
# overwrite_cache: bool = field(
# default=False,
# metadata={"help": "Overwrite the cached preprocessed datasets or not."},
# )
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
@gin.configurable
@dataclass
class SpanInsideClassificationModelArguments:
| )
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models downloaded from huggingface.co"
},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={
"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
},
)
saved_param_path: Optional[str] = field(
default=None,
metadata={"help": "Fine-Tuned parameters. If there is, load this parameter."},
)
TrainingArguments = gin.configurable(TrainingArguments)
from datasets import DatasetDict, Dataset, Sequence, Value, DatasetInfo
@gin.configurable
class SpanInsideClassifier(SpanClassifier):
def __init__(
self,
span_classification_datasets: DatasetDict,
model_args: SpanInsideClassificationModelArguments,
data_args: SpanInsideClassificationDataTrainingArguments,
training_args: TrainingArguments,
) -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
self.model_args = model_args
self.data_args = data_args
training_args = translate_into_orig_train_args(training_args)
self.training_args = training_args
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
if is_main_process(training_args.local_rank)
else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
# if data_args.task_name is not None:
# # Downloading and loading a dataset from the hub.
# datasets = load_dataset("glue", data_args.task_name)
# elif data_args.train_file.endswith(".csv"):
# # Loading a dataset from local csv files
# datasets = load_dataset(
# "csv",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# else:
# # Loading a dataset from local json files
# datasets = load_dataset(
# "json",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
label_list = span_classification_datasets["train"].features["label"].names
self.label_list = label_list
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name
if model_args.config_name
else model_args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name
if model_args.tokenizer_name
else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
)
model = BertForSpanInsideClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if model_args.saved_param_path:
model.load_state_dict(torch.load(model_args.saved_param_path))
self.tokenizer = tokenizer
self.model = model
# Padding strategy
if data_args.pad_to_max_length:
self.padding = "max_length"
self.max_length = data_args.max_seq_length
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch | """
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={
"help": "Path to pretrained model or model identifier from huggingface.co/models"
}
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name"
}, | identifier_body |
inside.py | classification loss is computed (Cross-Entropy).
"""
return_dict = self.config.use_return_dict
outputs = self.bert(
span_inside_input_ids,
attention_mask=span_inside_attention_mask,
# token_type_ids=token_type_ids,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@gin.configurable
@dataclass
class SpanInsideClassificationDataTrainingArguments(
SpanClassifierDataTrainingArguments
): | Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
# overwrite_cache: bool = field(
# default=False,
# metadata={"help": "Overwrite the cached preprocessed datasets or not."},
# )
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
@gin.configurable
@dataclass
class SpanInsideClassificationModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={
"help": "Path to pretrained model or model identifier from huggingface.co/models"
}
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name"
},
)
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models downloaded from huggingface.co"
},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={
"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
},
)
saved_param_path: Optional[str] = field(
default=None,
metadata={"help": "Fine-Tuned parameters. If there is, load this parameter."},
)
TrainingArguments = gin.configurable(TrainingArguments)
from datasets import DatasetDict, Dataset, Sequence, Value, DatasetInfo
@gin.configurable
class SpanInsideClassifier(SpanClassifier):
def __init__(
self,
span_classification_datasets: DatasetDict,
model_args: SpanInsideClassificationModelArguments,
data_args: SpanInsideClassificationDataTrainingArguments,
training_args: TrainingArguments,
) -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
self.model_args = model_args
self.data_args = data_args
training_args = translate_into_orig_train_args(training_args)
self.training_args = training_args
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
if is_main_process(training_args.local_rank)
else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
# if data_args.task_name is not None:
# # Downloading and loading a dataset from the hub.
# datasets = load_dataset("glue", data_args.task_name)
# elif data_args.train_file.endswith(".csv"):
# # Loading a dataset from local csv files
# datasets = load_dataset(
# "csv",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# else:
# # Loading a dataset from local json files
# datasets = load_dataset(
# "json",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
label_list = span_classification_datasets["train"].features["label"].names
self.label_list = label_list
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name
if model_args.config_name
else model_args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name
if model_args.tokenizer_name
else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
)
model = BertForSpanInsideClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if model_args.saved_param_path:
model.load_state_dict(torch.load(model_args.saved_param_path))
self.tokenizer = tokenizer
self.model = model
# Padding strategy
if data_args.pad_to_max_length:
self.padding = "max_length"
self.max_length = data_args.max_seq_length
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
| """
Arguments pertaining to what data we are going to input our model for training and eval. | random_line_split |
inside.py | classification loss is computed (Cross-Entropy).
"""
return_dict = self.config.use_return_dict
outputs = self.bert(
span_inside_input_ids,
attention_mask=span_inside_attention_mask,
# token_type_ids=token_type_ids,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
|
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@gin.configurable
@dataclass
class SpanInsideClassificationDataTrainingArguments(
SpanClassifierDataTrainingArguments
):
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
# overwrite_cache: bool = field(
# default=False,
# metadata={"help": "Overwrite the cached preprocessed datasets or not."},
# )
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
@gin.configurable
@dataclass
class SpanInsideClassificationModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={
"help": "Path to pretrained model or model identifier from huggingface.co/models"
}
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name"
},
)
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models downloaded from huggingface.co"
},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={
"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
},
)
saved_param_path: Optional[str] = field(
default=None,
metadata={"help": "Fine-Tuned parameters. If there is, load this parameter."},
)
TrainingArguments = gin.configurable(TrainingArguments)
from datasets import DatasetDict, Dataset, Sequence, Value, DatasetInfo
@gin.configurable
class SpanInsideClassifier(SpanClassifier):
def __init__(
self,
span_classification_datasets: DatasetDict,
model_args: SpanInsideClassificationModelArguments,
data_args: SpanInsideClassificationDataTrainingArguments,
training_args: TrainingArguments,
) -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
self.model_args = model_args
self.data_args = data_args
training_args = translate_into_orig_train_args(training_args)
self.training_args = training_args
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
if is_main_process(training_args.local_rank)
else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
# if data_args.task_name is not None:
# # Downloading and loading a dataset from the hub.
# datasets = load_dataset("glue", data_args.task_name)
# elif data_args.train_file.endswith(".csv"):
# # Loading a dataset from local csv files
# datasets = load_dataset(
# "csv",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# else:
# # Loading a dataset from local json files
# datasets = load_dataset(
# "json",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
label_list = span_classification_datasets["train"].features["label"].names
self.label_list = label_list
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name
if model_args.config_name
else model_args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name
if model_args.tokenizer_name
else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
)
model = BertForSpanInsideClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if model_args.saved_param_path:
model.load_state_dict(torch.load(model_args.saved_param_path))
self.tokenizer = tokenizer
self.model = model
# Padding strategy
if data_args.pad_to_max_length:
self.padding = "max_length"
self.max_length = data_args.max_seq_length
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
| output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output | conditional_block |
inside.py | which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={
"help": "Path to pretrained model or model identifier from huggingface.co/models"
}
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name"
},
)
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models downloaded from huggingface.co"
},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={
"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
},
)
saved_param_path: Optional[str] = field(
default=None,
metadata={"help": "Fine-Tuned parameters. If there is, load this parameter."},
)
TrainingArguments = gin.configurable(TrainingArguments)
from datasets import DatasetDict, Dataset, Sequence, Value, DatasetInfo
@gin.configurable
class SpanInsideClassifier(SpanClassifier):
def __init__(
self,
span_classification_datasets: DatasetDict,
model_args: SpanInsideClassificationModelArguments,
data_args: SpanInsideClassificationDataTrainingArguments,
training_args: TrainingArguments,
) -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
self.model_args = model_args
self.data_args = data_args
training_args = translate_into_orig_train_args(training_args)
self.training_args = training_args
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
if is_main_process(training_args.local_rank)
else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
# if data_args.task_name is not None:
# # Downloading and loading a dataset from the hub.
# datasets = load_dataset("glue", data_args.task_name)
# elif data_args.train_file.endswith(".csv"):
# # Loading a dataset from local csv files
# datasets = load_dataset(
# "csv",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# else:
# # Loading a dataset from local json files
# datasets = load_dataset(
# "json",
# data_files={
# "train": data_args.train_file,
# "validation": data_args.validation_file,
# },
# )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
label_list = span_classification_datasets["train"].features["label"].names
self.label_list = label_list
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name
if model_args.config_name
else model_args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name
if model_args.tokenizer_name
else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
)
model = BertForSpanInsideClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if model_args.saved_param_path:
model.load_state_dict(torch.load(model_args.saved_param_path))
self.tokenizer = tokenizer
self.model = model
# Padding strategy
if data_args.pad_to_max_length:
self.padding = "max_length"
self.max_length = data_args.max_seq_length
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
self.padding = False
self.max_length = None
# Preprocessing the datasets
span_classification_datasets = DatasetDict(
{
"train": span_classification_datasets["train"],
"validation": span_classification_datasets["validation"],
}
)
super().__init__(span_classification_datasets, data_args)
self.argss += [model_args, data_args, training_args]
datasets = self.span_classification_datasets
train_dataset = datasets["train"]
eval_dataset = datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
# When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = (
p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
)
preds = np.argmax(preds, axis=1)
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
data_collator=default_data_collator
if data_args.pad_to_max_length
else None,
)
self.trainer = trainer
# trainer.save_model()
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path
if os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.save_model() # Saves the tokenizer too for easy upload
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results.txt"
)
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info(f"***** Eval results *****")
for key, value in eval_result.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
def | predict | identifier_name |
|
datahandle.py | 'allergens': {'sesame-seed': 'sesame seeds',
'tree-nuts': 'tree nuts',
'wheat_barley_rye': 'wheat or barley or rye'}}
#If traits specified, extract into a string
for i, trait in enumerate(requisites['trait']):
if traits_text:
traits_text += ', '
traits_text += req_map['trait'].get(trait, trait)
traits_text = format_plural(traits_text.rstrip(', '))
#If allergens specified, extract into a string
for i, allergen in enumerate(requisites['allergens']):
if allergens_text:
allergens_text += ', '
allergens_text += req_map['allergens'].get(allergen, allergen)
allergens_text = format_plural(allergens_text.rstrip(', '))
allergens_text = allergens_text.replace('and', 'or')
#Requisite-specific language
if allergens_text:
allergens_text = ' without ' + allergens_text
if traits_text:
traits_text = ' that is ' + traits_text
#Return combined string
if (allergens_text or traits_text) and 'Sorry, that is not available' in text:
traits_text = traits_text.replace(' that is ', '')
text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
text = text.replace('that is not available', '[meal]')
return text + allergens_text + ' is not available'
else:
return text + traits_text + allergens_text
def format_plural(text):
"""Adds 'and' before last item in list of items.
:param text: The string to be manipulated
:type text: string
"""
if ',' in text:
index = text.rfind(',') + 2
text = text[:index] + 'and ' + text[index:]
return text
def remove_spaces(url_block):
"""Removes spaces in url string to create valid url string.
:param url_block: The url string to be manipulated
:type search: string
"""
temp = ""
for i in range(len(url_block)):
if url_block[i] == ' ':
temp += '+'
else:
temp += url_block[i]
return temp | def check_meal_available(data, meal):
"""Searches response data to check if meal is available at specified location/date.
:param data: MDining API HTTP response data
:type data: dict
:param meal: Name of meal
:type meal: string
"""
for key in data['menu']['meal']:
if data['menu']['meal']['name'].upper() == meal.upper():
if 'course' in data['menu']['meal']:
return True
return False
return False
def check_course_available(data, course):
"""Searches response data to check if course is available in specified meal.
:param data: MDining API HTTP response data
:type data: dict
:param course: Name of course
:type course: string
"""
for i in range(len(data['menu']['meal']['course'])):
for key, value in data['menu']['meal']['course'][i].items():
if key == 'name':
if value.upper() == course.upper():
return True
return False
def check_item_specifications(item, traits, allergens):
"""Returns true if food item is satisfactory with specified traits and allergens.
:param item: Data of specific food item
:type item: dict
:param traits: List of specified traits item must have, can be empty
:type traits: list
:param allergens: List of allergens item cannot have, can be empty
:type allergens: list
"""
#Return false if allergens list isn't empty and any allergens found
if allergens and 'allergens' in item:
for allergen in allergens:
if allergen in item['allergens']:
return False
#Return true if traits list empty
if not traits:
return True
#Return false if traits list isn't empty and any traits are missing
if 'trait' in item:
for trait in traits:
if trait not in item['trait']:
return False
#All traits found, return true
return True
else:
return False
def get_items(data, requisites, formatted):
"""Returns string of food items of each course in response data for
fulfillmentText in response to Dialogflow.
:param data: MDining API HTTP response data
:type data: dict
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
:param formatted: True/False - formats response string if true
:type formatted: boolean
"""
returndata = ""
traits = requisites['trait']
allergens = requisites['allergens']
if formatted:
prefix = '\t'
suffix = '\n'
else:
prefix = ''
suffix = ', '
for course in data['menu']['meal']['course']:
item_data = []
datatype = type(course['menuitem'])
if datatype is list:
item_data += course['menuitem']
else:
item_data.append(course['menuitem'])
for item in item_data:
if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:
returndata += (prefix + (item['name']).rstrip(', ') + suffix)
return returndata
def find_item_formatting(possible_matches):
"""Formatting list of possible matches into more natural sentence structure
by removing redundancy:
[Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->
[Chicken, chicken wings during lunch, and chicken patty during dinner]
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
"""
for i in range(len(possible_matches)):
if i == 0:
continue
words = possible_matches[i].split()
#If previous term has same ending ("Dinner") as current term, remove it
if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[-1]:
#8 = amount of characters taken up by [' during ']
length = len(possible_matches[i].split()[-1]) + 8
possible_matches[i - 1] = possible_matches[i - 1][:length*-1]
return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites):
"""Appends matches of specified food item in data of an individual course to
list of possible matches.
:param course_data: Chosen course subsection of MDining API HTTP response data
:type course_data: dict
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
:param item_in: User input food item
:type item_in: string
:param meal_name: Name of meal
:type meal_name: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits = requisites['trait']
allergens = requisites['allergens']
item_data = []
datatype = type(course_data)
if datatype is list:
item_data += course_data
else:
item_data.append(course_data)
for item in item_data:
if check_item_specifications(item, traits, allergens) == False:
continue
if item_in.upper() in item['name'].upper():
if item['name'][-1] == ' ':
item['name'] = item['name'][:-1]
possible_matches.append(item['name'] + ' during ' + meal_name)
return possible_matches
#########################################################################
###Primary Handler Functions
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and meal entities from ``findLocationAndMeal`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param meal_in: Input meal
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
#preset vars
url = 'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'
location = '&location='
date = '&date='
meal = '&meal='
#API url concatenation
location += loc_in
meal += meal_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
#fetching json
data = requests.get(url).json()
#checking if specified meal available
if check_meal_available(data, meal_in):
returnstring = (get_items(data, requisites, False)).rstrip(', ')
return format_plural(returnstring)
else:
return "No meal is available"
# | random_line_split |
|
datahandle.py | if allergens_text:
allergens_text = ' without ' + allergens_text
if traits_text:
traits_text = ' that is ' + traits_text
#Return combined string
if (allergens_text or traits_text) and 'Sorry, that is not available' in text:
traits_text = traits_text.replace(' that is ', '')
text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
text = text.replace('that is not available', '[meal]')
return text + allergens_text + ' is not available'
else:
return text + traits_text + allergens_text
def format_plural(text):
"""Adds 'and' before last item in list of items.
:param text: The string to be manipulated
:type text: string
"""
if ',' in text:
index = text.rfind(',') + 2
text = text[:index] + 'and ' + text[index:]
return text
def remove_spaces(url_block):
"""Removes spaces in url string to create valid url string.
:param url_block: The url string to be manipulated
:type search: string
"""
temp = ""
for i in range(len(url_block)):
if url_block[i] == ' ':
temp += '+'
else:
temp += url_block[i]
return temp
def check_meal_available(data, meal):
"""Searches response data to check if meal is available at specified location/date.
:param data: MDining API HTTP response data
:type data: dict
:param meal: Name of meal
:type meal: string
"""
for key in data['menu']['meal']:
if data['menu']['meal']['name'].upper() == meal.upper():
if 'course' in data['menu']['meal']:
return True
return False
return False
def check_course_available(data, course):
"""Searches response data to check if course is available in specified meal.
:param data: MDining API HTTP response data
:type data: dict
:param course: Name of course
:type course: string
"""
for i in range(len(data['menu']['meal']['course'])):
for key, value in data['menu']['meal']['course'][i].items():
if key == 'name':
if value.upper() == course.upper():
return True
return False
def check_item_specifications(item, traits, allergens):
"""Returns true if food item is satisfactory with specified traits and allergens.
:param item: Data of specific food item
:type item: dict
:param traits: List of specified traits item must have, can be empty
:type traits: list
:param allergens: List of allergens item cannot have, can be empty
:type allergens: list
"""
#Return false if allergens list isn't empty and any allergens found
if allergens and 'allergens' in item:
for allergen in allergens:
if allergen in item['allergens']:
return False
#Return true if traits list empty
if not traits:
return True
#Return false if traits list isn't empty and any traits are missing
if 'trait' in item:
for trait in traits:
if trait not in item['trait']:
return False
#All traits found, return true
return True
else:
return False
def get_items(data, requisites, formatted):
"""Returns string of food items of each course in response data for
fulfillmentText in response to Dialogflow.
:param data: MDining API HTTP response data
:type data: dict
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
:param formatted: True/False - formats response string if true
:type formatted: boolean
"""
returndata = ""
traits = requisites['trait']
allergens = requisites['allergens']
if formatted:
prefix = '\t'
suffix = '\n'
else:
prefix = ''
suffix = ', '
for course in data['menu']['meal']['course']:
item_data = []
datatype = type(course['menuitem'])
if datatype is list:
item_data += course['menuitem']
else:
item_data.append(course['menuitem'])
for item in item_data:
if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:
returndata += (prefix + (item['name']).rstrip(', ') + suffix)
return returndata
def find_item_formatting(possible_matches):
"""Formatting list of possible matches into more natural sentence structure
by removing redundancy:
[Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->
[Chicken, chicken wings during lunch, and chicken patty during dinner]
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
"""
for i in range(len(possible_matches)):
if i == 0:
continue
words = possible_matches[i].split()
#If previous term has same ending ("Dinner") as current term, remove it
if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[-1]:
#8 = amount of characters taken up by [' during ']
length = len(possible_matches[i].split()[-1]) + 8
possible_matches[i - 1] = possible_matches[i - 1][:length*-1]
return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites):
"""Appends matches of specified food item in data of an individual course to
list of possible matches.
:param course_data: Chosen course subsection of MDining API HTTP response data
:type course_data: dict
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
:param item_in: User input food item
:type item_in: string
:param meal_name: Name of meal
:type meal_name: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits = requisites['trait']
allergens = requisites['allergens']
item_data = []
datatype = type(course_data)
if datatype is list:
item_data += course_data
else:
item_data.append(course_data)
for item in item_data:
if check_item_specifications(item, traits, allergens) == False:
continue
if item_in.upper() in item['name'].upper():
if item['name'][-1] == ' ':
item['name'] = item['name'][:-1]
possible_matches.append(item['name'] + ' during ' + meal_name)
return possible_matches
#########################################################################
###Primary Handler Functions
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and meal entities from ``findLocationAndMeal`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param meal_in: Input meal
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
#preset vars
url = 'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'
location = '&location='
date = '&date='
meal = '&meal='
#API url concatenation
location += loc_in
meal += meal_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
#fetching json
data = requests.get(url).json()
#checking if specified meal available
if check_meal_available(data, meal_in):
returnstring = (get_items(data, requisites, False)).rstrip(', ')
return format_plural(returnstring)
else:
return "No meal is available"
#Handle meal item data request
def request_item(date_in, loc_in, item_in, meal_in, requisites):
| """Handles searching for appropriate data response for valid specified
location and food item entities (and meal entity if included) from ``findItem`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param item_in: Input food item
:type item_in: string
:param meal_in: Input meal, can be empty string if not specified
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
secrets = get_secrets()
url = secrets.get('m_dining_api_main')
location = '&location='
date = '&date='
meal = '&meal='
| identifier_body |
|
datahandle.py | (error_text):
"""Logs error to Stackdriver.
:param error_text: The text to log to Stackdriver
:type error_text: string
"""
client = google.cloud.logging.Client()
logger = client.logger("automated_error_catch")
logger.log_text(error_text)
def get_secrets():
"""Fetches secrets from Datastore and returns them as a list.
"""
client = datastore.Client()
query = client.query(kind='env_vars')
entity = query.fetch()
secrets = list(entity)[0]
return secrets
def format_requisites(text, requisites):
"""If any item requisites specified, adds them to response text data for more holistic response.
:param text: The response text data to be formatted
:type text: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits_text = ''
allergens_text = ''
req_map = {'trait': {'mhealthy': 'healthy'},
'allergens': {'sesame-seed': 'sesame seeds',
'tree-nuts': 'tree nuts',
'wheat_barley_rye': 'wheat or barley or rye'}}
#If traits specified, extract into a string
for i, trait in enumerate(requisites['trait']):
if traits_text:
traits_text += ', '
traits_text += req_map['trait'].get(trait, trait)
traits_text = format_plural(traits_text.rstrip(', '))
#If allergens specified, extract into a string
for i, allergen in enumerate(requisites['allergens']):
if allergens_text:
allergens_text += ', '
allergens_text += req_map['allergens'].get(allergen, allergen)
allergens_text = format_plural(allergens_text.rstrip(', '))
allergens_text = allergens_text.replace('and', 'or')
#Requisite-specific language
if allergens_text:
allergens_text = ' without ' + allergens_text
if traits_text:
traits_text = ' that is ' + traits_text
#Return combined string
if (allergens_text or traits_text) and 'Sorry, that is not available' in text:
traits_text = traits_text.replace(' that is ', '')
text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
text = text.replace('that is not available', '[meal]')
return text + allergens_text + ' is not available'
else:
return text + traits_text + allergens_text
def format_plural(text):
"""Adds 'and' before last item in list of items.
:param text: The string to be manipulated
:type text: string
"""
if ',' in text:
index = text.rfind(',') + 2
text = text[:index] + 'and ' + text[index:]
return text
def remove_spaces(url_block):
"""Removes spaces in url string to create valid url string.
:param url_block: The url string to be manipulated
:type search: string
"""
temp = ""
for i in range(len(url_block)):
if url_block[i] == ' ':
temp += '+'
else:
temp += url_block[i]
return temp
def check_meal_available(data, meal):
"""Searches response data to check if meal is available at specified location/date.
:param data: MDining API HTTP response data
:type data: dict
:param meal: Name of meal
:type meal: string
"""
for key in data['menu']['meal']:
if data['menu']['meal']['name'].upper() == meal.upper():
if 'course' in data['menu']['meal']:
return True
return False
return False
def check_course_available(data, course):
"""Searches response data to check if course is available in specified meal.
:param data: MDining API HTTP response data
:type data: dict
:param course: Name of course
:type course: string
"""
for i in range(len(data['menu']['meal']['course'])):
for key, value in data['menu']['meal']['course'][i].items():
if key == 'name':
if value.upper() == course.upper():
return True
return False
def check_item_specifications(item, traits, allergens):
"""Returns true if food item is satisfactory with specified traits and allergens.
:param item: Data of specific food item
:type item: dict
:param traits: List of specified traits item must have, can be empty
:type traits: list
:param allergens: List of allergens item cannot have, can be empty
:type allergens: list
"""
#Return false if allergens list isn't empty and any allergens found
if allergens and 'allergens' in item:
for allergen in allergens:
if allergen in item['allergens']:
return False
#Return true if traits list empty
if not traits:
return True
#Return false if traits list isn't empty and any traits are missing
if 'trait' in item:
for trait in traits:
if trait not in item['trait']:
return False
#All traits found, return true
return True
else:
return False
def get_items(data, requisites, formatted):
"""Returns string of food items of each course in response data for
fulfillmentText in response to Dialogflow.
:param data: MDining API HTTP response data
:type data: dict
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
:param formatted: True/False - formats response string if true
:type formatted: boolean
"""
returndata = ""
traits = requisites['trait']
allergens = requisites['allergens']
if formatted:
prefix = '\t'
suffix = '\n'
else:
prefix = ''
suffix = ', '
for course in data['menu']['meal']['course']:
item_data = []
datatype = type(course['menuitem'])
if datatype is list:
item_data += course['menuitem']
else:
item_data.append(course['menuitem'])
for item in item_data:
if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:
returndata += (prefix + (item['name']).rstrip(', ') + suffix)
return returndata
def find_item_formatting(possible_matches):
"""Formatting list of possible matches into more natural sentence structure
by removing redundancy:
[Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->
[Chicken, chicken wings during lunch, and chicken patty during dinner]
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
"""
for i in range(len(possible_matches)):
if i == 0:
continue
words = possible_matches[i].split()
#If previous term has same ending ("Dinner") as current term, remove it
if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[-1]:
#8 = amount of characters taken up by [' during ']
length = len(possible_matches[i].split()[-1]) + 8
possible_matches[i - 1] = possible_matches[i - 1][:length*-1]
return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites):
"""Appends matches of specified food item in data of an individual course to
list of possible matches.
:param course_data: Chosen course subsection of MDining API HTTP response data
:type course_data: dict
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
:param item_in: User input food item
:type item_in: string
:param meal_name: Name of meal
:type meal_name: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits = requisites['trait']
allergens = requisites['allergens']
item_data = []
datatype = type(course_data)
if datatype is list:
item_data += course_data
else:
item_data.append(course_data)
for item in item_data:
if check_item_specifications(item, traits, allergens) == False:
continue
if item_in.upper() in item['name'].upper():
if item['name'][-1] == ' ':
item['name'] = item['name'][:-1]
possible_matches.append(item['name'] + ' during ' + meal_name)
return possible_matches
#########################################################################
###Primary Handler Functions
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and meal entities from ``findLocationAndMeal`` intent.
:param date_in: Input date
:type date_in: string
| report_error | identifier_name |
|
datahandle.py | 'allergens': {'sesame-seed': 'sesame seeds',
'tree-nuts': 'tree nuts',
'wheat_barley_rye': 'wheat or barley or rye'}}
#If traits specified, extract into a string
for i, trait in enumerate(requisites['trait']):
if traits_text:
traits_text += ', '
traits_text += req_map['trait'].get(trait, trait)
traits_text = format_plural(traits_text.rstrip(', '))
#If allergens specified, extract into a string
for i, allergen in enumerate(requisites['allergens']):
if allergens_text:
allergens_text += ', '
allergens_text += req_map['allergens'].get(allergen, allergen)
allergens_text = format_plural(allergens_text.rstrip(', '))
allergens_text = allergens_text.replace('and', 'or')
#Requisite-specific language
if allergens_text:
allergens_text = ' without ' + allergens_text
if traits_text:
traits_text = ' that is ' + traits_text
#Return combined string
if (allergens_text or traits_text) and 'Sorry, that is not available' in text:
traits_text = traits_text.replace(' that is ', '')
text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
text = text.replace('that is not available', '[meal]')
return text + allergens_text + ' is not available'
else:
return text + traits_text + allergens_text
def format_plural(text):
"""Adds 'and' before last item in list of items.
:param text: The string to be manipulated
:type text: string
"""
if ',' in text:
index = text.rfind(',') + 2
text = text[:index] + 'and ' + text[index:]
return text
def remove_spaces(url_block):
"""Removes spaces in url string to create valid url string.
:param url_block: The url string to be manipulated
:type search: string
"""
temp = ""
for i in range(len(url_block)):
if url_block[i] == ' ':
temp += '+'
else:
temp += url_block[i]
return temp
def check_meal_available(data, meal):
"""Searches response data to check if meal is available at specified location/date.
:param data: MDining API HTTP response data
:type data: dict
:param meal: Name of meal
:type meal: string
"""
for key in data['menu']['meal']:
if data['menu']['meal']['name'].upper() == meal.upper():
if 'course' in data['menu']['meal']:
return True
return False
return False
def check_course_available(data, course):
"""Searches response data to check if course is available in specified meal.
:param data: MDining API HTTP response data
:type data: dict
:param course: Name of course
:type course: string
"""
for i in range(len(data['menu']['meal']['course'])):
for key, value in data['menu']['meal']['course'][i].items():
if key == 'name':
if value.upper() == course.upper():
return True
return False
def check_item_specifications(item, traits, allergens):
"""Returns true if food item is satisfactory with specified traits and allergens.
:param item: Data of specific food item
:type item: dict
:param traits: List of specified traits item must have, can be empty
:type traits: list
:param allergens: List of allergens item cannot have, can be empty
:type allergens: list
"""
#Return false if allergens list isn't empty and any allergens found
if allergens and 'allergens' in item:
for allergen in allergens:
if allergen in item['allergens']:
return False
#Return true if traits list empty
if not traits:
return True
#Return false if traits list isn't empty and any traits are missing
if 'trait' in item:
for trait in traits:
if trait not in item['trait']:
return False
#All traits found, return true
return True
else:
return False
def get_items(data, requisites, formatted):
"""Returns string of food items of each course in response data for
fulfillmentText in response to Dialogflow.
:param data: MDining API HTTP response data
:type data: dict
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
:param formatted: True/False - formats response string if true
:type formatted: boolean
"""
returndata = ""
traits = requisites['trait']
allergens = requisites['allergens']
if formatted:
prefix = '\t'
suffix = '\n'
else:
prefix = ''
suffix = ', '
for course in data['menu']['meal']['course']:
item_data = []
datatype = type(course['menuitem'])
if datatype is list:
item_data += course['menuitem']
else:
item_data.append(course['menuitem'])
for item in item_data:
if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:
returndata += (prefix + (item['name']).rstrip(', ') + suffix)
return returndata
def find_item_formatting(possible_matches):
"""Formatting list of possible matches into more natural sentence structure
by removing redundancy:
[Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->
[Chicken, chicken wings during lunch, and chicken patty during dinner]
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
"""
for i in range(len(possible_matches)):
if i == 0:
continue
words = possible_matches[i].split()
#If previous term has same ending ("Dinner") as current term, remove it
if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[-1]:
#8 = amount of characters taken up by [' during ']
length = len(possible_matches[i].split()[-1]) + 8
possible_matches[i - 1] = possible_matches[i - 1][:length*-1]
return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites):
"""Appends matches of specified food item in data of an individual course to
list of possible matches.
:param course_data: Chosen course subsection of MDining API HTTP response data
:type course_data: dict
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
:param item_in: User input food item
:type item_in: string
:param meal_name: Name of meal
:type meal_name: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits = requisites['trait']
allergens = requisites['allergens']
item_data = []
datatype = type(course_data)
if datatype is list:
item_data += course_data
else:
|
for item in item_data:
if check_item_specifications(item, traits, allergens) == False:
continue
if item_in.upper() in item['name'].upper():
if item['name'][-1] == ' ':
item['name'] = item['name'][:-1]
possible_matches.append(item['name'] + ' during ' + meal_name)
return possible_matches
#########################################################################
###Primary Handler Functions
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and meal entities from ``findLocationAndMeal`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param meal_in: Input meal
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
#preset vars
url = 'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'
location = '&location='
date = '&date='
meal = '&meal='
#API url concatenation
location += loc_in
meal += meal_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
#fetching json
data = requests.get(url).json()
#checking if specified meal available
if check_meal_available(data, meal_in):
returnstring = (get_items(data, requisites, False)).rstrip(', ')
return format_plural(returnstring)
else:
return "No meal is available | item_data.append(course_data) | conditional_block |
restingstate_preprocess_gica_noscrub.py | ,mainColor)
#Make sure scrub file exists and if not, then set the number of scrubbing TRs to zero
if os.path.exists(metric_values_text):
fds = np.loadtxt(metric_values_text)
tr = len(fds)
mfd = np.mean(fds)
else:
print red + "No outliers found for %s. Moving on\n%s" % (subject,mainColor)
num_cols = 0
if not args.noprefeat:
# #----------------------------------------
# # Preprocessing steps in FEAT - smoothing (6mm), motion correction, slice timing correction, registration (nonlinear, warp resolution 10mm), BET, NO TEMPORAL FILTERING
# #----------------------------------------
print sectionColor + "Preprocessing steps in FEAT ---------------------------%s" %(mainColor)
if not os.path.exists(preprocess_featfolder):
command = "sed -e 's/DEFINEINPUT/%s/g' -e 's/DEFINEOUTPUT/%s/g' -e 's/DEFINESTRUCT/%s/g' -e 's/DEFINEVOLUME/%s/g' -e 's/DEFINERAD/%s/g' -e 's/DEFINEMAG/%s/g' %s > %s" % (re.escape(origdatafile),re.escape(preprocess_featfolder),re.escape(t1image), numtimepoints, re.escape(radfile), re.escape(magfile),genericdesign,designOutput)
commando(command, logfile)
command = "feat %s" % designOutput
commando(command, logfile)
else:
print yellow + "FEAT Preprocessing already completed for %s. Moving on\n%s" % (subject,mainColor)
#--------------------
# Report on motion data
# -------------------
#read in the data from the motion report file 1
filename = preprocess_featfolder + '/report_prestats.html'
textfile = open(filename,'r')
filetext = textfile.read()
textfile.close()
#find absolute motion
result_ab1 = re.search('absolute=(.*)mm,',filetext)
motion_ab1 = result_ab1.groups()[0]
#find relative motion
result_rel1= re.search('relative=(.*)mm',filetext)
motion_rel1 = result_rel1.groups()[0]
##########
#Determine if they moved more than 3mm and print out motion
##########
counter = 0
counter2 = 0
c1 = 0
c2 = 0
with open(mc_abs) as f:
for row in csv.reader(f):
counter = counter + 1
number = float(row[0])
if number > 3:
c1 = c1 + 1
#print red + "%s has absolute motion greater than 3mm. %f at TR = %d%s" %(subject,number,counter, mainColor)
with open(mc_rel) as f:
for row in csv.reader(f):
counter2 = counter2 + 1
number = float(row[0])
if number > 3:
c2 = c2 + 1
#print red + "%s has relative motion greater than 3mm. %f at TR = %d%s" %(subject,number,counter2, mainColor)
#print red + "%s\tTRs: %s\tmean FD: %.2f\tAbs Motion: %s\tRel Motion: %s\tTR greater than 3mm mvmt: %s,%s%s" %(subject,numtimepoints,mfd,motion_ab1,motion_rel1,c1,c2,mainColor)
print sectionColor2 + "Motion Report: Mean FD: %.2f, Absolute: %s, Relative, %s, Spikes1: %s, Spikes2: %s%s" %(mfd,motion_ab1,motion_rel1,c1,c2,mainColor)
#----------------------------------------
# MELODIC ICA and ICA-AROMA
#----------------------------------------
if not args.noaroma:
print sectionColor + "ICA-AROMA ---------------------------" + mainColor
#Make sure preprocessing feat ran correctly
if not os.path.exists(preprocess_output):
print red + "Preprocess feat not completed correctly. %s does not exist. Moving on to next subject\n%s" %(preprocess_output,mainColor)
continue
if not os.path.exists(icafolder):
#print red + "ICA-AROMA has not been completed for %s\n%s" % (subject,mainColor)
icalocation = "/Volumes/BCI-1/Matt-Emily/ASD_restingstate/ICA-AROMA/ICA_AROMA.py"
if not os.path.exists(icalocation):
icalocation = "/Volumes/BCI/Matt-Emily/ASD_restingstate/ICA-AROMA/ICA_AROMA.py"
if not os.path.exists(icalocation):
icalocation = "/Volumes/BCI-2/Matt-Emily/ASD_restingstate/ICA-AROMA/ICA_AROMA.py"
command = "%s -feat %s -out %s" % (icalocation,preprocess_featfolder,icafolder)
commando(command,logfile)
else:
print yellow + "ICA-AROMA already completed for %s. Moving on\n%s" % (subject,mainColor)
#Check and make sure it completed properly
if not os.path.exists(icafiles):
print red + "%s does not have the completed ICA File%s" %(subject,mainColor)
continue
# else:
# print yellow + "%s has the completed ICA File%s" %(subject,mainColor)
if not args.noseg:
#----------------------------------------
# segmentation
#----------------------------------------
print sectionColor + "Segmentation ---------------------------" + mainColor
segfile = '%sT1_brain_seg_0.nii.gz' %(rsdir)
if not os.path.exists(segfile):
t1out = rsdir + "T1_brain"
command = "fast -g -o %s %s" % (t1out,t1image)
commando(command,logfile)
command="slicer %s %sT1_brain_seg_0 -a %sCSF.png" % (t1image,rsdir,rsdir)
commando(command,logfile)
command="slicer %s %sT1_brain_seg_1 -a %sGM.png" % (t1image,rsdir,rsdir)
commando(command,logfile)
command="slicer %s %sT1_brain_seg_2 -a %sWM.png" % (t1image,rsdir,rsdir)
commando(command,logfile)
writeToLog("<h2>Segmentation</h2><br>CSF:<br><img src=CSF.png><br><br>White matter:<br><img src=WM.png><br><br>Gray matter:<br><img src=GM.png><br><br><hr>",reportfile)
else:
print yellow + "Segmentation already completed for %s. Moving on\n%s" % (subject,mainColor)
if not args.noconfound:
#----------------------------------------
# create confound timeseries
#----------------------------------------
# CSF is mprage_brain_seg_0 is and WM is mprage_brain_seg_2
print sectionColor + "Extracting confound timeseries ---------------------------" + mainColor
if not os.path.exists(CSFfile):
command = "flirt -in %sT1_brain_seg_0 -ref %s/example_func.nii.gz -applyxfm -init %s/highres2example_func.mat -interp nearestneighbour -o %srest_CSF.nii.gz" % (rsdir,regdir,regdir,rsdir)
commando(command,logfile)
command = "flirt -in %sT1_brain_seg_2 -ref %s/example_func.nii.gz -applyxfm -init %s/highres2example_func.mat -interp nearestneighbour -o %srest_WM.nii.gz" % (rsdir,regdir,regdir,rsdir)
commando(command,logfile)
command = "fslmeants -i %s -m %srest_CSF.nii.gz -o %srest_CSF.txt" % (filteredfile,rsdir,rsdir)
commando(command,logfile)
command = "fslmeants -i %s -m %srest_WM.nii.gz -o %srest_WM.txt" % (filteredfile,rsdir,rsdir)
commando(command,logfile)
else:
print yellow + "Confound timeseries already created for %s. Moving on\n%s" % (subject,mainColor)
if not args.noresid:
#----------------------------------------
# Regress out confounds (do not include scrubbing file):
#----------------------------------------
| print sectionColor + "Regressing out confounds ---------------------------" + mainColor
| random_line_split |
|
restingstate_preprocess_gica_noscrub.py | = preprocess_featfolder + "/reg"
preprocess_output = preprocess_featfolder + "/filtered_func_data.nii.gz"
# if os.path.exists(regdir):
# print red + "Subject %s has an old, bad feat folder. Be careful%s" %(subject,mainColor)
# #shutil.rmtree(preprocess_featfolder)
icafolder = preprocess_featfolder + "/ICA_AROMA"
icafiles = icafolder + '/melodic.ica'
filteredfile = icafolder + "/denoised_func_data_nonaggr.nii.gz"
#Everything good? Start the work
print
print sectionColor + "WORKING ON SUBJECT: %s%s" %(subject, mainColor)
#Check to make sure subject has all relevant folders
if os.path.exists(radfile1):
radfile = radfile1
magfile = magfile1
#print yellow + 'Found radfile at %s%s' %(radfile,mainColor)
elif os.path.exists(radfile2):
radfile = radfile2
magfile = magfile2
#print yellow + 'Found radfile at %s%s' %(radfile, mainColor)
elif os.path.exists(radfile3):
radfile = radfile3
magfile = magfile3
#print yellow + 'Found radfile at %s%s' %(radfile, mainColor)
else:
print red + '%s is missing fieldmap data: %s%s' %(subject,radfile1,mainColor)
continue
if not os.path.exists(magfile):
print red + '%s is missing fieldmap data: %s%s' %(subject,magfile,mainColor)
continue
#Make sure we have the wholebrain, non skullstripped brain as well
if not os.path.exists(wholebrain):
print red + '%s T1 wholebrain image is not found or labeled something different%s' %(subject,mainColor)
if not os.path.exists(t1image):
print red + '%s T1_brain image is not found or labeled something different%s' %(subject,mainColor)
if not os.path.exists(origdatafile):
print red + '%s Restingstate.nii.gz is not found or labeled something different%s' %(subject,mainColor)
#Check the number of TRs for resting state
command = 'fslinfo %s' %(origdatafile)
origresults = check_output(command,shell=True)
origreport = origresults.split()
indx = origreport.index('dim4')
numtimepoints = origreport[indx + 1]
if numtimepoints != '150':
if "7min" in origdatafile:
print yellow + '%s restingstate file has %s timepoints. Will cut at end' %(subject,numtimepoints)
else:
print red + '%s restingstate file has %s timepoints. Please check. Moving on to next participant' %(subject,numtimepoints)
continue
if not args.nopre:
# prepare web page report (only if not created yet)
if not os.path.exists(tffile_new):
timestart= datetime.now()
timestamp = timestart.strftime('%b %d %G %I:%M%p')
fsldir = os.environ['FSLDIR']
writeToLog("<html><head><title>Resting State Analysis Report "+subject+"</title><link REL=stylesheet TYPE=text/css href="+fsldir+"/doc/fsl.css></head><body>",reportfile)
writeToLog("\n<h1>Resting State Analysis for "+subject+"</h1>Processing started at: "+timestamp+"<br><hr><br>",reportfile)
call("open " + reportfile,shell=True)
#Check to see if the completed file exists, skip anyone who else it
# if os.path.exists(tffile_new):
# print yellow + "Preprocessed gICA file already completed for %s. Moving on\n%s" % (subject,mainColor)
# continue
# #----------------------------------------
# # Scrubbing with FD - just to get mFD
# #----------------------------------------
if not args.noscrub:
if not os.path.exists(metric_values_text):
print sectionColor2 + " Scrubbing for %s to determine mFD\n%s" % (subject,mainColor)
command = "fsl_motion_outliers -i %s -o %s --fd --thresh=%s -s %s -p %s -v" % (origdatafile, scrubout, "0.5", metric_values_text, metric_values_plot)
commando(command, logfile)
else:
print yellow + "FSL Motion Outliers already completed for %s. Moving on\n%s" % (subject,mainColor)
#Make sure scrub file exists and if not, then set the number of scrubbing TRs to zero
if os.path.exists(metric_values_text):
fds = np.loadtxt(metric_values_text)
tr = len(fds)
mfd = np.mean(fds)
else:
print red + "No outliers found for %s. Moving on\n%s" % (subject,mainColor)
num_cols = 0
if not args.noprefeat:
# #----------------------------------------
# # Preprocessing steps in FEAT - smoothing (6mm), motion correction, slice timing correction, registration (nonlinear, warp resolution 10mm), BET, NO TEMPORAL FILTERING
# #----------------------------------------
print sectionColor + "Preprocessing steps in FEAT ---------------------------%s" %(mainColor)
if not os.path.exists(preprocess_featfolder):
command = "sed -e 's/DEFINEINPUT/%s/g' -e 's/DEFINEOUTPUT/%s/g' -e 's/DEFINESTRUCT/%s/g' -e 's/DEFINEVOLUME/%s/g' -e 's/DEFINERAD/%s/g' -e 's/DEFINEMAG/%s/g' %s > %s" % (re.escape(origdatafile),re.escape(preprocess_featfolder),re.escape(t1image), numtimepoints, re.escape(radfile), re.escape(magfile),genericdesign,designOutput)
commando(command, logfile)
command = "feat %s" % designOutput
commando(command, logfile)
else:
print yellow + "FEAT Preprocessing already completed for %s. Moving on\n%s" % (subject,mainColor)
#--------------------
# Report on motion data
# -------------------
#read in the data from the motion report file 1
filename = preprocess_featfolder + '/report_prestats.html'
textfile = open(filename,'r')
filetext = textfile.read()
textfile.close()
#find absolute motion
result_ab1 = re.search('absolute=(.*)mm,',filetext)
motion_ab1 = result_ab1.groups()[0]
#find relative motion
result_rel1= re.search('relative=(.*)mm',filetext)
motion_rel1 = result_rel1.groups()[0]
##########
#Determine if they moved more than 3mm and print out motion
##########
counter = 0
counter2 = 0
c1 = 0
c2 = 0
with open(mc_abs) as f:
for row in csv.reader(f):
counter = counter + 1
number = float(row[0])
if number > 3:
c1 = c1 + 1
#print red + "%s has absolute motion greater than 3mm. %f at TR = %d%s" %(subject,number,counter, mainColor)
with open(mc_rel) as f:
for row in csv.reader(f):
counter2 = counter2 + 1
number = float(row[0])
if number > 3:
c2 = c2 + 1
#print red + "%s has relative motion greater than 3mm. %f at TR = %d%s" %(subject,number,counter2, mainColor)
#print red + "%s\tTRs: %s\tmean FD: %.2f\tAbs Motion: %s\tRel Motion: %s\tTR greater than 3mm mvmt: %s,%s%s" %(subject,numtimepoints,mfd,motion_ab1,motion_rel1,c1,c2,mainColor)
print sectionColor2 + "Motion Report: Mean FD: %.2f, Absolute: %s, Relative, %s, Spikes1: %s, Spikes2: %s%s" %(mfd,motion_ab1,motion_rel1,c1,c2,mainColor)
#----------------------------------------
# MELODIC ICA and ICA-AROMA
#----------------------------------------
if not args.noaroma:
print sectionColor + "ICA-AROMA ---------------------------" + mainColor
#Make sure preprocessing feat ran correctly
if not os.path.exists(preprocess_output):
| print red + "Preprocess feat not completed correctly. %s does not exist. Moving on to next subject\n%s" %(preprocess_output,mainColor)
continue | conditional_block |
|
shell.rs | blk" => FILESYSTEM.lsblk(),
"mount" => mount(cwd, &self.args[1..]),
"umount" => umount(cwd, &self.args[1]),
"mkcrypt" => encrypt_part(&self.args[1..]),
path => kprintln!("unknown command: {}", path)
}
}
}
fn pwd(cwd: &mut PathBuf) {
let path = cwd.as_path();
let path_str = path.to_str().expect("Failed to get working directory");
kprintln!("{}", path_str);
}
fn cd(cwd: &mut PathBuf, path: &str) -> bool {
if path.len() == 0 { return true }
if &path[0..1] == "/" {
// cwd.clear() not implemented in shim :(
while cwd.pop() {}
}
for part in path.split('/') {
// Remove any / that makes its way in
let part = part.replace("/", "");
if part == "." {
continue
} else if part == ".." {
cwd.pop();
} else {
cwd.push(&part);
match FILESYSTEM.open(cwd.as_path()) {
Ok(entry) => {
if entry.is_file() {
kprintln!("{}: Not a directory", part);
cwd.pop();
return false
}
}
Err(_) => {
kprintln!("{}: No such file or directory", part);
cwd.pop();
return false
} | }
fn ls(cwd: &PathBuf, args: &[&str]) {
let mut rel_dir = cwd.clone();
let mut changed_dir = false;
let mut show_hidden = false;
for arg in args {
if *arg == "-a" {
show_hidden = true;
continue
}
if changed_dir {
continue
}
if !cd(&mut rel_dir, arg) {
return
} else {
changed_dir = true // only run cd once
}
}
// no need to cd . if they didn't change dir
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open dir");
let dir = entry.as_dir().expect("Expected directory, found file");
for item in dir.entries().expect("Couldn't get a dir iterator") {
if show_hidden || !item.metadata().hidden() {
kprintln!("{}", item.metadata())
}
}
}
fn cat(cwd: &PathBuf, args: &[&str]) {
fn cat_one(cwd: &PathBuf, path: &str) {
use core::str;
use io::Read;
use alloc::slice::SliceConcatExt;
let mut rel_dir = cwd.clone();
let parts = path.split('/').collect::<Vec<&str>>();
let dir = parts[0..parts.len()-1].join("/");
if !cd(&mut rel_dir, &dir) {
return
}
rel_dir.push(parts[parts.len()-1]);
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open file");
if !entry.is_file() {
kprintln!("Can't cat a directory {}!", path);
return
}
let mut file = entry.into_file().expect("Expected file, found directory");
loop {
let mut buffer = [0u8; 256];
match file.read(&mut buffer) {
Ok(0) => break,
Ok(n) => {
let string = str::from_utf8(&buffer[0..n]);
match string {
Ok(string) => kprint!("{}", string),
Err(_) => {
kprintln!("Couldn't parse {} as UTF-8", path);
return
},
}
},
Err(e) => {
kprintln!("Error when reading file {}: {:?}", path, e);
return
}
}
}
}
for arg in args {
cat_one(cwd, arg)
}
}
fn canonicalize(path: PathBuf) -> Result<PathBuf, ()> {
let mut new_path = PathBuf::new();
for comp in path.components() {
match comp {
Component::ParentDir => {
let res = new_path.pop();
if !res {
return Err(());
}
},
Component::Normal(n) => new_path = new_path.join(n),
Component::RootDir => new_path = ["/"].iter().collect(),
_ => ()
};
}
Ok(new_path)
}
fn get_abs_path(cwd: &PathBuf, dir_arg: &str) -> Option<PathBuf> {
let mut raw_path: PathBuf = PathBuf::from(dir_arg);
if !raw_path.is_absolute() {
raw_path = cwd.clone().join(raw_path);
}
let abs_path = match canonicalize(raw_path) {
Ok(p) => p,
Err(_) => {
kprintln!("\ninvalid arg: {}", dir_arg);
return None;
}
};
Some(abs_path)
}
fn mkdir(cwd: &PathBuf, args: &[&str]) {
let abs_path = match get_abs_path(cwd, args[0]) {
Some(p) => p,
None => return
};
let dir_metadata = fat32::vfat::Metadata {
name: String::from(abs_path.file_name().unwrap().to_str().unwrap()),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default_dir(), // directory
size: 0
};
let path_clone = abs_path.clone();
FILESYSTEM.create_dir(abs_path.parent().unwrap(), dir_metadata).expect("Failed to create dir");
FILESYSTEM.flush_fs(path_clone);
}
fn write_file_test(cwd: &PathBuf) {
use shim::io::Write;
let mut dir = FILESYSTEM.open_dir(cwd.as_path()).expect("Couldn't get $CWD as dir");
dir.create(fat32::vfat::Metadata {
name: String::from("test_write.txt"),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default(),
size: 0,
}).expect("Couldn't create test_write.txt");
let mut path = cwd.clone();
path.push("test_write.txt");
let test_file_entry = FILESYSTEM.open(path.as_path()).expect("couldn't open /test_write.txt");
assert!(test_file_entry.is_file());
let mut test_file = test_file_entry.into_file().expect("couldn't open /test_write.txt as file");
let test_buf = "hello world!!\n".as_bytes();
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
FILESYSTEM.flush_fs(cwd);
}
fn touch(cwd: &PathBuf, args: &[&str]) {
for arg in args {
let arg_path = PathBuf::from(arg);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let base = path.parent();
let mut base_dir = match base {
None => FILESYSTEM.open_dir("/").expect("Could not get / as dir"),
Some(base) => FILESYSTEM.open_dir(base).expect("Could not get target as dir"),
};
let file = path.file_name().expect("Must specify a file to create")
.to_str().expect("Couldn't get filename as string");
base_dir.create(fat32::vfat::Metadata {
name: String::from(file),
..Default::default()
}).expect("Couldn't create file");
match base {
Some(base) => FILESYSTEM.flush_fs(base),
None => FILESYSTEM.flush_fs("/")
}
}
}
fn append(cwd: &PathBuf, args: &[&str]) {
use shim::io::{Write, Seek, SeekFrom};
if args.len() < 2 {
kprintln!("USAGE: append [filename] [contents]");
return;
}
let arg_path = PathBuf::from(args[0]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let mut fd = FILESYSTEM.open_file(path.as_path()).expect("Couldn't open file for writing");
for i in 1..args.len() {
fd.seek(SeekFrom::End(0)).expect("Failed to seek to end of file");
fd.write(&args[i].bytes().collect::<alloc::vec::Vec<u8>>()).expect("Failed to append to file");
if i < args.len() - 1 {
fd.write(&[' ' as u8]).expect("Failed to append space to file");
}
}
fd.write(&['\n' as u8]).expect("Failed to append newline to file");
FILESYSTEM.flush_fs(path);
}
fn rm(cwd: &PathBuf, args: &[&str]) {
use fat32::traits:: | }
}
}
return true | random_line_split |
shell.rs | " => FILESYSTEM.lsblk(),
"mount" => mount(cwd, &self.args[1..]),
"umount" => umount(cwd, &self.args[1]),
"mkcrypt" => encrypt_part(&self.args[1..]),
path => kprintln!("unknown command: {}", path)
}
}
}
fn pwd(cwd: &mut PathBuf) {
let path = cwd.as_path();
let path_str = path.to_str().expect("Failed to get working directory");
kprintln!("{}", path_str);
}
fn cd(cwd: &mut PathBuf, path: &str) -> bool {
if path.len() == 0 { return true }
if &path[0..1] == "/" {
// cwd.clear() not implemented in shim :(
while cwd.pop() {}
}
for part in path.split('/') {
// Remove any / that makes its way in
let part = part.replace("/", "");
if part == "." {
continue
} else if part == ".." {
cwd.pop();
} else {
cwd.push(&part);
match FILESYSTEM.open(cwd.as_path()) {
Ok(entry) => {
if entry.is_file() {
kprintln!("{}: Not a directory", part);
cwd.pop();
return false
}
}
Err(_) => {
kprintln!("{}: No such file or directory", part);
cwd.pop();
return false
}
}
}
}
return true
}
fn ls(cwd: &PathBuf, args: &[&str]) {
let mut rel_dir = cwd.clone();
let mut changed_dir = false;
let mut show_hidden = false;
for arg in args {
if *arg == "-a" {
show_hidden = true;
continue
}
if changed_dir {
continue
}
if !cd(&mut rel_dir, arg) {
return
} else {
changed_dir = true // only run cd once
}
}
// no need to cd . if they didn't change dir
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open dir");
let dir = entry.as_dir().expect("Expected directory, found file");
for item in dir.entries().expect("Couldn't get a dir iterator") {
if show_hidden || !item.metadata().hidden() {
kprintln!("{}", item.metadata())
}
}
}
fn cat(cwd: &PathBuf, args: &[&str]) {
fn cat_one(cwd: &PathBuf, path: &str) {
use core::str;
use io::Read;
use alloc::slice::SliceConcatExt;
let mut rel_dir = cwd.clone();
let parts = path.split('/').collect::<Vec<&str>>();
let dir = parts[0..parts.len()-1].join("/");
if !cd(&mut rel_dir, &dir) {
return
}
rel_dir.push(parts[parts.len()-1]);
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open file");
if !entry.is_file() {
kprintln!("Can't cat a directory {}!", path);
return
}
let mut file = entry.into_file().expect("Expected file, found directory");
loop {
let mut buffer = [0u8; 256];
match file.read(&mut buffer) {
Ok(0) => break,
Ok(n) => {
let string = str::from_utf8(&buffer[0..n]);
match string {
Ok(string) => kprint!("{}", string),
Err(_) => {
kprintln!("Couldn't parse {} as UTF-8", path);
return
},
}
},
Err(e) => {
kprintln!("Error when reading file {}: {:?}", path, e);
return
}
}
}
}
for arg in args {
cat_one(cwd, arg)
}
}
fn canonicalize(path: PathBuf) -> Result<PathBuf, ()> {
let mut new_path = PathBuf::new();
for comp in path.components() {
match comp {
Component::ParentDir => {
let res = new_path.pop();
if !res {
return Err(());
}
},
Component::Normal(n) => new_path = new_path.join(n),
Component::RootDir => new_path = ["/"].iter().collect(),
_ => ()
};
}
Ok(new_path)
}
fn get_abs_path(cwd: &PathBuf, dir_arg: &str) -> Option<PathBuf> |
fn mkdir(cwd: &PathBuf, args: &[&str]) {
let abs_path = match get_abs_path(cwd, args[0]) {
Some(p) => p,
None => return
};
let dir_metadata = fat32::vfat::Metadata {
name: String::from(abs_path.file_name().unwrap().to_str().unwrap()),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default_dir(), // directory
size: 0
};
let path_clone = abs_path.clone();
FILESYSTEM.create_dir(abs_path.parent().unwrap(), dir_metadata).expect("Failed to create dir");
FILESYSTEM.flush_fs(path_clone);
}
fn write_file_test(cwd: &PathBuf) {
use shim::io::Write;
let mut dir = FILESYSTEM.open_dir(cwd.as_path()).expect("Couldn't get $CWD as dir");
dir.create(fat32::vfat::Metadata {
name: String::from("test_write.txt"),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default(),
size: 0,
}).expect("Couldn't create test_write.txt");
let mut path = cwd.clone();
path.push("test_write.txt");
let test_file_entry = FILESYSTEM.open(path.as_path()).expect("couldn't open /test_write.txt");
assert!(test_file_entry.is_file());
let mut test_file = test_file_entry.into_file().expect("couldn't open /test_write.txt as file");
let test_buf = "hello world!!\n".as_bytes();
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
FILESYSTEM.flush_fs(cwd);
}
fn touch(cwd: &PathBuf, args: &[&str]) {
for arg in args {
let arg_path = PathBuf::from(arg);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let base = path.parent();
let mut base_dir = match base {
None => FILESYSTEM.open_dir("/").expect("Could not get / as dir"),
Some(base) => FILESYSTEM.open_dir(base).expect("Could not get target as dir"),
};
let file = path.file_name().expect("Must specify a file to create")
.to_str().expect("Couldn't get filename as string");
base_dir.create(fat32::vfat::Metadata {
name: String::from(file),
..Default::default()
}).expect("Couldn't create file");
match base {
Some(base) => FILESYSTEM.flush_fs(base),
None => FILESYSTEM.flush_fs("/")
}
}
}
fn append(cwd: &PathBuf, args: &[&str]) {
use shim::io::{Write, Seek, SeekFrom};
if args.len() < 2 {
kprintln!("USAGE: append [filename] [contents]");
return;
}
let arg_path = PathBuf::from(args[0]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let mut fd = FILESYSTEM.open_file(path.as_path()).expect("Couldn't open file for writing");
for i in 1..args.len() {
fd.seek(SeekFrom::End(0)).expect("Failed to seek to end of file");
fd.write(&args[i].bytes().collect::<alloc::vec::Vec<u8>>()).expect("Failed to append to file");
if i < args.len() - 1 {
fd.write(&[' ' as u8]).expect("Failed to append space to file");
}
}
fd.write(&['\n' as u8]).expect("Failed to append newline to file");
FILESYSTEM.flush_fs(path);
}
fn rm(cwd: &PathBuf, args: &[&str]) {
use fat32:: | {
let mut raw_path: PathBuf = PathBuf::from(dir_arg);
if !raw_path.is_absolute() {
raw_path = cwd.clone().join(raw_path);
}
let abs_path = match canonicalize(raw_path) {
Ok(p) => p,
Err(_) => {
kprintln!("\ninvalid arg: {}", dir_arg);
return None;
}
};
Some(abs_path)
} | identifier_body |
shell.rs | blk" => FILESYSTEM.lsblk(),
"mount" => mount(cwd, &self.args[1..]),
"umount" => umount(cwd, &self.args[1]),
"mkcrypt" => encrypt_part(&self.args[1..]),
path => kprintln!("unknown command: {}", path)
}
}
}
fn pwd(cwd: &mut PathBuf) {
let path = cwd.as_path();
let path_str = path.to_str().expect("Failed to get working directory");
kprintln!("{}", path_str);
}
fn | (cwd: &mut PathBuf, path: &str) -> bool {
if path.len() == 0 { return true }
if &path[0..1] == "/" {
// cwd.clear() not implemented in shim :(
while cwd.pop() {}
}
for part in path.split('/') {
// Remove any / that makes its way in
let part = part.replace("/", "");
if part == "." {
continue
} else if part == ".." {
cwd.pop();
} else {
cwd.push(&part);
match FILESYSTEM.open(cwd.as_path()) {
Ok(entry) => {
if entry.is_file() {
kprintln!("{}: Not a directory", part);
cwd.pop();
return false
}
}
Err(_) => {
kprintln!("{}: No such file or directory", part);
cwd.pop();
return false
}
}
}
}
return true
}
fn ls(cwd: &PathBuf, args: &[&str]) {
let mut rel_dir = cwd.clone();
let mut changed_dir = false;
let mut show_hidden = false;
for arg in args {
if *arg == "-a" {
show_hidden = true;
continue
}
if changed_dir {
continue
}
if !cd(&mut rel_dir, arg) {
return
} else {
changed_dir = true // only run cd once
}
}
// no need to cd . if they didn't change dir
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open dir");
let dir = entry.as_dir().expect("Expected directory, found file");
for item in dir.entries().expect("Couldn't get a dir iterator") {
if show_hidden || !item.metadata().hidden() {
kprintln!("{}", item.metadata())
}
}
}
fn cat(cwd: &PathBuf, args: &[&str]) {
fn cat_one(cwd: &PathBuf, path: &str) {
use core::str;
use io::Read;
use alloc::slice::SliceConcatExt;
let mut rel_dir = cwd.clone();
let parts = path.split('/').collect::<Vec<&str>>();
let dir = parts[0..parts.len()-1].join("/");
if !cd(&mut rel_dir, &dir) {
return
}
rel_dir.push(parts[parts.len()-1]);
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open file");
if !entry.is_file() {
kprintln!("Can't cat a directory {}!", path);
return
}
let mut file = entry.into_file().expect("Expected file, found directory");
loop {
let mut buffer = [0u8; 256];
match file.read(&mut buffer) {
Ok(0) => break,
Ok(n) => {
let string = str::from_utf8(&buffer[0..n]);
match string {
Ok(string) => kprint!("{}", string),
Err(_) => {
kprintln!("Couldn't parse {} as UTF-8", path);
return
},
}
},
Err(e) => {
kprintln!("Error when reading file {}: {:?}", path, e);
return
}
}
}
}
for arg in args {
cat_one(cwd, arg)
}
}
fn canonicalize(path: PathBuf) -> Result<PathBuf, ()> {
let mut new_path = PathBuf::new();
for comp in path.components() {
match comp {
Component::ParentDir => {
let res = new_path.pop();
if !res {
return Err(());
}
},
Component::Normal(n) => new_path = new_path.join(n),
Component::RootDir => new_path = ["/"].iter().collect(),
_ => ()
};
}
Ok(new_path)
}
fn get_abs_path(cwd: &PathBuf, dir_arg: &str) -> Option<PathBuf> {
let mut raw_path: PathBuf = PathBuf::from(dir_arg);
if !raw_path.is_absolute() {
raw_path = cwd.clone().join(raw_path);
}
let abs_path = match canonicalize(raw_path) {
Ok(p) => p,
Err(_) => {
kprintln!("\ninvalid arg: {}", dir_arg);
return None;
}
};
Some(abs_path)
}
fn mkdir(cwd: &PathBuf, args: &[&str]) {
let abs_path = match get_abs_path(cwd, args[0]) {
Some(p) => p,
None => return
};
let dir_metadata = fat32::vfat::Metadata {
name: String::from(abs_path.file_name().unwrap().to_str().unwrap()),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default_dir(), // directory
size: 0
};
let path_clone = abs_path.clone();
FILESYSTEM.create_dir(abs_path.parent().unwrap(), dir_metadata).expect("Failed to create dir");
FILESYSTEM.flush_fs(path_clone);
}
fn write_file_test(cwd: &PathBuf) {
use shim::io::Write;
let mut dir = FILESYSTEM.open_dir(cwd.as_path()).expect("Couldn't get $CWD as dir");
dir.create(fat32::vfat::Metadata {
name: String::from("test_write.txt"),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default(),
size: 0,
}).expect("Couldn't create test_write.txt");
let mut path = cwd.clone();
path.push("test_write.txt");
let test_file_entry = FILESYSTEM.open(path.as_path()).expect("couldn't open /test_write.txt");
assert!(test_file_entry.is_file());
let mut test_file = test_file_entry.into_file().expect("couldn't open /test_write.txt as file");
let test_buf = "hello world!!\n".as_bytes();
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
FILESYSTEM.flush_fs(cwd);
}
fn touch(cwd: &PathBuf, args: &[&str]) {
for arg in args {
let arg_path = PathBuf::from(arg);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let base = path.parent();
let mut base_dir = match base {
None => FILESYSTEM.open_dir("/").expect("Could not get / as dir"),
Some(base) => FILESYSTEM.open_dir(base).expect("Could not get target as dir"),
};
let file = path.file_name().expect("Must specify a file to create")
.to_str().expect("Couldn't get filename as string");
base_dir.create(fat32::vfat::Metadata {
name: String::from(file),
..Default::default()
}).expect("Couldn't create file");
match base {
Some(base) => FILESYSTEM.flush_fs(base),
None => FILESYSTEM.flush_fs("/")
}
}
}
fn append(cwd: &PathBuf, args: &[&str]) {
use shim::io::{Write, Seek, SeekFrom};
if args.len() < 2 {
kprintln!("USAGE: append [filename] [contents]");
return;
}
let arg_path = PathBuf::from(args[0]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let mut fd = FILESYSTEM.open_file(path.as_path()).expect("Couldn't open file for writing");
for i in 1..args.len() {
fd.seek(SeekFrom::End(0)).expect("Failed to seek to end of file");
fd.write(&args[i].bytes().collect::<alloc::vec::Vec<u8>>()).expect("Failed to append to file");
if i < args.len() - 1 {
fd.write(&[' ' as u8]).expect("Failed to append space to file");
}
}
fd.write(&['\n' as u8]).expect("Failed to append newline to file");
FILESYSTEM.flush_fs(path);
}
fn rm(cwd: &PathBuf, args: &[&str]) {
use fat32:: | cd | identifier_name |
helper.py | (png_file):
await x.edit_text("This sticker is Gey, Task Failed Successfully ≧ω≦")
await asyncio.sleep(5)
await x.delete()
raise Exception(stdout + stderr)
dls_loc = png_file
elif replied.sticker and replied.sticker.file_name.endswith(".webp"):
stkr_file = os.path.join(DOWN_PATH, f"{rand_key()}.png")
os.rename(dls_loc, stkr_file)
if not os.path.lexists(stkr_file):
await x.edit_text("```Sticker not found...```")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = stkr_file
elif replied.animation or replied.video:
await x.edit_text("`Converting Media To Image ...`")
jpg_file = os.path.join(DOWN_PATH, f"{rand_key()}.jpg")
await take_screen_shot(dls_loc, 0, jpg_file)
os.remove(dls_loc)
if not os.path.lexists(jpg_file):
await x.edit_text("This Gif is Gey (。ì _ í。), Task Failed Successfully !")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = jpg_file
return dls_loc
async def runcmd(cmd: str) -> Tuple[str, str, int, int]:
""" run command in terminal """
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
async def take_screen_shot(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
""" take a screenshot """
print(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
thumb_image_path = path or os.path.join(
DOWN_PATH, f"{basename(video_file)}.jpg"
)
command = f'''ffmpeg -ss {duration} -i "{video_file}" -vframes 1 "{thumb_image_path}"'''
err = (await runcmd(command))[1]
if err:
print(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
##################################################################
async def return_json_senpai(query: str, vars_: dict, auth: bool = False, user: int = None):
if auth is False:
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}).json()
else:
headers = {
'Authorization': 'Bearer ' + str((await AUTH_USERS.find_one({"id": int(user)}))['token']),
'Content-Type': 'application/json',
'Accept': 'application/json',
}
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}, headers=headers).json()
def cflag(country):
if country == "JP":
return "\U0001F1EF\U0001F1F5"
if country == "CN":
return "\U0001F1E8\U0001F1F3"
if country == "KR":
return "\U0001F1F0\U0001F1F7"
if country == "TW":
return "\U0001F1F9\U0001F1FC"
def pos_no(no):
ep_ = list(str(no))
x = ep_.pop()
if ep_ != [] and ep_.pop()=='1':
return 'th'
th = "st" if x == "1" else "nd" if x == "2" else "rd" if x == "3" else "th"
return th
def make_it_rw(time_stamp):
"""Converting Time Stamp to Readable Format"""
seconds, milliseconds = divmod(int(time_stamp), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " Days, ") if days else "")
+ ((str(hours) + " Hours, ") if hours else "")
+ ((str(minutes) + " Minutes, ") if minutes else "")
+ ((str(seconds) + " Seconds, ") if seconds else "")
+ ((str(milliseconds) + " ms, ") if milliseconds else "")
)
return tmp[:-2]
async def clog(name: str, text: str, tag: str):
log = f"#{name.upper()} #{tag.upper()}\n\n{text}"
await anibot.send_message(chat_id=LOG_CHANNEL_ID, text=log)
def get_btns(media, user: int, result: list, lsqry: str = None, lspage: int = None, auth: bool = False, sfw: str = "False"):
buttons = []
qry = f"_{lsqry}" if lsqry is not None else ""
pg = f"_{lspage}" if lspage is not None else ""
if media == "ANIME" and sfw == "False":
buttons.append([
InlineKeyboardButton(text="Characters", callback_data=f"char_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_1_{user}"),
InlineKeyboardButton(text="Description", callback_data=f"desc_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
InlineKeyboardButton(text="List Series", callback_data=f"ls_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
])
if media == "CHARACTER":
buttons.append([InlineKeyboardButton("Description", callback_data=f"desc_{result[2][0]}_CHAR{qry}{pg}_{str(auth)}_{user}")])
buttons.append([InlineKeyboardButton("List Series", callback_data=f"lsc_{result[2][0]}{qry}{pg}_{str(auth)}_{user}")])
if media == "SCHEDULED":
if result[0]!=0 and result[0]!=6:
buttons.append([
InlineKeyboardButton(str(day_(result[0]-1)), callback_data=f"sched_{result[0]-1}_{user}"),
InlineKeyboardButton(str(day_(result[0]+1)), callback_data=f"sched_{result[0]+1}_{user}")
])
if result[0] == 0:
buttons.append([InlineKeyboardButton(str(day_(result[0]+1)), callback_data=f"sched_{result[0]+1}_{user}")])
if result[0] == 6:
buttons.append([InlineKeyboardButton(str(day_(result[0]-1)), callback_data=f"sched_{result[0]-1}_{user}")])
if media == "MANGA" and sfw == "False":
buttons.append([InlineKeyboardButton("More Info", url=result[1][2])])
if media == "AIRING" and sfw == "False":
buttons.append([InlineKeyboardButton("More Info", url=result[1])])
if auth is True and media!="SCHEDULED" and sfw == "False":
auth_btns = get_auth_btns(media, user, result[2], lspage=lspage, lsqry=lsqry)
buttons.append(auth_btns)
if len(result)>3:
if result[3] == "None":
if result[4] != "None":
buttons.append([InlineKeyboardButton(text="Sequel", callback_data=f"btn_{result[4]}_{str(auth)}_{user}")])
else:
if result[4] != "None":
buttons.append([
InlineKeyboardButton(text="Prequel", callback_data=f"btn_{result[3]}_{str(auth)}_{user}"),
InlineKeyboardButton(text="Sequel", callback_data=f"btn_{result[4]}_{str(auth)}_{user}"),
])
else:
buttons.append([InlineKeyboardButton(text="Prequel", callback_data=f"btn_{result[3]}_{str(auth)}_{user}")])
if lsqry is not None and len(result)!=1 and result[1][1]!=1:
if lspage = | = 1:
buttons.append([InlineKeyboardButton(text="Next", callback_data=f"page_{media}{qry}_{int(lspage)+1}_{str(auth)}_{user}")])
elif lspage == result[1][1]:
buttons.append([InlineKeyboardButton(text="Prev", callback_data=f"page_{media}{qry}_{int(lspage)-1}_{str(auth)}_{user}")])
else:
buttons.append([
InlineKeyboardButton(text="Prev", callback_data=f"page_{media}{qry}_{int(lspage)-1}_{str(auth)}_{user}"),
InlineKeyboardButton(text="Next", callback_data=f"page_{media}{qry}_{int(lspage)+1}_{str(auth)}_{user}"),
])
retur | conditional_block |
|
helper.py | Gey, Task Failed Successfully ≧ω≦")
await asyncio.sleep(5)
await x.delete()
raise Exception(stdout + stderr)
dls_loc = png_file
elif replied.sticker and replied.sticker.file_name.endswith(".webp"):
stkr_file = os.path.join(DOWN_PATH, f"{rand_key()}.png")
os.rename(dls_loc, stkr_file)
if not os.path.lexists(stkr_file):
await x.edit_text("```Sticker not found...```")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = stkr_file
elif replied.animation or replied.video:
await x.edit_text("`Converting Media To Image ...`")
jpg_file = os.path.join(DOWN_PATH, f"{rand_key()}.jpg")
await take_screen_shot(dls_loc, 0, jpg_file)
os.remove(dls_loc)
if not os.path.lexists(jpg_file):
await x.edit_text("This Gif is Gey (。ì _ í。), Task Failed Successfully !")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = jpg_file
return dls_loc
async def runcmd(cmd: str) -> Tuple[str, str, int, int]:
""" run command in terminal """
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
async def take_screen_shot(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
""" take a screenshot """
print(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
thumb_image_path = path or os.path.join(
DOWN_PATH, f"{basename(video_file)}.jpg"
)
command = f'''ffmpeg -ss {duration} -i "{video_file}" -vframes 1 "{thumb_image_path}"'''
err = (await runcmd(command))[1]
if err:
print(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
##################################################################
async def return_json_senpai(query: str, vars_: dict, auth: bool = False, user: int = None):
if auth is False:
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}).json()
else:
headers = {
'Authorization': 'Bearer ' + str((await AUTH_USERS.find_one({"id": int(user)}))['token']),
'Content-Type': 'application/json',
'Accept': 'application/json',
}
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}, headers=headers).json()
def cflag(country):
if country == "JP":
return "\U0001F1EF\U0001F1F5"
if country == "CN":
return "\U0001F1E8\U0001F1F3"
if country == "KR":
return "\U0001F1F0\U0001F1F7"
if country == "TW":
return "\U0001F1F9\U0001F1FC"
def pos_no(no):
ep_ = list(str(no))
x = ep_.pop()
if ep_ != [] and ep_.pop()=='1':
return 'th'
th = "st" if x == "1" else "nd" if x == "2" else "rd" if x == "3" else "th"
return th
def make_it_rw(time_stamp):
"""Converting Time Stamp to Readable Format"""
seconds, milliseconds = divmod(int(time_stamp), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " Days, ") if days else "")
+ ((str(hours) + " Hours, ") if hours else "")
+ ((str(minutes) + " Minutes, ") if minutes else "")
+ ((str(seconds) + " Seconds, ") if seconds else "")
+ ((str(milliseconds) + " ms, ") if milliseconds else "")
)
return tmp[:-2]
async def clog(name: str, text: str, tag: str):
log = f"#{name.upper()} #{tag.upper()}\n\n{text}"
await anibot.send_message(chat_id=LOG_CHANNEL_ID, text=log)
def get_btns(media, user: int, result: list, lsqry: str = None, lspage: int = None, auth: bool = False, sfw: str = "False"):
buttons = []
qry = f"_{lsqry}" if lsqry is not None else ""
pg = f"_{lspage}" if lspage is not None else ""
if media == "ANIME" and sfw == "False":
buttons.append([
InlineKeyboardButton(text="Characters", callback_data=f"char_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_1_{user}"),
InlineKeyboardButton(text="Description", callback_data=f"desc_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
InlineKeyboardButton(text="List Series", callback_data=f"ls_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
])
if media == "CHARACTER":
buttons.append([InlineKeyboardButton("Description", callback_data=f"desc_{result[2][0]}_CHAR{qry}{pg}_{str(auth)}_{user}")])
buttons.append([InlineKeyboardButton("List Series", callback_data=f"lsc_{result[2][0]}{qry}{pg}_{str(auth)}_{user}")])
if media == "SCHEDULED":
if result[0]!=0 and result[0]!=6:
buttons.append([
InlineKeyboardButton(str(day_(result[0]-1)), callback_data=f"sched_{result[0]-1}_{user}"),
InlineKeyboardButton(str(day_(result[0]+1)), callback_data=f"sched_{result[0]+1}_{user}")
])
if result[0] == 0:
buttons.append([InlineKeyboardButton(str(day_(result[0]+1)), callback_data=f"sched_{result[0]+1}_{user}")])
if result[0] == 6:
buttons.append([InlineKeyboardButton(str(day_(result[0]-1)), callback_data=f"sched_{result[0]-1}_{user}")])
if media == "MANGA" and sfw == "False":
buttons.append([InlineKeyboardButton("More Info", url=result[1][2])])
if media == "AIRING" and sfw == "False":
buttons.append([InlineKeyboardButton("More Info", url=result[1])])
if auth is True and media!="SCHEDULED" and sfw == "False":
auth_btns = get_auth_btns(media, user, result[2], lspage=lspage, lsqry=lsqry)
buttons.append(auth_btns)
if len(result)>3:
if result[3] == "None":
if result[4] != "None":
buttons.append([InlineKeyboardButton(text="Sequel", callback_data=f"btn_{result[4]}_{str(auth)}_{user}")])
else:
if result[4] != "None":
buttons.append([
InlineKeyboardButton(text="Prequel", callback_data=f"btn_{result[3]}_{str(auth)}_{user}"),
InlineKeyboardButton(text="Sequel", callback_data=f"btn_{result[4]}_{str(auth)}_{user}"),
])
else:
buttons.append([InlineKeyboardButton(text="Prequel", callback_data=f"btn_{result[3]}_{str(auth)}_{user}")])
if lsqry is not None and len(result)!=1 and result[1][1]!=1:
if lspage == 1:
buttons.append([InlineKeyboardButton(text="Next", callback_data=f"page_{media}{qry}_{int(lspage)+1}_{str(auth)}_{user}")])
elif lspage == result[1][1]:
buttons.append([InlineKeyboardButton(text="Prev", callback_data=f"page_{media}{qry}_{int(lspage)-1}_{str(auth)}_{user}")])
else:
buttons.append([
InlineKeyboardButton(text="Prev", callback_data=f"page_{media}{qry}_{int(lspage)-1}_{str(auth)}_{user}"),
InlineKeyboardButton(text="Next", callback_data=f"page_{media}{qry}_{int(lspage)+1}_{str(auth)}_{user}"),
])
return InlineKeyboardMarkup(buttons)
def get_auth_bt | ns(media, use | identifier_name |
|
helper.py | clog('ANIBOT', f'UserID: {user}', 'BAN')
return
await asyncio.sleep(USER_WC[user])
else:
USER_WC[user] = 0
except KeyError:
pass
USER_JSON[user] = nut
try:
await func(_, message, msg)
except FloodWait as e:
await asyncio.sleep(e.x + 5)
except MessageNotModified:
pass
return wrapper
def check_user(func):
async def wrapper(_, c_q: CallbackQuery):
cq = json.loads(str(c_q))
user = cq['from_user']['id']
if await IGNORE.find_one({'_id': user}):
return
if user in OWNER or user==int(cq['data'].split("_").pop()):
if user not in OWNER:
nt = time()
try:
ot = USER_JSON[user]
if nt-ot<1.4:
await c_q.answer(
"Stop spamming bot!!!\nElse you will be blacklisted",
)
await clog('ANIBOT', f'UserID: {user}', 'SPAM')
except KeyError:
pass
USER_JSON[user] = nt
try:
await func(_, c_q, cq)
except FloodWait as e:
await asyncio.sleep(e.x + 5)
except MessageNotModified:
pass
else:
await c_q.answer(
"Not your query!!!",
show_alert=True,
)
return wrapper
async def media_to_image(client: anibot, message: Message, x: Message, replied: Message):
if not (
replied.photo
or replied.sticker
or replied.animation
or replied.video
):
await x.edit_text("Media Type Is Invalid !")
await asyncio.sleep(5)
await x.delete()
return
media = replied.photo or replied.sticker or replied.animation or replied.video
if not os.path.isdir(DOWN_PATH):
os.makedirs(DOWN_PATH)
dls = await client.download_media(
media,
file_name=DOWN_PATH + rand_key(),
)
dls_loc = os.path.join(DOWN_PATH, os.path.basename(dls))
if replied.sticker and replied.sticker.file_name.endswith(".tgs"):
png_file = os.path.join(DOWN_PATH, f"{rand_key()}.png")
cmd = f"lottie_convert.py --frame 0 -if lottie -of png {dls_loc} {png_file}"
stdout, stderr = (await runcmd(cmd))[:2]
os.remove(dls_loc)
if not os.path.lexists(png_file):
await x.edit_text("This sticker is Gey, Task Failed Successfully ≧ω≦")
await asyncio.sleep(5)
await x.delete()
raise Exception(stdout + stderr)
dls_loc = png_file
elif replied.sticker and replied.sticker.file_name.endswith(".webp"):
| os.rename(dls_loc, stkr_file)
if not os.path.lexists(stkr_file):
await x.edit_text("```Sticker not found...```")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = stkr_file
elif replied.animation or replied.video:
await x.edit_text("`Converting Media To Image ...`")
jpg_file = os.path.join(DOWN_PATH, f"{rand_key()}.jpg")
await take_screen_shot(dls_loc, 0, jpg_file)
os.remove(dls_loc)
if not os.path.lexists(jpg_file):
await x.edit_text("This Gif is Gey (。ì _ í。), Task Failed Successfully !")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = jpg_file
return dls_loc
async def runcmd(cmd: str) -> Tuple[str, str, int, int]:
""" run command in terminal """
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
async def take_screen_shot(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
""" take a screenshot """
print(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
thumb_image_path = path or os.path.join(
DOWN_PATH, f"{basename(video_file)}.jpg"
)
command = f'''ffmpeg -ss {duration} -i "{video_file}" -vframes 1 "{thumb_image_path}"'''
err = (await runcmd(command))[1]
if err:
print(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
##################################################################
async def return_json_senpai(query: str, vars_: dict, auth: bool = False, user: int = None):
if auth is False:
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}).json()
else:
headers = {
'Authorization': 'Bearer ' + str((await AUTH_USERS.find_one({"id": int(user)}))['token']),
'Content-Type': 'application/json',
'Accept': 'application/json',
}
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}, headers=headers).json()
def cflag(country):
if country == "JP":
return "\U0001F1EF\U0001F1F5"
if country == "CN":
return "\U0001F1E8\U0001F1F3"
if country == "KR":
return "\U0001F1F0\U0001F1F7"
if country == "TW":
return "\U0001F1F9\U0001F1FC"
def pos_no(no):
ep_ = list(str(no))
x = ep_.pop()
if ep_ != [] and ep_.pop()=='1':
return 'th'
th = "st" if x == "1" else "nd" if x == "2" else "rd" if x == "3" else "th"
return th
def make_it_rw(time_stamp):
"""Converting Time Stamp to Readable Format"""
seconds, milliseconds = divmod(int(time_stamp), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " Days, ") if days else "")
+ ((str(hours) + " Hours, ") if hours else "")
+ ((str(minutes) + " Minutes, ") if minutes else "")
+ ((str(seconds) + " Seconds, ") if seconds else "")
+ ((str(milliseconds) + " ms, ") if milliseconds else "")
)
return tmp[:-2]
async def clog(name: str, text: str, tag: str):
log = f"#{name.upper()} #{tag.upper()}\n\n{text}"
await anibot.send_message(chat_id=LOG_CHANNEL_ID, text=log)
def get_btns(media, user: int, result: list, lsqry: str = None, lspage: int = None, auth: bool = False, sfw: str = "False"):
buttons = []
qry = f"_{lsqry}" if lsqry is not None else ""
pg = f"_{lspage}" if lspage is not None else ""
if media == "ANIME" and sfw == "False":
buttons.append([
InlineKeyboardButton(text="Characters", callback_data=f"char_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_1_{user}"),
InlineKeyboardButton(text="Description", callback_data=f"desc_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
InlineKeyboardButton(text="List Series", callback_data=f"ls_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
])
if media == "CHARACTER":
buttons.append([InlineKeyboardButton("Description", callback_data=f"desc_{result[2][0]}_CHAR{qry}{pg}_{str(auth)}_{user}")])
buttons.append([InlineKeyboardButton("List Series", callback_data=f"lsc_{result[2][0]}{qry}{pg}_{str(auth)}_{user}")])
if media == "SCHEDULED":
if result[0]!=0 and result[0]!=6:
buttons.append([
InlineKeyboardButton(str(day_(result[0]-1)), callback_data=f"sched_{result[0]-1}_{user}"),
InlineKeyboardButton(str(day_(result[0]+1)), | stkr_file = os.path.join(DOWN_PATH, f"{rand_key()}.png")
| random_line_split |
helper.py | await clog('ANIBOT', f'UserID: {user}', 'BAN')
return
await asyncio.sleep(USER_WC[user])
else:
USER_WC[user] = 0
except KeyError:
pass
USER_JSON[user] = nut
try:
await func(_, message, msg)
except FloodWait as e:
await asyncio.sleep(e.x + 5)
except MessageNotModified:
pass
return wrapper
def check_user(func):
async def wrapper(_, c_q: CallbackQuery):
cq = json.loads(str(c_q))
user = cq['from_user']['id']
if await IGNORE.find_one({'_id': user}):
return
if user in OWNER or user==int(cq['data'].split("_").pop()):
if user not in OWNER:
nt = time()
try:
ot = USER_JSON[user]
if nt-ot<1.4:
await c_q.answer(
"Stop spamming bot!!!\nElse you will be blacklisted",
)
await clog('ANIBOT', f'UserID: {user}', 'SPAM')
except KeyError:
pass
USER_JSON[user] = nt
try:
await func(_, c_q, cq)
except FloodWait as e:
await asyncio.sleep(e.x + 5)
except MessageNotModified:
pass
else:
await c_q.answer(
"Not your query!!!",
show_alert=True,
)
return wrapper
async def media_to_image(client: anibot, message: Message, x: Message, replied: Message):
if not (
replied.photo
or replied.sticker
or replied.animation
or replied.video
):
await x.edit_text("Media Type Is Invalid !")
await asyncio.sleep(5)
await x.delete()
return
media = replied.photo or replied.sticker or replied.animation or replied.video
if not os.path.isdir(DOWN_PATH):
os.makedirs(DOWN_PATH)
dls = await client.download_media(
media,
file_name=DOWN_PATH + rand_key(),
)
dls_loc = os.path.join(DOWN_PATH, os.path.basename(dls))
if replied.sticker and replied.sticker.file_name.endswith(".tgs"):
png_file = os.path.join(DOWN_PATH, f"{rand_key()}.png")
cmd = f"lottie_convert.py --frame 0 -if lottie -of png {dls_loc} {png_file}"
stdout, stderr = (await runcmd(cmd))[:2]
os.remove(dls_loc)
if not os.path.lexists(png_file):
await x.edit_text("This sticker is Gey, Task Failed Successfully ≧ω≦")
await asyncio.sleep(5)
await x.delete()
raise Exception(stdout + stderr)
dls_loc = png_file
elif replied.sticker and replied.sticker.file_name.endswith(".webp"):
stkr_file = os.path.join(DOWN_PATH, f"{rand_key()}.png")
os.rename(dls_loc, stkr_file)
if not os.path.lexists(stkr_file):
await x.edit_text("```Sticker not found...```")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = stkr_file
elif replied.animation or replied.video:
await x.edit_text("`Converting Media To Image ...`")
jpg_file = os.path.join(DOWN_PATH, f"{rand_key()}.jpg")
await take_screen_shot(dls_loc, 0, jpg_file)
os.remove(dls_loc)
if not os.path.lexists(jpg_file):
await x.edit_text("This Gif is Gey (。ì _ í。), Task Failed Successfully !")
await asyncio.sleep(5)
await x.delete()
return
dls_loc = jpg_file
return dls_loc
async def runcmd(cmd: str) -> Tuple[str, str, int, int]:
""" run command in terminal """
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
async def take_screen_shot(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
""" take a screenshot """
print(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
thumb_image_path = path or os.path.join(
DOWN_PATH, f"{basename(video_file)}.jpg"
)
command = f'''ffmpeg -ss {duration} -i "{video_file}" -vframes 1 "{thumb_image_path}"'''
err = (await runcmd(command))[1]
if err:
print(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
##################################################################
async def return_json_senpai(query: str, vars_: dict, auth: bool = False, user: int = None):
if auth is False:
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}).json()
else:
headers = {
'Authorization': 'Bearer ' + str((await AUTH_USERS.find_one({"id": int(user)}))['token']),
'Content-Type': 'application/json',
'Accept': 'application/json',
}
url = "https://graphql.anilist.co"
return requests.post(url, json={"query": query, "variables": vars_}, headers=headers).json()
def cflag(country):
if country == "JP":
return "\U0001F1EF\U0001F1F5"
if country == "CN":
return "\U0001F1E8\U0001F1F3"
if country == "KR":
return "\U0001F1F0\U0001F1F7"
if country == "TW":
return "\U0001F1F9\U0001F1FC"
def pos_no(no):
ep_ = list(str(no))
x = ep_.pop()
if ep_ != [] and ep_.pop()=='1':
return 'th'
th = "st" if x == "1" else "nd" if x == "2" else "rd" if x == "3" else "th"
return th
def make_it_rw(time_stamp):
"""Converti | def clog(name: str, text: str, tag: str):
log = f"#{name.upper()} #{tag.upper()}\n\n{text}"
await anibot.send_message(chat_id=LOG_CHANNEL_ID, text=log)
def get_btns(media, user: int, result: list, lsqry: str = None, lspage: int = None, auth: bool = False, sfw: str = "False"):
buttons = []
qry = f"_{lsqry}" if lsqry is not None else ""
pg = f"_{lspage}" if lspage is not None else ""
if media == "ANIME" and sfw == "False":
buttons.append([
InlineKeyboardButton(text="Characters", callback_data=f"char_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_1_{user}"),
InlineKeyboardButton(text="Description", callback_data=f"desc_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
InlineKeyboardButton(text="List Series", callback_data=f"ls_{result[2][0]}_ANI{qry}{pg}_{str(auth)}_{user}"),
])
if media == "CHARACTER":
buttons.append([InlineKeyboardButton("Description", callback_data=f"desc_{result[2][0]}_CHAR{qry}{pg}_{str(auth)}_{user}")])
buttons.append([InlineKeyboardButton("List Series", callback_data=f"lsc_{result[2][0]}{qry}{pg}_{str(auth)}_{user}")])
if media == "SCHEDULED":
if result[0]!=0 and result[0]!=6:
buttons.append([
InlineKeyboardButton(str(day_(result[0]-1)), callback_data=f"sched_{result[0]-1}_{user}"),
InlineKeyboardButton(str(day_(result[0]+1)), | ng Time Stamp to Readable Format"""
seconds, milliseconds = divmod(int(time_stamp), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " Days, ") if days else "")
+ ((str(hours) + " Hours, ") if hours else "")
+ ((str(minutes) + " Minutes, ") if minutes else "")
+ ((str(seconds) + " Seconds, ") if seconds else "")
+ ((str(milliseconds) + " ms, ") if milliseconds else "")
)
return tmp[:-2]
async | identifier_body |
regression2_helper.py | 1, projection='3d')
ax.scatter(X1, X2, y/1e6, color="black")
ax.set_xlabel('Cреднее количество комнат')
ax.set_ylabel('LSTAT %')
ax.set_zlabel('Цена квартиры, $1000')
xx, yy = np.meshgrid(np.linspace(2, 9, 100), np.linspace(0, 40, 100))
z = np.zeros_like(xx)
for i in range(len(xx)):
for j in range(len(xx)):
z[i, j] = theta0 + theta1*xx[i, j] + theta2*yy[i, j]
z = z / 1e6
ax.plot_wireframe(xx, yy, z)
ax.view_init(angle1, angle2)
plt.show()
#def plot_new_3d_data_and_hyp(X, y):
# angles1 = IntSlider(min=0, max=180, step=1, value=0, description='Вертикальное')
# angles2 = IntSlider(min=0, max=180, step=1, value=90, description='Горизонтальное')
# @interact(angle1=angles1, angle2=angles2)
# def plot_plane(angle1=angles1, angle2=angles2):
# fig = plt.figure(figsize=(15, 10))
# ax = fig.add_subplot(111, projection='3d')
#
# ax.scatter(X[:, 1], X[:, 2], y, color="black")
#
# ax.set_xlabel('Cреднее количество комнат')
# ax.set_ylabel('LSTAT %')
# ax.set_zlabel('Цена квартиры, $1000')
#
# ax.view_init(angle1, angle2)
# plt.show()
def linear_function(X, Theta):
return np.dot(X, Theta) #X @ Theta
def MSE_Loss(X, Theta, y_true):
y_pred = linear_function(X, Theta)
return (np.sum((y_pred - y_true)**2))/(len(y_true))
def gradient_function(Theta, X, y_true):
grad = np.zeros_like(Theta)
y_pred = linear_function(X, Theta)
for j in range(Theta.shape[0]):
grad[j] = 2*np.mean((y_pred - y_true)* X[:,j])
return grad
def plot_grad(X, Theta_init, y, a, k_min=0, k_max=3, skip=2):
iters_slider = IntSlider(min=0, max=500, step=1, value=1, description='Iter #')
@interact(iters=iters_slider)
def grad(iters):
plt.figure(figsize=(10, 10))
k1s = np.linspace(k_min, k_max, 100)
k2s = np.linspace(k_min, k_max, 100)
k1s, k2s = np.meshgrid(k1s, k2s)
z = np.zeros_like(k1s)
for i in range(len(k1s)):
for j in range(len(k1s)):
t = np.array([-10, k1s[i, j], k2s[i, j]])
z[i, j] = MSE_Loss(X, t, y)
lines = np.unique(np.round(z.ravel(), 3))
lines.sort()
ind = np.array([2**i for i in range(math.floor(math.log(len(lines), 2)) + 1)])
plt.contour(k1s, k2s, z, lines[ind], cmap=cm.coolwarm) # нарисовать указанные линии уровня
Theta = Theta_init.copy()
Thetas = [Theta.copy()]
plt.xlabel("Значение $\\theta_1$")
plt.ylabel("Значение $\\theta_2$")
if iters > 0:
for i in range(iters):
g = a*gradient_function(Theta, X, y)
Theta -= g
Thetas.append(Theta.copy())
Thetas = np.vstack(Thetas)
plt.scatter(Thetas[:-1, 1], Thetas[:-1, 2], color='gray')
plt.scatter(Thetas[iters-1, 1], Thetas[iters-1, 2], color='black')
plt.text(k_max/2+k_min, k_max, "$\\alpha \\dfrac{Loss(\\Theta)}{\\theta_1} = $" + f"{np.round(g[1], 5)}", va='top', ha='left')
plt.text(k_max/2+k_min, k_max-k_max/6, "$\\alpha \\dfrac{Loss(\\Theta)}{\\theta_2} = $" + f"{np.round(g[2], 5)}", va='top', ha='left')
plt.show()
# ********************** Полином ****************************
def get_data_for_polynom():
data = np.array([[ 5.000, 2.500], [16.600, 4.150], [ 8.000, 12.450], [26.150, 7.950], [14.600, 9.350],
[40.400, 18.450], [21.200, 3.850], [23.500, 3.900], [38.200, 18.400], [25.050, 8.200],
[29.350, 12.700], [28.200, 10.150], [11.050, 10.850], [ 4.850, 13.350], [ 4.850, 12.400],
[ 3.850, 14.350], [27.850, 9.400], [29.550, 13.250], [29.150, 14.400], [15.400, 4.650],
[38.900, 18.600], [ 4.550, 15.600], [14.950, 2.550], [30.150, 15.600], [23.500, 6.900],
[31.150, 16.050], [32.500, 13.200], [ 5.200, 9.600], [33.050, 17.800], [12.950, 6.850],
[23.550, 5.700], [35.800, 18.850], [39.650, 18.550], [19.900, 2.200], [ 6.650, 12.200],
[16.700, 1.550], [ 3.550, 15.550], [34.450, 18.250], [ 2.600, 14.900], [27.800, 15.900]])
X = data[:, 0]
y = data[:, 1]
return X, y
def get_more_data_for_polynom():
data = np.array([[12.850, 8.050], [12.650, 5.150], [31.300, 17.250], [31.850, 17.600], [23.200, 1.250], | [ 9.400, 16.050], [35.150, 16.800], [28.100, 11.400], [10.550, 7.150], [11.800, 8.800],
[37.050, 17.750], [17.100, 3.000], [30.900, 9.450], [29.200, 15.150], [20.550, 2.800], | random_line_split |
|
regression2_helper.py | 200, 9.600], [33.050, 17.800], [12.950, 6.850],
[23.550, 5.700], [35.800, 18.850], [39.650, 18.550], [19.900, 2.200], [ 6.650, 12.200],
[16.700, 1.550], [ 3.550, 15.550], [34.450, 18.250], [ 2.600, 14.900], [27.800, 15.900]])
X = data[:, 0]
y = data[:, 1]
return X, y
def get_more_data_for_polynom():
data = np.array([[12.850, 8.050], [12.650, 5.150], [31.300, 17.250], [31.850, 17.600], [23.200, 1.250],
[ 9.400, 16.050], [35.150, 16.800], [28.100, 11.400], [10.550, 7.150], [11.800, 8.800],
[37.050, 17.750], [17.100, 3.000], [30.900, 9.450], [29.200, 15.150], [20.550, 2.800],
[18.200, 2.000], [ 5.900, 14.200], [14.550, 3.700]])
X = data[:, 0]
y = data[:, 1]
return X, y
def plot_poly_data(X, y, X_test=None, y_test=None):
font = {'family': 'Verdana', 'weight': 'normal'}
rc('font', **font)
plt.figure(figsize=(10, 8))
plt.rcParams.update({'font.size': 22})
if X_test is not None and y_test is not None:
plt.scatter(X, y, label='Старые данные')
plt.scatter(X_test, y_test, label='Новые данные')
plt.legend()
else:
plt.scatter(X, y)
plt.xlabel('Скорость\nкм/чaс')
plt.ylabel('Расход топлива 1ой ступени\nг/кВт час')
plt.grid()
plt.show()
def visualize_prediction(X, y_train, y_pred):
plt.figure(figsize=(10,10))
plt.scatter(X, y_train)
plt.xlabel('Скорость\nкм/чaс')
plt.ylabel('Расход топлива 1ой ступени\nг/кВт час')
plt.plot(X, y_pred, color='r')
plt.grid()
plt.show()
def plot_parabola():
a_koef = FloatSlider(min = -5, max=10, step=0.5, value=1, description='a')
b_koef = FloatSlider(min = -5, max=10, step=0.5, value=1, description='b')
c_koef = FloatSlider(min = -5, max=10, step=0.5, value=1, description='c')
@interact(a=a_koef, b=b_koef, c=c_koef)
def interact_plot_parabol(a, b, c):
x = np.linspace(-10, 10, num=200)
y = a*x**2 + b*x + c
plt.figure(figsize=(16,10))
plt.plot(x, y, color='black')
plt.xlim((-10,10))
plt.ylim((-10,100))
plt.xlabel("X")
plt.ylabel("Y")
plt.grid()
plt.title("$y(X) = a X^2 + b X + c$")
plt.show()
def plot_polynoms():
x = np.linspace(-10, 20, 300)
y3 = -1/6*x**3 + 9/6*x**2 - 3*x + 1
y4 = 1/24*(x**4 - 16*x**3 + 72*x**2 - 96*x + 24)
y5 = 1/120*(-x**5 + 25*x**4 - 200*x**3 + 600*x**2 - 600*x + 120)
plt.figure(figsize=(16,10))
plt.plot(x,y3, label="Полином 3 степени")
plt.plot(x,y4, label="Полином 4 степени")
plt.plot(x,y5, label="Полином 5 степени")
# plt.xticks(ticks = x)
plt.ylim((-25, 50))
plt.xlim((-5, 15))
plt.grid()
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.show()
def plot_poly_results(X_poly_scaled, y_true, poly_transformer, scaler, regressor):
x_axis_ticks = poly_transformer.transform((np.arange(0,41.3,0.01)).reshape(-1,1))
x_axis_ticks = scaler.transform(x_axis_ticks)
y_pred = regressor.predict(x_axis_ticks)
plt.figure(figsize=(10,10))
plt.scatter(X_poly_scaled[:, 1], y_true)
plt.xlabel('Скорость\nкм/чaс')
plt.ylabel('Расход топлива 1ой ступени\nг/кВт час')
plt.ylim(min(y_true)-1, max(y_true)+1)
plt.xlim(min(X_poly_scaled[:, 1]), max(X_poly_scaled[:, 1]))
plt.grid()
plt.plot(x_axis_ticks[:,1], y_pred, color='r')
plt.show()
def interactive_polynom(X, y_true, X_test=None, y_test=None):
deg_slider = IntSlider(min=1, max=40, step=1, value=1, description='Степень полинома')
@interact(deg=deg_slider)
def p(deg):
poly_transformer = PolynomialFeatures(deg)
X_poly = poly_transformer.fit_transform(X.reshape(-1,1))
scaler = StandardScaler()
X_poly_scaled = scaler.fit_transform(X_poly)
regressor = LinearRegression().fit(X_poly_scaled, y_true)
y_d_pred = regressor.predict(X_poly_scaled)
x_axis_ticks = poly_transformer.transform((np.arange(0, 41.3, 0.1)).reshape(-1,1))
x_axis_ticks = scaler.transform(x_axis_ticks)
y_pred = regressor.predict(x_axis_ticks)
plt.figure(figsize=(10,10))
plt.scatter(X, y_true)
if X_test is not None and y_test is not None:
plt.scatter(X_test, y_test)
X_poly = poly_transformer.transform(X_test.reshape(-1,1))
X_poly_scaled = scaler.transform(X_poly)
y_d_t_pred = regressor.predict(X_poly_scaled)
plt.title(f'Ошибка на данных обучения = {mean_squared_error(y_d_pred, y_true):.2f}\nОшибка на новых данных = {mean_squared_error(y_d_t_pred, y_test):.2f}\n')
else:
plt.title(f'Ошибка = {mean_squared_error(y_d_pred, y_true):.2f}')
plt.xlabel('Скорость')
plt.ylabel('Расход топлива 1ой ступени')
| plt.ylim(0, 20)
plt.grid()
plt.plot(scaler.inverse_transform(x_axis_ticks)[:, 1], y_pred, color='r')
plt.show()
def plot_mae_mse():
x_slrd = FloatSlider(min=-1.5, max=1.5, step=0.1, value=0, description='$x$')
@interact(x=x_slrd)
def f(x):
fig, axis = plt.subplots(1, 2, figsize=(18, 6))
ks = np.linspace(-1.5, 1.5, 200)
axis[0].plot(ks, ks**2, label="MSE" | conditional_block |
|
regression2_helper.py | ():
data, y = load_all_data()
return data[:, 5], data[:, -1], y
def print_3d_table_with_data(X1, X2, y):
l = len(X1)
d = {"Cреднее количество комнат" : pd.Series(X1, index=range(0, l)),
"LSTAT %" : pd.Series(X2, index=range(0, l)),
'Цена квартиры, $1000$' : pd.Series(y, index=range(0, l))}
df = pd.DataFrame(d)
print(df.head(17))
def plot_rm(X_room, y):
plt.figure(figsize=(15, 9))
plt.scatter(X_room, y, color="black")
plt.xlabel('Cреднее количество комнат')
plt.ylabel('Цена квартиры, 1000$')
plt.grid()
plt.show()
def plot_lstat(X_lstat, y):
plt.figure(figsize=(15, 9))
plt.scatter(X_lstat, y, color="black")
plt.xlabel('LSTAT %')
plt.ylabel('Цена квартиры, 1000$')
plt.grid()
plt.show()
def plot_new_3d_data(X1, X2, y):
angles1 = IntSlider(min=0, max=180, step=1, value=45, description='Вертикальное')
angles2 = IntSlider(min=0, max=180, step=1, value=45, description='Горизонтальное')
@interact(angle1=angles1, angle2=angles2)
def plot_new_data(angle1, angle2):
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X1, X2, y, color="black")
ax.set_xlabel('Cреднее количество комнат')
ax.set_ylabel('LSTAT %')
ax.set_zlabel('Цена квартиры, $1000$')
ax.view_init(angle1, angle2)
plt.show()
def plot_new_3d_data_and_hyp(X1, X2, y):
angles1 = IntSlider(min=0, max=180, step=1, value=45, description='Вертикальное')
angles2 = IntSlider(min=0, max=180, step=1, value=45, description='Горизонтальное')
theta0 = IntSlider(min=-19, max=21, step=1, value=1)
theta1 = IntSlider(min=-9, max=11, step=1, value=1)
theta2 = IntSlider(min=-4, max=6, step=1, value=1)
@interact(angle1=angles1, angle2=angles2,theta0=theta0, theta1=theta1, theta2=theta2)
def plot_plane(angle1=angles1, angle2=angles2, theta0=theta0, theta1=theta1, theta2=theta2):
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X1, X2, y/1e6, color="black")
ax.set_xlabel('Cреднее количество комнат')
ax.set_ylabel('LSTAT %')
ax.set_zlabel('Цена квартиры, $1000')
xx, yy = np.meshgrid(np.linspace(2, 9, 100), np.linspace(0, 40, 100))
z = np.zeros_like(xx)
for i in range(len(xx)):
for j in range(len(xx)):
z[i, j] = theta0 + theta1*xx[i, j] + theta2*yy[i, j]
z = z / 1e6
ax.plot_wireframe(xx, yy, z)
ax.view_init(angle1, angle2)
plt.show()
def create_data(X1, X2):
X_ones = np.ones_like(X1).reshape(-1,1)
X = np.hstack((X_ones, X1.reshape(-1,1)))
X = np.hstack((X, X2.reshape(-1,1)))
return X
def gradient_descent(Theta, X, y, alpha, iters):
theta = Theta.copy()
for i in range (iters):
theta = theta - alpha * gradient_function(theta, X, y)
return theta
def plot_new_3d_data_and_hyp_grad_des(X1, X2, y, theta0, theta1, theta2):
Theta = np.ones((3))
X = create_data(X1, X2)
angles1 = IntSlider(min=0, max=180, step=1, value=45, description='Вертикальное')
angles2 = IntSlider(min=0, max=180, step=1, value=45, description='Горизонтальное')
@interact(angle1=angles1, angle2=angles2)
def plot_plane(angle1=angles1, angle2=angles2):
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X1, X2, y/1e6, color="black")
ax.set_xlabel('Cреднее количество комнат')
ax.set_ylabel('LSTAT %')
ax.set_zlabel('Цена квартиры, $1000')
xx, yy = np.meshgrid(np.linspace(2, 9, 100), np.linspace(0, 40, 100))
z = np.zeros_like(xx)
for i in range(len(xx)):
for j in range(len(xx)):
z[i, j] = theta0 + theta1*xx[i, j] + theta2*yy[i, j]
z = z / 1e6
ax.plot_wireframe(xx, yy, z)
ax.view_init(angle1, angle2)
plt.show()
#def plot_new_3d_data_and_hyp(X, y):
# angles1 = IntSlider(min=0, max=180, step=1, value=0, description='Вертикальное')
# angles2 = IntSlider(min=0, max=180, step=1, value=90, description='Горизонтальное')
# @interact(angle1=angles1, angle2=angles2)
# def plot_plane(angle1=angles1, angle2=angles2):
# fig = plt.figure(figsize=(15, 10))
# ax = fig.add_subplot(111, projection='3d')
#
# ax.scatter(X[:, 1], X[:, 2], y, color="black")
#
# ax.set_xlabel('Cреднее количество комнат')
# ax.set_ylabel('LSTAT %')
# ax.set_zlabel('Цена квартиры, $1000')
#
# ax.view_init(angle1, angle2)
# plt.show()
def linear_function(X, Theta):
return np.dot(X, Theta) #X @ Theta
def MSE_Loss(X, Theta, y_true):
y_pred = linear_function(X, Theta)
return (np.sum((y_pred - y_true)**2))/(len(y_true))
def gradient_function(Theta, X, y_true):
grad = np.zeros_like(Theta)
y_pred = linear_function(X, Theta)
for j in range(Theta.shape[0]):
grad[j] = 2*np.mean((y_pred - y_true)* X[:,j])
return grad
def plot_grad(X, Theta_init, y, a, k_min=0, k_max=3, skip=2):
iters_slider = IntSlider(min=0, max=500, step=1, value=1, description='Iter #')
@interact(iters=iters_slider)
def grad(iters):
plt.figure(figsize=(10, 10))
k1s = np.linspace(k_min, k_max, 100)
k2s = np.linspace(k_min, k_max, 100)
k1s, k2s = np.meshgrid(k1s, k2s)
z = np.zeros_like(k1s)
for i in range(len(k1s)):
for j in range(len(k1s)):
t = np.array([-10, k1s[i, j], k2s[i, j]])
z[i, j] = MSE_Loss(X, t, y)
lines = np.unique(np.round(z.ravel(), 3))
lines.sort()
ind = np.array([2**i for i in range(math.floor(math.log(len(lines), 2)) + 1)])
plt.contour(k1s, k2s, z, lines[ind], cmap=cm.coolwarm) # нарисовать указанные линии уровня
Theta = Theta_init.copy()
Thetas = [Theta.copy()]
plt.xlabel("Знач | load_small_data | identifier_name |
|
regression2_helper.py | value=1, description='c')
@interact(a=a_koef, b=b_koef, c=c_koef)
def interact_plot_parabol(a, b, c):
x = np.linspace(-10, 10, num=200)
y = a*x**2 + b*x + c
plt.figure(figsize=(16,10))
plt.plot(x, y, color='black')
plt.xlim((-10,10))
plt.ylim((-10,100))
plt.xlabel("X")
plt.ylabel("Y")
plt.grid()
plt.title("$y(X) = a X^2 + b X + c$")
plt.show()
def plot_polynoms():
x = np.linspace(-10, 20, 300)
y3 = -1/6*x**3 + 9/6*x**2 - 3*x + 1
y4 = 1/24*(x**4 - 16*x**3 + 72*x**2 - 96*x + 24)
y5 = 1/120*(-x**5 + 25*x**4 - 200*x**3 + 600*x**2 - 600*x + 120)
plt.figure(figsize=(16,10))
plt.plot(x,y3, label="Полином 3 степени")
plt.plot(x,y4, label="Полином 4 степени")
plt.plot(x,y5, label="Полином 5 степени")
# plt.xticks(ticks = x)
plt.ylim((-25, 50))
plt.xlim((-5, 15))
plt.grid()
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.show()
def plot_poly_results(X_poly_scaled, y_true, poly_transformer, scaler, regressor):
x_axis_ticks = poly_transformer.transform((np.arange(0,41.3,0.01)).reshape(-1,1))
x_axis_ticks = scaler.transform(x_axis_ticks)
y_pred = regressor.predict(x_axis_ticks)
plt.figure(figsize=(10,10))
plt.scatter(X_poly_scaled[:, 1], y_true)
plt.xlabel('Скорость\nкм/чaс')
plt.ylabel('Расход топлива 1ой ступени\nг/кВт час')
plt.ylim(min(y_true)-1, max(y_true)+1)
plt.xlim(min(X_poly_scaled[:, 1]), max(X_poly_scaled[:, 1]))
plt.grid()
plt.plot(x_axis_ticks[:,1], y_pred, color='r')
plt.show()
def interactive_polynom(X, y_true, X_test=None, y_test=None):
deg_slider = IntSlider(min=1, max=40, step=1, value=1, description='Степень полинома')
@interact(deg=deg_slider)
def p(deg):
poly_transformer = PolynomialFeatures(deg)
X_poly = poly_transformer.fit_transform(X.reshape(-1,1))
scaler = StandardScaler()
X_poly_scaled = scaler.fit_transform(X_poly)
regressor = LinearRegression().fit(X_poly_scaled, y_true)
y_d_pred = regressor.predict(X_poly_scaled)
x_axis_ticks = poly_transformer.transform((np.arange(0, 41.3, 0.1)).reshape(-1,1))
x_axis_ticks = scaler.transform(x_axis_ticks)
y_pred = regressor.predict(x_axis_ticks)
plt.figure(figsize=(10,10))
plt.scatter(X, y_true)
if X_test is not None and y_test is not None:
plt.scatter(X_test, y_test)
X_poly = poly_transformer.transform(X_test.reshape(-1,1))
X_poly_scaled = scaler.transform(X_poly)
y_d_t_pred = regressor.predict(X_poly_scaled)
plt.title(f'Ошибка на данных обучения = {mean_squared_error(y_d_pred, y_true):.2f}\nОшибка на новых данных = {mean_squared_error(y_d_t_pred, y_test):.2f}\n')
else:
plt.title(f'Ошибка = {mean_squared_error(y_d_pred, y_true):.2f}')
plt.xlabel('Скорость')
plt.ylabel('Расход топлива 1ой ступени')
plt.ylim(0, 20)
plt.grid()
plt.plot(scaler.inverse_transform(x_axis_ticks)[:, 1], y_pred, color='r')
plt.show()
def plot_mae_mse():
x_slrd = FloatSlider(min=-1.5, max=1.5, step=0.1, value=0, description='$x$')
@interact(x=x_slrd)
def f(x):
fig, axis = plt.subplots(1, 2, figsize=(18, 6))
ks = np.linspace(-1.5, 1.5, 200)
axis[0].plot(ks, ks**2, label="MSE", color='red')
axis[0].scatter(x, x**2, color='red')
axis[0].plot(ks, np.abs(ks), label="MAE", color='green')
axis[0].scatter(x, np.abs(x), color='green')
axis[0].set_title("Функция ошибки")
axis[0].set_xticks([])
axis[0].set_yticks([])
axis[0].grid()
axis[0].legend()
axis[1].plot(ks, 2*ks, label="$MSE'$", color='red')
axis[1].scatter(x, 2*x, color='red')
axis[1].plot(ks, np.sign(ks), label="$MAE'$", color='green')
axis[1].scatter(x, np.sign(x), color='green')
axis[1].set_title("Производная функции ошибки")
axis[1].legend()
axis[1].grid()
axis[1].set_xticks([])
axis[1].set_yticks([])
plt.show()
from sklearn import linear_model
def plot_outlier():
x = np.linspace(-1, 1, 15)
y = 5*x + 0.4*np.random.normal(size=(15,))
y[10] = -5
plt.figure(figsize=(10, 7))
plt.scatter(x, y)
plt.scatter(x[10], y[10], label='Выброс')
plt.legend()
plt.show()
def plot_regression_with_outlier():
x = np.linspace(-1, 1, 15)
y = 5*x + 0.4*np.random.normal(size=(15,))
y[10] = -5
plt.figure(figsize=(10, 7))
plt.scatter(x, y)
plt.scatter(x[10], y[10], label='Выброс')
clf_l2 = linear_model.SGDRegressor(max_iter=1000, penalty=None)
clf_l2.fit(x.reshape(-1, 1), y)
clf_l1 = linear_model.SGDRegressor(loss='epsilon_insensitive', epsilon=0, max_iter=1000, penalty=None)
clf_l1.fit(x.reshape(-1, 1), y)
plt.plot(x, clf_l2.predict(x.reshape(-1, 1)), label='Линейная регрессия на MSE', color='red')
plt.plot(x, clf_l1.predict(x.reshape(-1, 1)), label='Линейная регрессия на MAE', color='green')
plt.legend()
plt.show()
# ************************************** HOMEWORK ************************************
def polinom_function(X_m, theta):
return np.dot(X_m, theta)
def get_homework_data():
X = np.linspace(-3, 3, 100)
return X, X**3 + X**2 + X + 1 + 1.5*np.random.normal(size=(100,))
def plot_poly_hw_results(X_poly_scaled, y_true, theta_init, theta, means, stds):
font = {'family': 'Verdana', 'weight': 'normal'}
rc('font', **font)
plt.rcParams.update({'font.size': 22})
x = np.linspace(-5, 5, 100)
x = np.column_stack([np.ones_like(x), x, x**2, x**3])
for j in range(1, x.shape[1]):
x[:, j] = (x[:, j] - means[j])/stds[j]
y_pred = polinom_function(x, theta)
y_pred_init = polinom_function(x, theta_init)
plt.figure(figsize=(10,10))
plt.scatter(X_poly_scaled[:, 1], y_true)
plt.ylim(min(y_true)-1, max(y_true)+1)
plt.xlim(min(X_poly_scaled[:, 1]), max | (X_poly_scaled[:, 1]))
plt.grid()
plt.plot(x[:,1], y_pred, color='r', label="Кривая после | identifier_body |
|
client_conn.rs | Stream<I>;
type HttpStreamSpecific = ClientStreamData;
type ConnSpecific = ClientConnData;
type ToWriteMessage = ClientToWriteMessage;
const OUT_REQUEST_OR_RESPONSE: RequestOrResponse = RequestOrResponse::Request;
const CLIENT_OR_SERVER: ClientOrServer = ClientOrServer::Client;
}
pub struct ClientStreamData {}
impl HttpStreamDataSpecific for ClientStreamData {}
type ClientStream<I> = HttpStreamCommon<ClientTypes<I>>;
impl<I> HttpStreamData for ClientStream<I>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
type Types = ClientTypes<I>;
}
pub struct ClientConnData {
_callbacks: Box<ClientConnCallbacks>,
}
impl ConnSpecific for ClientConnData {}
pub struct | {
write_tx: UnboundedSender<ClientToWriteMessage>,
}
unsafe impl Sync for ClientConn {}
pub struct StartRequestMessage {
pub headers: Headers,
pub body: HttpStreamAfterHeaders,
pub resp_tx: oneshot::Sender<Response>,
}
enum ClientToWriteMessage {
Start(StartRequestMessage),
WaitForHandshake(oneshot::Sender<result::Result<()>>),
Common(CommonToWriteMessage),
}
impl From<CommonToWriteMessage> for ClientToWriteMessage {
fn from(m: CommonToWriteMessage) -> Self {
ClientToWriteMessage::Common(m)
}
}
impl<I> ConnWriteSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
type Types = ClientTypes<I>;
fn process_message(&mut self, message: ClientToWriteMessage) -> result::Result<()> {
match message {
ClientToWriteMessage::Start(start) => self.process_start(start),
ClientToWriteMessage::Common(common) => self.process_common_message(common),
ClientToWriteMessage::WaitForHandshake(tx) => {
// ignore error
drop(tx.send(Ok(())));
Ok(())
}
}
}
}
impl<I> Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
fn process_start(&mut self, start: StartRequestMessage) -> result::Result<()> {
let StartRequestMessage {
headers,
body,
resp_tx,
} = start;
let stream_id = self.next_local_stream_id();
let out_window = {
let (mut http_stream, resp_stream, out_window) = self.new_stream_data(
stream_id,
None,
InMessageStage::Initial,
ClientStreamData {},
);
if let Err(_) = resp_tx.send(Response::from_stream(resp_stream)) {
warn!("caller died");
}
http_stream.push_back(DataOrHeaders::Headers(headers));
out_window
};
self.pump_stream_to_write_loop(stream_id, body.into_part_stream(), out_window);
// Also opens latch if necessary
self.buffer_outg_conn()?;
Ok(())
}
}
pub trait ClientConnCallbacks: 'static {
// called at most once
fn goaway(&self, stream_id: StreamId, raw_error_code: u32);
}
impl ClientConn {
fn spawn_connected<I, C>(
lh: reactor::Handle,
connect: HttpFutureSend<I>,
conf: ClientConf,
callbacks: C,
) -> Self
where
I: AsyncWrite + AsyncRead + Send + 'static,
C: ClientConnCallbacks,
{
let (to_write_tx, to_write_rx) = unbounded();
let to_write_rx = Box::new(
to_write_rx
.map_err(|()| Error::IoError(io::Error::new(io::ErrorKind::Other, "to_write"))),
);
let c = ClientConn {
write_tx: to_write_tx.clone(),
};
let settings_frame = SettingsFrame::from_settings(vec![HttpSetting::EnablePush(false)]);
let mut settings = DEFAULT_SETTINGS;
settings.apply_from_frame(&settings_frame);
let handshake = connect.and_then(|conn| client_handshake(conn, settings_frame));
let conn_died_error_holder = ClientDiedErrorHolder::new();
let conn_died_error_holder_copy = conn_died_error_holder.clone();
let lh_copy = lh.clone();
let future = handshake.and_then(move |conn| {
debug!("handshake done");
let (read, write) = conn.split();
let conn_data = Conn::<ClientTypes<_>>::new(
lh_copy,
CpuPoolOption::SingleThread,
ClientConnData {
_callbacks: Box::new(callbacks),
},
conf.common,
settings,
to_write_tx.clone(),
to_write_rx,
read,
write,
conn_died_error_holder,
);
conn_data.run()
});
let future = conn_died_error_holder_copy.wrap_future(future);
lh.spawn(future);
c
}
pub fn spawn<H, C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
tls: ClientTlsOption<C>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
match tls {
ClientTlsOption::Plain => ClientConn::spawn_plain(lh.clone(), addr, conf, callbacks),
ClientTlsOption::Tls(domain, connector) => {
ClientConn::spawn_tls(lh.clone(), &domain, connector, addr, conf, callbacks)
}
}
}
pub fn spawn_plain<C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: C,
) -> Self
where
C: ClientConnCallbacks,
{
let no_delay = conf.no_delay.unwrap_or(true);
let connect = addr.connect(&lh).map_err(Into::into);
let map_callback = move |socket: Box<StreamItem>| {
info!("connected to {}", addr);
if socket.is_tcp() {
socket
.set_nodelay(no_delay)
.expect("failed to set TCP_NODELAY");
}
socket
};
let connect: Box<Future<Item = _, Error = _> + Send> =
if let Some(timeout) = conf.connection_timeout {
let timer = Timer::default();
Box::new(timer.timeout(connect, timeout).map(map_callback))
} else {
Box::new(connect.map(map_callback))
};
ClientConn::spawn_connected(lh, connect, conf, callbacks)
}
pub fn spawn_tls<H, C>(
lh: reactor::Handle,
domain: &str,
connector: Arc<C>,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
let domain = domain.to_owned();
let connect = addr
.connect(&lh)
.map(move |c| {
info!("connected to {}", addr);
c
}).map_err(|e| e.into());
let tls_conn = connect.and_then(move |conn| {
tokio_tls_api::connect_async(&*connector, &domain, conn)
.map_err(|e| Error::IoError(io::Error::new(io::ErrorKind::Other, e)))
});
let tls_conn = tls_conn.map_err(Error::from);
ClientConn::spawn_connected(lh, Box::new(tls_conn), conf, callbacks)
}
pub fn start_request_with_resp_sender(
&self,
start: StartRequestMessage,
) -> Result<(), StartRequestMessage> {
self.write_tx
.unbounded_send(ClientToWriteMessage::Start(start))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::Start(start) => start,
_ => unreachable!(),
})
}
pub fn dump_state_with_resp_sender(&self, tx: oneshot::Sender<ConnStateSnapshot>) {
let message = ClientToWriteMessage::Common(CommonToWriteMessage::DumpState(tx));
// ignore error
drop(self.write_tx.unbounded_send(message));
}
/// For tests
#[doc(hidden)]
pub fn _dump_state(&self) -> HttpFutureSend<ConnStateSnapshot> {
let (tx, rx) = oneshot::channel();
self.dump_state_with_resp_sender(tx);
let rx =
rx.map_err(|_| Error::from(io::Error::new(io::ErrorKind::Other, "oneshot canceled")));
Box::new(rx)
}
pub fn wait_for_connect_with_resp_sender(
&self,
tx: oneshot::Sender<result::Result<()>>,
) -> std_Result<(), oneshot::Sender<result::Result<()>>> {
self.write_tx
.unbounded_send(ClientToWriteMessage::WaitForHandshake(tx))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::WaitForHandshake(tx) => tx,
_ => unreachable!(),
})
}
}
impl Service for ClientConn {
// TODO: copy-paste with Client::start_request
fn start_request(&self, headers: Headers, body: HttpStreamAfterHeaders) -> Response {
let (resp_tx, resp_rx) = oneshot::channel();
let start = StartRequestMessage {
headers: headers,
body: body,
resp_tx: | ClientConn | identifier_name |
client_conn.rs |
use futures::future::Future;
use futures::stream::Stream;
use futures::sync::mpsc::unbounded;
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use tls_api::TlsConnector;
use tokio_core::reactor;
use tokio_io::AsyncRead;
use tokio_io::AsyncWrite;
use tokio_timer::Timer;
use tokio_tls_api;
use solicit_async::*;
use common::*;
use data_or_trailers::*;
use socket::*;
use client_died_error_holder::ClientDiedErrorHolder;
use common::client_or_server::ClientOrServer;
use data_or_headers::DataOrHeaders;
use data_or_headers_with_flag::DataOrHeadersWithFlag;
use headers_place::HeadersPlace;
use req_resp::RequestOrResponse;
use result_or_eof::ResultOrEof;
use std::marker;
use ClientConf;
use ClientTlsOption;
use ErrorCode;
struct ClientTypes<I>(marker::PhantomData<I>);
impl<I> Types for ClientTypes<I>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
type Io = I;
type HttpStreamData = ClientStream<I>;
type HttpStreamSpecific = ClientStreamData;
type ConnSpecific = ClientConnData;
type ToWriteMessage = ClientToWriteMessage;
const OUT_REQUEST_OR_RESPONSE: RequestOrResponse = RequestOrResponse::Request;
const CLIENT_OR_SERVER: ClientOrServer = ClientOrServer::Client;
}
pub struct ClientStreamData {}
impl HttpStreamDataSpecific for ClientStreamData {}
type ClientStream<I> = HttpStreamCommon<ClientTypes<I>>;
impl<I> HttpStreamData for ClientStream<I>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
type Types = ClientTypes<I>;
}
pub struct ClientConnData {
_callbacks: Box<ClientConnCallbacks>,
}
impl ConnSpecific for ClientConnData {}
pub struct ClientConn {
write_tx: UnboundedSender<ClientToWriteMessage>,
}
unsafe impl Sync for ClientConn {}
pub struct StartRequestMessage {
pub headers: Headers,
pub body: HttpStreamAfterHeaders,
pub resp_tx: oneshot::Sender<Response>,
}
enum ClientToWriteMessage {
Start(StartRequestMessage),
WaitForHandshake(oneshot::Sender<result::Result<()>>),
Common(CommonToWriteMessage),
}
impl From<CommonToWriteMessage> for ClientToWriteMessage {
fn from(m: CommonToWriteMessage) -> Self {
ClientToWriteMessage::Common(m)
}
}
impl<I> ConnWriteSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
type Types = ClientTypes<I>;
fn process_message(&mut self, message: ClientToWriteMessage) -> result::Result<()> {
match message {
ClientToWriteMessage::Start(start) => self.process_start(start),
ClientToWriteMessage::Common(common) => self.process_common_message(common),
ClientToWriteMessage::WaitForHandshake(tx) => {
// ignore error
drop(tx.send(Ok(())));
Ok(())
}
}
}
}
impl<I> Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send + 'static,
{
fn process_start(&mut self, start: StartRequestMessage) -> result::Result<()> {
let StartRequestMessage {
headers,
body,
resp_tx,
} = start;
let stream_id = self.next_local_stream_id();
let out_window = {
let (mut http_stream, resp_stream, out_window) = self.new_stream_data(
stream_id,
None,
InMessageStage::Initial,
ClientStreamData {},
);
if let Err(_) = resp_tx.send(Response::from_stream(resp_stream)) {
warn!("caller died");
}
http_stream.push_back(DataOrHeaders::Headers(headers));
out_window
};
self.pump_stream_to_write_loop(stream_id, body.into_part_stream(), out_window);
// Also opens latch if necessary
self.buffer_outg_conn()?;
Ok(())
}
}
pub trait ClientConnCallbacks: 'static {
// called at most once
fn goaway(&self, stream_id: StreamId, raw_error_code: u32);
}
impl ClientConn {
fn spawn_connected<I, C>(
lh: reactor::Handle,
connect: HttpFutureSend<I>,
conf: ClientConf,
callbacks: C,
) -> Self
where
I: AsyncWrite + AsyncRead + Send + 'static,
C: ClientConnCallbacks,
{
let (to_write_tx, to_write_rx) = unbounded();
let to_write_rx = Box::new(
to_write_rx
.map_err(|()| Error::IoError(io::Error::new(io::ErrorKind::Other, "to_write"))),
);
let c = ClientConn {
write_tx: to_write_tx.clone(),
};
let settings_frame = SettingsFrame::from_settings(vec![HttpSetting::EnablePush(false)]);
let mut settings = DEFAULT_SETTINGS;
settings.apply_from_frame(&settings_frame);
let handshake = connect.and_then(|conn| client_handshake(conn, settings_frame));
let conn_died_error_holder = ClientDiedErrorHolder::new();
let conn_died_error_holder_copy = conn_died_error_holder.clone();
let lh_copy = lh.clone();
let future = handshake.and_then(move |conn| {
debug!("handshake done");
let (read, write) = conn.split();
let conn_data = Conn::<ClientTypes<_>>::new(
lh_copy,
CpuPoolOption::SingleThread,
ClientConnData {
_callbacks: Box::new(callbacks),
},
conf.common,
settings,
to_write_tx.clone(),
to_write_rx,
read,
write,
conn_died_error_holder,
);
conn_data.run()
});
let future = conn_died_error_holder_copy.wrap_future(future);
lh.spawn(future);
c
}
pub fn spawn<H, C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
tls: ClientTlsOption<C>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
match tls {
ClientTlsOption::Plain => ClientConn::spawn_plain(lh.clone(), addr, conf, callbacks),
ClientTlsOption::Tls(domain, connector) => {
ClientConn::spawn_tls(lh.clone(), &domain, connector, addr, conf, callbacks)
}
}
}
pub fn spawn_plain<C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: C,
) -> Self
where
C: ClientConnCallbacks,
{
let no_delay = conf.no_delay.unwrap_or(true);
let connect = addr.connect(&lh).map_err(Into::into);
let map_callback = move |socket: Box<StreamItem>| {
info!("connected to {}", addr);
if socket.is_tcp() {
socket
.set_nodelay(no_delay)
.expect("failed to set TCP_NODELAY");
}
socket
};
let connect: Box<Future<Item = _, Error = _> + Send> =
if let Some(timeout) = conf.connection_timeout {
let timer = Timer::default();
Box::new(timer.timeout(connect, timeout).map(map_callback))
} else {
Box::new(connect.map(map_callback))
};
ClientConn::spawn_connected(lh, connect, conf, callbacks)
}
pub fn spawn_tls<H, C>(
lh: reactor::Handle,
domain: &str,
connector: Arc<C>,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
let domain = domain.to_owned();
let connect = addr
.connect(&lh)
.map(move |c| {
info!("connected to {}", addr);
c
}).map_err(|e| e.into());
let tls_conn = connect.and_then(move |conn| {
tokio_tls_api::connect_async(&*connector, &domain, conn)
.map_err(|e| Error::IoError(io::Error::new(io::ErrorKind::Other, e)))
});
let tls_conn = tls_conn.map_err(Error::from);
ClientConn::spawn_connected(lh, Box::new(tls_conn), conf, callbacks)
}
pub fn start_request_with_resp_sender(
&self,
start: StartRequestMessage,
) -> Result<(), StartRequestMessage> {
self.write_tx
.unbounded_send(ClientToWriteMessage::Start(start))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::Start(start) => start,
_ => unreachable!(),
})
}
pub fn dump_state_with_resp_sender(&self, tx: oneshot::Sender<ConnStateSnapshot>) {
let message = ClientToWriteMessage::Common(CommonToWriteMessage::DumpState(tx));
// ignore error
drop(self.write_tx.unbounded_send(message));
}
/// For tests
#[doc | use solicit::header::*;
use solicit::StreamId;
use solicit::DEFAULT_SETTINGS;
use service::Service; | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.