# ---- AA_stat/main.py ----
import argparse
import logging
import os
from . import AA_stat, utils, io
def main():
pars = argparse.ArgumentParser(epilog='Instead of file lists, you can pass directory names. '
'This will process all files in the directory.')
pars.add_argument('--params', help='CFG file with parameters. If there is no file, AA_stat uses default one. '
'An example can be found at https://github.com/SimpleNumber/aa_stat', required=False)
pars.add_argument('--dir', help='Directory to store the results. Default value is current directory.', default='.')
pars.add_argument('-v', '--verbosity', type=int, choices=range(4), default=1, help='Output verbosity.')
input_spectra = pars.add_mutually_exclusive_group()
input_spectra.add_argument('--mgf', nargs='+', help='MGF files to localize modifications.')
input_spectra.add_argument('--mzml', nargs='+', help='mzML files to localize modifications.')
input_file = pars.add_mutually_exclusive_group(required=True)
input_file.add_argument('--pepxml', nargs='+', help='List of input files in pepXML format.')
input_file.add_argument('--csv', nargs='+', help='List of input files in CSV format.')
pars.add_argument('--fmods', help='Fixed modifications specified in the search (needed with CSV input). '
'Example: +57.0215 @ C, +229.1630 @ N-term')
pars.add_argument('--vmods', help='Variable modifications specified in the search (needed with CSV input). '
'Example: 15.9959 @ M, 42.0106 @ N-term')
pars.add_argument('--enzyme', help='Enzyme specificity set in the search (needed with CSV input).')
pars.add_argument('-n', '--processes', type=int, help='Maximum number of processes to use.')
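# Illustrative invocations of the options defined above (file and directory names are
# hypothetical; assumes the package is installed with the AA_stat console script):
#   AA_stat --pepxml results/*.pepXML --mzml spectra/ --dir aa_stat_results
#   AA_stat --csv open_search.csv --fmods "+57.0215 @ C" --enzyme "[RK]|{P}"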
args = pars.parse_args()
levels = [logging.WARNING, logging.INFO, logging.DEBUG, utils.INTERNAL]
logging.basicConfig(format='{levelname:>8}: {asctime} {message}',
datefmt='[%H:%M:%S]', level=levels[args.verbosity], style='{')
# Performance optimizations as per https://docs.python.org/3/howto/logging.html#optimization
logging._srcfile = None
logging.logThreads = 0
logging.logProcesses = 0
logger = logging.getLogger(__name__)
logging.getLogger('matplotlib').setLevel(logging.WARNING)
logger.info('Starting...')
io.resolve_filenames(args)
total_inputs = len(args.csv or []) + len(args.pepxml or [])
if not total_inputs:
logger.error('No input files found.')
return
params_dict = io.get_params_dict(args)
logger.debug(params_dict)
os.makedirs(args.dir, exist_ok=True)
AA_stat.AA_stat(params_dict, args)
logger.info('Done.')


# ---- AA_stat/utils.py ----
import os
import operator
import logging
import pandas as pd
import numpy as np
import warnings
from collections import Counter
import re
import pathlib
import itertools as it
from pyteomics import parser, pepxml, mass
logger = logging.getLogger(__name__)
MASS_FORMAT = '{:+.4f}'
UNIMOD = mass.Unimod(pathlib.Path(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'unimod.xml')).as_uri())
INTERNAL = 5
DIFF_C13 = mass.calculate_mass(formula='C[13]') - mass.calculate_mass(formula='C')
H = mass.nist_mass['H+'][0][0]
def internal(*args, **kwargs):
"""Emit log message with level INTERNAL, which is lower than DEBUG."""
logger.log(INTERNAL, *args, **kwargs)
def mass_format(mass):
return MASS_FORMAT.format(mass)
def make_0mc_peptides(pep_list, rule):
"""b, y
In silico cleaves all peptides with a given rule.
Parameters
----------
pep_list : Iterable
An iterable of peptides
rule : str or compiled regex.
Cleavage rule in pyteomics format.
Returns
-------
Set of fully cleaved peptides.
"""
out_set = set()
for i in pep_list:
out_set.update(parser.cleave(i, rule))
return out_set
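# Illustrative sketch (not part of the original module): make_0mc_peptides re-cleaves
# peptides with zero missed cleavages, e.g. splitting missed-cleavage tryptic peptides.
# The peptide sequences below are hypothetical.
def _example_make_0mc_peptides():
    peptides = ['PEPTIDEKLYSR', 'SAMPLEKPEPR']
    return make_0mc_peptides(peptides, parser.expasy_rules['trypsin'])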
def fdr_filter_mass_shift(mass_shift, data, params_dict):
shifts = params_dict['mass_shifts_column']
ms_shift = data.loc[np.abs(data[shifts] - mass_shift[1]) < mass_shift[2], shifts].mean()
mask = np.abs(data[shifts] - mass_shift[1]) < 3 * mass_shift[2]
internal('Mass shift %.3f +- 3 * %.3f', mass_shift[1], mass_shift[2])
data_slice = data.loc[mask].sort_values(by=[params_dict['score_column'], params_dict['spectrum_column']],
ascending=params_dict['score_ascending']).drop_duplicates(subset=params_dict['peptides_column'])
internal('%d peptide rows selected for filtering', data_slice.shape[0])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
df = pepxml.filter_df(data_slice, key=params_dict['score_column'],
fdr=params_dict['FDR'], reverse=not params_dict['score_ascending'], correction=params_dict['FDR_correction'], is_decoy='is_decoy')
internal('Filtered data for %s: %d rows', mass_shift, df.shape[0])
return ms_shift, df
def group_specific_filtering(data, mass_shifts, params_dict):
"""
Selects a window around each fitted mass shift and performs group-wise FDR filtering (TDA) within it.
The window is defined as mean +- 3 * sigma.
Parameters
----------
data : DataFrame
DF with all open search data.
mass_shifts: numpy array
Output of utils.fit_peaks function (poptperr matrix). An array of Gauss-fitted mass shift
parameters and their tolerances: [[A, mean, sigma, A_error, mean_error, sigma_error], ...]
params_dict : dict
Dict with parameters for parsing the csv file:
`mass_shifts_column`, `FDR`, `FDR_correction`, `peptides_column`.
Returns
-------
Dict with mass shifts (in str format) as keys and (mass shift, DataFrame of filtered PSMs) tuples as values.
"""
logger.info('Performing group-wise FDR filtering...')
out_data = {}
for ind, ms in enumerate(mass_shifts):
if ind != len(mass_shifts) - 1:
diff = abs(ms[1] - mass_shifts[ind + 1][1])
width_sum = 3 * (ms[2] + mass_shifts[ind + 1][2])
if diff < width_sum:
coef = width_sum / diff
ms[2] /= coef
mass_shifts[ind + 1][2] /= coef
logger.debug('Mass shifts %.3f and %.3f are too close, dividing their sigma by %.4f', ms[1], mass_shifts[ind + 1][1], coef)
shift, df = fdr_filter_mass_shift(ms, data, params_dict)
if len(df) > 0:
# shift = np.mean(df[shifts]) ###!!!!!!!mean of from fit!!!!
out_data[mass_format(shift)] = (shift, df)
logger.info('# of filtered mass shifts = %s', len(out_data))
return out_data
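# Illustrative sketch (not part of the original module): the `mass_shifts` argument of
# group_specific_filtering is the fit_peaks output, one row per Gaussian peak in the
# [A, mean, sigma, A_error, mean_error, sigma_error] layout. The numbers are made up.
def _example_fit_peaks_output():
    return np.array([
        [1200.0, 0.0005, 0.004, 10.0, 0.0001, 0.0002],  # unmodified peak near zero
        [300.0, 15.9944, 0.005, 8.0, 0.0002, 0.0003],    # oxidation-like shift
    ])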
def check_composition(peptide, aa_labels):
'''
Checks composition of peptides.
Parameters
----------
peptide: str
Peptide sequence
aa_labels: list
list of acceptable aa.
Returns
-------
True if acceptable, False otherwise.
'''
return set(peptide) < set(aa_labels)
def get_varmod_combinations(recommended_vmods, values, tolerance):
logger.debug('Received recommended vmods: %s', recommended_vmods)
counter = Counter(aa for aa, shift in recommended_vmods)
eligible = {aa for aa, count in counter.items() if count >= 3}
out = {}
if eligible:
for i, (aa, shift) in enumerate(recommended_vmods):
if aa == 'isotope error' or aa not in eligible:
continue
candidates = [(aac, shiftc) for aac, shiftc in recommended_vmods if aac == aa and shiftc != shift]
for c1, c2 in it.combinations(candidates, 2):
if abs(values[c1[1]] + values[c2[1]] - values[shift]) <= tolerance:
out[i] = (c1[1], c2[1])
return out
def get_opposite_mods(fmods, rec_fmods, rec_vmods, values, tolerance):
fmods = masses_to_mods(fmods)
for aa, mod in rec_fmods.items():
if aa in fmods:
fmods[aa] = fmods[aa] + values[mod]
else:
fmods[aa] = values[mod]
logger.debug('Calculating opposites using effective fixed mod dict: %s', fmods)
vmod_idx = []
for aaf, fmod in fmods.items():
for i, (aav, vmod) in enumerate(rec_vmods):
if aaf == aav and abs(fmod + values[vmod]) < tolerance:
vmod_idx.append(i)
return vmod_idx
def find_isotopes(ms, peptides_in_bin, tolerance=0.01):
"""
Finds isotope pairs among mass shifts using the C13-C12 mass difference and the number of peptides in each bin.
Parameters
----------
ms : Series
Series with mass shift labels (str) as index and float mass shift values.
peptides_in_bin : Series
Series with the number of peptides in each mass shift bin.
tolerance : float
Tolerance for isotope matching.
Returns
-------
DataFrame with 'isotope' (boolean) and 'monoisotop_index' columns.
"""
out = pd.DataFrame({'isotope': False, 'monoisotop_index': None}, index=ms.index)
np_ms = ms.to_numpy()
difference_matrix = np.abs(np_ms.reshape(-1, 1) - np_ms.reshape(1, -1) - DIFF_C13)
isotop, monoisotop = np.where(difference_matrix < tolerance)
logger.debug('Found %d potential isotopes.', isotop.size)
out.iloc[isotop, 0] = True
out.iloc[isotop, 1] = out.iloc[monoisotop, :].index
for i, row in out.iterrows():
if row['isotope']:
if peptides_in_bin[i] > peptides_in_bin[row['monoisotop_index']]:
out.at[i, 'isotope'], out.at[i, 'monoisotop_index'] = False, None
return out
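# Illustrative sketch (not part of the original module): with the hypothetical input below,
# the +1.0034 bin should be flagged as the C13 isotope of the zero shift, because their
# difference matches DIFF_C13 within the tolerance and its bin is smaller.
def _example_find_isotopes():
    ms = pd.Series({'+0.0000': 0.0, '+1.0034': 1.0034, '+15.9949': 15.9949})
    peptides_in_bin = pd.Series({'+0.0000': 1000, '+1.0034': 120, '+15.9949': 300})
    return find_isotopes(ms, peptides_in_bin, tolerance=0.01)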
def get_candidates_from_unimod(mass_shift, tolerance, unimod_df):
"""
Find modifications for `mass_shift` in Unimod.org database with a given `tolerance`.
Parameters
----------
mass_shift : float
Modification mass in Da.
tolerance : float
Tolerance for the search in the Unimod db.
unimod_df : DataFrame
DF with all Unimod modifications.
Returns
-------
Set of candidate localization sites and set of Unimod accessions.
"""
ind = abs(unimod_df['mono_mass'] - mass_shift) < tolerance
sites_set = set()
accessions = set()
for i, row in unimod_df.loc[ind].iterrows():
sites_set.update(s['site'] if s['position'][:3] == 'Any' else s['position'] for s in row['specificity'])
accessions.add(row['record_id'])
return sites_set, accessions
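# Illustrative sketch (not part of the original module): look up candidate sites for an
# oxidation-like shift, building the Unimod DataFrame the same way AA_stat.AA_stat does.
def _example_unimod_candidates():
    unimod_df = pd.DataFrame(UNIMOD.mods)
    return get_candidates_from_unimod(15.9949, 0.01, unimod_df)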
def find_mod_sum(x, index, sum_matrix, tolerance):
"""
Finds pairs of existing mass shifts whose sum matches the given mass shift `x` within the tolerance.
Parameters
----------
x : float
Mass shift that considered as a component of a modification.
index : dict
Map for mass shift indexes and their values.
sum_matrix : numpy 2D array
Matrix of sums for all mass shifts.
tolerance: float
Matching tolerance in Da.
Returns
-------
List of tuples.
"""
rows, cols = np.where(np.abs(sum_matrix - x) < tolerance)
i = rows <= cols
if rows.size:
return list(zip(index[rows[i]], index[cols[i]]))
return None
def find_sums(ms, tolerance=0.005):
"""
Finds mass shifts that can be explained as sums of other mass shifts in the Series.
Parameters
----------
ms : Series
Series with mass in str format as index and values float mass shift.
tolerance : float
Matching tolerance in Da.
Returns
-------
Series with, for each mass shift, a list of explaining pairs of mass shifts (or None).
"""
zero = mass_format(0.0)
if zero in ms.index:
col = ms.drop(zero)
else:
col = ms
logger.info('Zero mass shift not found in candidates.')
values = col.values
sum_matrix = values.reshape(-1, 1) + values.reshape(1, -1)
out = col.apply(find_mod_sum, args=(col.index, sum_matrix, tolerance))
return out
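# Illustrative sketch (not part of the original module): with the hypothetical shifts below,
# +31.9898 should be explained as the sum of two +15.9949 shifts.
def _example_find_sums():
    ms = pd.Series({'+0.0000': 0.0, '+15.9949': 15.9949, '+31.9898': 31.9898})
    return find_sums(ms, tolerance=0.005)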
def apply_var_mods(seq, mods):
parsed = parser.parse(seq)
out = []
for i, aa in enumerate(parsed):
if i in mods:
out.append('{{{:+.0f}}}'.format(mods[i]) + aa)
else:
out.append(aa)
seqout = ''.join(out)
internal('%s + %s = %s', seq, mods, seqout)
return seqout
def get_column_with_mods(row, params_dict):
peptide = params_dict['peptides_column']
mods = get_var_mods(row, params_dict)
return apply_var_mods(row[peptide], mods)
def format_isoform(row, params_dict):
ms = row['mod_dict']
seq = row['top isoform']
pc, nc, mc = operator.itemgetter('prev_aa_column', 'next_aa_column', 'mods_column')(params_dict)
prev_aa, next_aa = operator.itemgetter(pc, nc)(row)
mods = get_var_mods(row, params_dict)
seq = apply_var_mods(seq, mods)
sequence = re.sub(r'([a-z])([A-Z])', lambda m: '{}[{:+.0f}]'.format(m.group(2), float(ms[m.group(1)])), seq)
return '{}.{}.{}'.format(prev_aa[0], sequence, next_aa[0])
def get_fix_var_modifications(pepxml_file, labels):
fout, vout = {}, []
p = pepxml.PepXML(pepxml_file, use_index=False)
mod_list = list(p.iterfind('aminoacid_modification'))
logger.debug('mod_list: %s', mod_list)
p.reset()
term_mods = list(p.iterfind('terminal_modification'))
logger.debug('term_mods: %s', term_mods)
p.close()
for m in mod_list:
if m['aminoacid'] not in labels:
continue
if 'peptide_terminus' in m:
key = '{}-term {}'.format(m['peptide_terminus'].upper(), m['aminoacid'])
else:
key = m['aminoacid']
if m['variable'] == 'N':
fout[key] = m['mass']
else:
vout.append((key, m['massdiff']))
for m in term_mods:
if m['variable'] == 'N':
if m['terminus'] == 'N':
fout['H-'] = m['mass']
else:
fout['-OH'] = m['mass']
else:
key = ('Protein ' if m.get('protein_terminus') == 'Y' else '') + m['terminus'] + '-term'
vout.append((key, m['massdiff']))
return fout, vout
def get_specificity(pepxml_file):
with pepxml.PepXML(pepxml_file, use_index=False) as p:
s = next(p.iterfind('specificity'))
logger.debug('Extracted enzyme specificity: %s', s)
return s
def parse_l10n_site(site):
aa, shift = site.split('_')
return aa, shift
def mass_to_mod(label, value, aa_mass=mass.std_aa_mass):
words = label.split()
if len(words) > 1:
# terminal mod
label = words[-1]
return value - aa_mass.get(label, 0)
def masses_to_mods(d, fix_mod=None):
aa_mass = mass.std_aa_mass.copy()
aa_mass['H-'] = 1.007825
aa_mass['-OH'] = 17.00274
if fix_mod:
aa_mass.update(fix_mod)
d = {k: mass_to_mod(k, v, aa_mass) for k, v in d.items()}
if 'H-' in d:
d['N-term'] = d.pop('H-')
if '-OH' in d:
d['C-term'] = d.pop('-OH')
return d
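# Illustrative sketch (not part of the original module): convert absolute residue masses
# (as reported in pepXML fixed modifications) back to mass differences; the example values
# correspond to carbamidomethylated C and an acetylated N-terminus and are hypothetical input.
def _example_masses_to_mods():
    return masses_to_mods({'C': 160.03065, 'H-': 43.01839})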
def get_var_mods(row, params_dict):
# produce a dict for specific PSM: position (int) -> mass shift (float)
modifications = row[params_dict['mods_column']]
peptide = params_dict['peptides_column']
mass_dict_0 = mass.std_aa_mass.copy()
mass_dict_0['H-'] = 1.007825
mass_dict_0['-OH'] = 17.00274
mass_dict_0.update(params_dict['fix_mod'])
mod_dict = {}
if modifications:
internal('Got modifications for peptide %s: %s', row[peptide], modifications)
for m in modifications:
# internal('Parsing modification: %s', m)
mmass, pos = m.split('@')
mmass = float(mmass)
pos = int(pos)
if pos == 0:
key = 'H-'
elif pos == len(row[peptide]) + 1:
key = '-OH'
else:
key = row[peptide][pos-1]
if abs(mmass - mass_dict_0[key]) > params_dict['frag_acc']:
# utils.internal('%s modified in %s at position %s: %.3f -> %.3f', key, row[peptide], pos, mass_dict_0[key], mmass)
mod_dict[pos] = mmass - mass_dict_0[key]
if mod_dict:
internal('Final mod dict: %s', mod_dict)
return mod_dict
def format_grouped_keys(items, params_dict):
out = []
for k, td in items:
if k[1:] == '-term':
t = k[0]
if isinstance(td, list):
keys, values = zip(*td)
diff = max(values) - min(values)
label_condition = set(keys) >= set(params_dict['labels'])
if diff < params_dict['prec_acc'] and label_condition:
out.append((k, values[0])) # arbitrary amino acid, they all have the same modification
logger.debug('Collapsing %s-terminal mods.', t)
else:
logger.debug('Not collapsing %s-term dict: diff in values is %.3f, set of labels condition is %ssatisfied',
t, diff, '' if label_condition else 'not ')
for aa, v in td:
out.append((k + ' ' + aa, v))
else:
out.append((k, td))
else:
out.append((k, td))
logger.debug('Variable mods with grouped keys: %s', out)
return out
def group_terminal(items):
grouped = []
tg = {}
for k, v in items:
prefix, protein, term, aa = re.match(r'((Protein)?(?: )?([NC]-term)?)(?: )?([A-Z])?', k).groups()
if term is None or aa is None:
grouped.append((k, v))
else:
tg.setdefault(prefix, []).append((aa, v))
grouped.extend(tg.items())
logger.debug('Variable mods after grouping: %s', grouped)
return grouped
def format_mod_dict_str(d):
if d:
return ', '.join('{} @ {}'.format(v, k) for k, v in d.items())
return 'none'
def format_mod_dict(d):
if d:
return ', '.join('{} @ {}'.format(mass_format(v), k) for k, v in d.items())
return 'none'
def format_mod_list(items):
if items:
return ', '.join('{} @ {}'.format(v, k) for k, v in items)
return 'none'
def get_isotope_shift(label, locmod_df):
isotope = locmod_df[locmod_df['isotope index'] == label]
if not isotope.shape[0]:
return
return isotope[isotope['# peptides in bin'] == isotope['# peptides in bin'].max()].index[0]
def format_localization_key(site, ms):
if not isinstance(ms, str):
ms = mass_format(ms)
return site + '_' + ms
def measured_mz_series(df, params_dict):
return (df[params_dict['measured_mass_column']] + df[params_dict['charge_column']] * H
) / df[params_dict['charge_column']]
def calculated_mz_series(df, params_dict):
return (df[params_dict['calculated_mass_column']] + df[params_dict['charge_column']] * H
) / df[params_dict['charge_column']]
def format_list(lst, sep1=', ', sep2=' or '):
lst = list(lst)
if not lst:
return ''
if len(lst) == 1:
return lst[0]
*most, last = lst
return sep1.join(most) + sep2 + last
def find_mass_shift(value, data_dict, tolerance):
s = sorted(data_dict, key=lambda x: abs(value - data_dict[x][0]))
if abs(data_dict[s[0]][0] - value) <= tolerance:
return s[0]
def loc_positions(top_isoform):
return [i for i, a in enumerate(top_isoform, 1) if len(a) > 1]
def choose_correct_massdiff(reported, calculated, params_dict):
maxdiff = np.abs(reported - calculated).max()
if maxdiff < params_dict['bin_width'] / 2:
logger.debug('Maximum mass diff is within bounds: %.4f', maxdiff)
return reported
elif maxdiff < params_dict['prec_acc']:
logger.warning('Reported mass shifts have a high calculation error (%.4f).'
' Using own calculations', maxdiff)
return calculated
else:
logger.warning('Reported mass shifts differ from calculated values (up to %.4f).'
' Using the reported values. Consider reporting this to the developers.', maxdiff)
return reported
def convert_tandem_cleave_rule_to_regexp(cleavage_rule, params_dict):
def get_sense(c_term_rule, n_term_rule):
if '{' in c_term_rule:
return 'N'
elif '{' in n_term_rule:
return 'C'
else:
if len(c_term_rule) <= len(n_term_rule):
return 'C'
else:
return 'N'
def get_cut(cut, no_cut):
aminoacids = set(params_dict['labels'])
cut = ''.join(aminoacids & set(cut))
if '{' in no_cut:
no_cut = ''.join(aminoacids & set(no_cut))
return cut, no_cut
else:
no_cut = ''.join(set(params_dict['labels']) - set(no_cut))
return cut, no_cut
protease = cleavage_rule.replace('X', ''.join(params_dict['labels']))
c_term_rule, n_term_rule = protease.split('|')
sense = get_sense(c_term_rule, n_term_rule)
if sense == 'C':
cut, no_cut = get_cut(c_term_rule, n_term_rule)
else:
cut, no_cut = get_cut(n_term_rule, c_term_rule)
return {'sense': sense, 'cut': cut, 'no_cut': no_cut}
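# Illustrative sketch (not part of the original module): an X!Tandem-style trypsin rule
# '[RK]|{P}' should be converted to C-terminal specificity, cutting after K/R but not before P.
def _example_convert_tandem_rule():
    params = {'labels': list('ACDEFGHIKLMNPQRSTVWY')}
    return convert_tandem_cleave_rule_to_regexp('[RK]|{P}', params)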
def parse_mod_list(s, kind):
pairs = re.split(r'\s*[,;]\s*', s)
if kind == 'fixed':
out = {}
elif kind == 'variable':
out = []
else:
raise ValueError('`kind` must be "fixed" or "variable", not "{}".'.format(kind))
for p in pairs:
if p:
m, aa = re.split(r'\s*@\s*', p)
m = float(m)
if kind == 'fixed':
if aa == 'N-term':
out['H-'] = 1.007825 + m
elif aa == 'C-term':
out['-OH'] = 17.00274 + m
else:
out[aa] = mass.std_aa_mass[aa] + m
else:
out.append((aa, m))
return out


# ---- AA_stat/AA_stat.py ----
import pandas as pd
import numpy as np
import os
import sys
from collections import defaultdict
from scipy.stats import ttest_ind
import logging
import warnings
from pyteomics import parser
from . import utils, localization, html, io, stats, recommendations
logger = logging.getLogger(__name__)
def get_peptide_statistics(peptide_list):
'''
Calculates presence of amino acid in peptide sequences.
Parameters
----------
peptide_list : Iterable
An iterable of peptides, that are already fully cleaved.
Returns
-------
out : dict with amino acids as keys and the percentage of peptides containing them as values.
'''
sum_aa = 0
pep_set = set(peptide_list)
d = defaultdict(int)
for seq in pep_set:
for let in set(seq):
d[let] += 1
sum_aa += 1
for i in d:
d[i] = int(100 * d[i] / sum_aa)
return d
def get_aa_distribution(peptide_list, rule):
'''
Calculates amino acid statistics for peptide list.
In silico cleaves peptides to get fully cleaved set of peptides.
Parameters
----------
peptide_list : Iterable
An iterable of peptides.
rule : str or compiled regex.
Cleavage rule in pyteomics format.
Returns
-------
out : dict with amino acids as keys and their relative frequencies as values.
'''
sum_aa = 0
pep_set = utils.make_0mc_peptides(peptide_list, rule)
d = defaultdict(int)
for seq in pep_set:
for let in seq:
d[let] += 1
sum_aa += 1
for i in d:
d[i] /= sum_aa
return d
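# Illustrative sketch (not part of the original module): amino acid frequencies for a tiny,
# hypothetical peptide list, using the expasy trypsin rule from pyteomics.
def _example_get_aa_distribution():
    return get_aa_distribution(['PEPTIDEK', 'SAMPLER'], parser.expasy_rules['trypsin'])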
def make_table(distributions, number_of_PSMs, mass_shifts, reference_label):
'''
Prepares the amino acid statistics result table.
Parameters
----------
distributions : DataFrame
Amino acids statistics, where indexes are amino acids, columns mass shifts (str)
number_of_PSMs : Series
Indexes are mass shifts (in str format) and values are numbers of filtered PSMs
mass_shifts : dict
Mass shift in str format (rounded) -> actual mass shift (float)
reference_label : str
Label of the reference (zero) mass shift.
Returns
-------
A table with mass shifts, psms, amino acid statistics columns.
'''
df = pd.DataFrame({'mass shift': [mass_shifts[k] for k in distributions.columns],
'# peptides in bin': number_of_PSMs},
index=distributions.columns)
df['# peptides in bin'] = df['# peptides in bin'].astype(np.int64)
out = pd.concat([df, distributions.T], axis=1)
out['is reference'] = df.index == reference_label
return out
def calculate_error_and_p_vals(pep_list, err_ref_df, reference, rule, aas):
'''
Calculates p-values and the standard deviation of amino acid statistics
using the bootstrapping method.
Parameters
----------
pep_list : Iterable
An iterable of peptides.
err_ref_df : Series
Indexes are amino acids and values are stds of a `reference` mass shift.
reference : Series
Indexes are amino acids and values are amino acids statistics of a reference mass shift.
rule : str or compiled regex.
Cleavage rule in pyteomics format.
aas: Iterable
An Iterable of amino acids to be considered.
Returns
-------
Series of p-values, std of amino acid statistics for considered `pep_list`.
'''
d = pd.DataFrame(index=aas)
for i in range(50):
d[i] = pd.Series(get_aa_distribution(
np.random.choice(np.array(pep_list),
size=(len(pep_list) // 2), replace=False), rule)) / reference
p_val = pd.Series(dtype=float)
for i in aas:
p_val[i] = ttest_ind(err_ref_df.loc[i, :], d.loc[i, :])[1]
return p_val, d.std(axis=1)
def get_zero_mass_shift(mass_shift_data_dict, params_dict):
"""
Finds the zero mass shift (the shift of the non-modified peak) to use as the reference.
Parameters
----------
mass_shift_data_dict : dict
dict of mass shifts.
params_dict: dict
Returns
-------
Mass shift label, Mass shift in float format.
"""
values = [v[0] for v in mass_shift_data_dict.values()]
keys = list(mass_shift_data_dict.keys())
data = [v[1] for v in mass_shift_data_dict.values()]
lref = np.argmin(np.abs(values))
maxbin = max(df.shape[0] for df in data)
logger.debug('Closest to zero: %s, with %d peptides. Top mass shift has %d peptides.',
keys[lref], data[lref].shape[0], maxbin)
if abs(values[lref]) > params_dict['zero bin tolerance'] or data[lref].shape[0] / maxbin < params_dict['zero min intensity']:
logger.warning('Too few unmodified peptides. Mass shift with most identifications will be the reference.')
identifications = [df.shape[0] for df in data]
lref = np.argmax(identifications)
return keys[lref], values[lref]
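# Illustrative sketch (not part of the original module): the shift closest to zero is chosen
# as reference when it is within tolerance and abundant enough; all values are hypothetical.
def _example_get_zero_mass_shift():
    data_dict = {
        '+0.0002': (0.0002, pd.DataFrame({'peptide': ['PEPTIDEK'] * 900})),
        '+15.9949': (15.9949, pd.DataFrame({'peptide': ['PEPTIDEK'] * 1000})),
    }
    params = {'zero bin tolerance': 0.05, 'zero min intensity': 0.05}
    return get_zero_mass_shift(data_dict, params)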
def check_difference(shift1, shift2, tolerance=0.05):
"""
Checks whether two mass shifts are distinct: returns True only if the difference between
their means exceeds both the sum of their sigmas and the tolerance.
Parameters
----------
shift1 : List
List describing a mass shift: [A, mean, sigma, ...] (as produced by fit_peaks).
shift2 : List
List describing a mass shift: [A, mean, sigma, ...] (as produced by fit_peaks).
tolerance : float
Matching tolerance in Da.
Returns
-------
out : bool
"""
mean_diff = (shift1[1] - shift2[1]) ** 2
sigma_diff = (shift1[2] + shift2[2]) ** 2
res = mean_diff > sigma_diff
if abs(shift1[1] - shift2[1]) < tolerance:
res = False
return res
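# Illustrative sketch (not part of the original module): the two hypothetical peaks below
# differ by ~1 Da, far more than the sum of their sigmas and the merge tolerance, so
# check_difference returns True and they are kept as separate mass shifts.
def _example_check_difference():
    return check_difference([100.0, 0.0, 0.003], [80.0, 0.984, 0.004])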
def filter_mass_shifts(results, tolerance=0.05):
"""
Merges close mass shifts. If the difference between the means of two mass shifts is less
than the sum of their sigmas, they are merged.
Parameters
----------
results : numpy array
Output of utils.fit_peaks function (poptperr matrix). An array of Gauss fitted mass shift
parameters and their tolerances. [[A, mean, sigma, A_error, mean_error, sigma_error],...]
tolerance : float
Matching tolerance in Da.
Returns
-------
Updated poptperr matrix.
"""
logger.info('Discarding bad peaks...')
temp = []
out = []
if not results.size:
return []
if results.size == 1:
return [results[0]]
temp = [results[0]]
for mass_shift in results[1:]:
if check_difference(temp[-1], mass_shift, tolerance=tolerance):
if len(temp) > 1:
logger.info('Joined mass shifts %s', ['{:0.4f}'.format(x[1]) for x in temp])
out.append(max(temp, key=lambda x: x[0]))
temp = [mass_shift]
else:
temp.append(mass_shift)
out.append(max(temp, key=lambda x: x[0]))
logger.info('Peaks for subsequent analysis: %s', len(out))
return out
def calculate_statistics(mass_shifts_dict, reference_label, params_dict, args):
"""
Calculates amino acid statistics, relative amino acids presence in peptides
for all mass shifts.
Parameters
----------
mass_shifts_dict : dict
A dict with mass shifts (in str format) as keys and (mass shift, DataFrame of filtered PSMs) tuples as values.
reference_label : str
Label of the reference (zero) mass shift.
params_dict : dict
Dict with parameters for parsing the csv file:
`labels`, `rule`, `peptides_column` and other params
Returns
-------
DF with amino acid statistics, Series with number of PSMs and dict of data
for mass shift figures.
"""
logger.info('Calculating distributions...')
labels = params_dict['labels']
rule = params_dict['rule']
expasy_rule = parser.expasy_rules.get(rule, rule)
save_directory = args.dir
peptides = params_dict['peptides_column']
reference_bin = mass_shifts_dict[reference_label][1]
number_of_PSMs = dict() # pd.Series(index=list(mass_shifts_labels.keys()), dtype=int)
reference = pd.Series(get_aa_distribution(reference_bin[peptides], expasy_rule))
reference.fillna(0, inplace=True)
# bootstraping for errors and p values calculation in reference (zero) mass shift
err_reference_df = pd.DataFrame(index=labels)
for i in range(50):
err_reference_df[i] = pd.Series(get_aa_distribution(
np.random.choice(np.array(reference_bin[peptides]), size=(len(reference_bin) // 2), replace=False),
expasy_rule)) / reference
logger.info('Mass shifts:')
distributions = pd.DataFrame(index=labels)
p_values = pd.DataFrame(index=labels)
figure_args = {}
for ms_label, (ms, ms_df) in mass_shifts_dict.items():
aa_statistics = pd.Series(get_aa_distribution(ms_df[peptides], expasy_rule))
peptide_stat = pd.Series(get_peptide_statistics(ms_df[peptides]), index=labels)
number_of_PSMs[ms_label] = len(ms_df)
aa_statistics.fillna(0, inplace=True)
distributions[ms_label] = aa_statistics / reference
with warnings.catch_warnings():
warnings.simplefilter("ignore")
p_vals, errors = calculate_error_and_p_vals(ms_df[peptides], err_reference_df, reference, expasy_rule, labels)
# errors.fillna(0, inplace=True)
p_values[ms_label] = p_vals
distributions.fillna(0, inplace=True)
figure_args[ms_label] = (len(ms_df), [distributions[ms_label], errors], peptide_stat.fillna(0))
logger.info('%s Da', ms_label)
pout = p_values.T
pout.fillna(0).to_csv(os.path.join(save_directory, 'p_values.csv'), index=False)
return distributions, pd.Series(number_of_PSMs), figure_args
def systematic_mass_shift_correction(mass_shifts_dict, mass_correction):
"""
Parameters
----------
mass_shifts_dict : dict
A dict with in the format: `mass_shift_label`: `(mass_shift_value, filtered_peptide_dataframe)`.
mass_correction: float
Mass of reference (zero) mass shift, that should be moved to 0.0
Returns
-------
out : dict
Updated `mass_shifts_dict`
"""
out = {}
for k, v in mass_shifts_dict.items():
corr_mass = v[0] - mass_correction
out[utils.mass_format(corr_mass)] = (corr_mass, v[1])
return out
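# Illustrative sketch (not part of the original module): subtracting a hypothetical +0.004 Da
# systematic error relabels the bins so that the reference bin sits exactly at zero.
def _example_systematic_correction():
    shifts = {'+0.0040': (0.0040, pd.DataFrame()), '+16.0010': (16.0010, pd.DataFrame())}
    return systematic_mass_shift_correction(shifts, 0.0040)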
def AA_stat(params_dict, args, step=None):
"""
Calculates all statistics, saves tables and pictures.
"""
save_directory = args.dir
logger.debug('Fixed modifications: %s', params_dict['fix_mod'])
logger.debug('Variable modifications: %s', params_dict['var_mod'])
logger.info('Using fixed modifications: %s.', utils.format_mod_dict(utils.masses_to_mods(params_dict['fix_mod'])))
logger.info('Variable modifications in search results: %s.', utils.format_mod_list(params_dict['var_mod']))
logger.debug('Enzyme specificity: %s', params_dict['enzyme'])
data = io.read_input(args, params_dict)
if data is None:
sys.exit(1)
popt_pvar = stats.fit_peaks(data, args, params_dict)
# logger.debug('popt_pvar: %s', popt_pvar)
final_mass_shifts = filter_mass_shifts(popt_pvar, tolerance=params_dict['shift_error'] * params_dict['bin_width'])
# logger.debug('final_mass_shifts: %s', final_mass_shifts)
mass_shift_data_dict = utils.group_specific_filtering(data, final_mass_shifts, params_dict)
del data
# logger.debug('mass_shift_data_dict: %s', mass_shift_data_dict)
if not mass_shift_data_dict:
html.render_html_report(None, mass_shift_data_dict, None, params_dict, {}, {}, {}, [], save_directory, [], step=step)
return None, None, None, mass_shift_data_dict, {}
reference_label, reference_mass_shift = get_zero_mass_shift(mass_shift_data_dict, params_dict)
if abs(reference_mass_shift) < params_dict['zero bin tolerance']:
logger.info('Systematic mass shift equals %s', reference_label)
if params_dict['calibration'] != 'off':
mass_shift_data_dict = systematic_mass_shift_correction(mass_shift_data_dict, reference_mass_shift)
reference_mass_shift = 0.0
reference_label = utils.mass_format(0.0)
else:
logger.info('Leaving systematic shift in place (calibration disabled).')
else:
logger.info('Reference mass shift is %s', reference_label)
ms_labels = {k: v[0] for k, v in mass_shift_data_dict.items()}
logger.debug('Final shift labels: %s', ms_labels.keys())
distributions, number_of_PSMs, figure_data = calculate_statistics(mass_shift_data_dict, reference_label, params_dict, args)
table = make_table(distributions, number_of_PSMs, ms_labels, reference_label)
stats.summarizing_hist(table, save_directory)
logger.info('Summary histogram saved.')
# table.index = table['mass shift'].apply(utils.mass_format)
table[['is isotope', 'isotope index']] = utils.find_isotopes(
table['mass shift'], table['# peptides in bin'], tolerance=params_dict['prec_acc'])
table.at[reference_label, 'is isotope'] = False
table.at[reference_label, 'isotope index'] = None
logger.debug('Isotopes:\n%s', table.loc[table['is isotope']])
u = utils.UNIMOD.mods
unimod_df = pd.DataFrame(u)
table['unimod candidates'], table['unimod accessions'] = zip(*table['mass shift'].apply(
lambda x: utils.get_candidates_from_unimod(x, params_dict['prec_acc'], unimod_df)))
table['sum of mass shifts'] = utils.find_sums(table.loc[~table['is isotope'], 'mass shift'],
tolerance=params_dict['shift_error'] * params_dict['bin_width'])
logger.debug('Sums of mass shifts:\n%s', table.loc[table['sum of mass shifts'].notna()])
table.to_csv(os.path.join(save_directory, 'aa_statistics_table.csv'), index=False)
spectra_dict = io.read_spectra(args)
if spectra_dict:
if args.mgf:
params_dict['mzml_files'] = False
else:
params_dict['mzml_files'] = True
logger.info('Starting Localization using MS/MS spectra...')
ms_labels = pd.Series(ms_labels)
locmod_df = table[['mass shift', '# peptides in bin', 'is isotope', 'isotope index', 'sum of mass shifts',
'unimod candidates', 'unimod accessions']].copy()
locmod_df['aa_stat candidates'] = localization.get_candidates_from_aastat(
table, labels=params_dict['labels'], threshold=params_dict['candidate threshold'])
if params_dict['use_all_loc']:
logger.info('Localizing all mass shifts on all amino acids. This may take some time.')
locmod_df['all candidates'] = [set(parser.std_amino_acids)] * locmod_df.shape[0]
else:
locmod_df['all candidates'] = locmod_df.apply(
lambda x: set(x['unimod candidates']) | set(x['aa_stat candidates']), axis=1)
if params_dict['force_term_loc']:
logger.debug('Adding terminal localizations for all mass shifts.')
locmod_df['all candidates'] = locmod_df['all candidates'].apply(lambda x: x | {'N-term', 'C-term'})
for i in locmod_df.loc[locmod_df['is isotope']].index:
locmod_df.at[i, 'all candidates'] = locmod_df.at[i, 'all candidates'].union(
locmod_df.at[locmod_df.at[i, 'isotope index'], 'all candidates'])
for i in locmod_df.index:
ac = locmod_df.at[i, 'all candidates']
for term in ('N', 'C'):
if 'Protein {}-term'.format(term) in ac and '{}-term'.format(term) in ac:
ac.remove('Protein {}-term'.format(term))
logger.debug('Removing protein %s-term localization for %s as redundant.', term, i)
if reference_mass_shift == 0.0:
locmod_df.at[reference_label, 'all candidates'] = set()
locmod_df['candidates for loc'] = localization.get_full_set_of_candidates(locmod_df)
logger.info('Reference mass shift %s', reference_label)
localization_dict = {}
for ms_label, (ms, df) in mass_shift_data_dict.items():
localization_dict.update(localization.localization(
df, ms, ms_label, locmod_df.at[ms_label, 'candidates for loc'],
params_dict, spectra_dict, {k: v[0] for k, v in mass_shift_data_dict.items()}))
locmod_df['localization'] = pd.Series(localization_dict).apply(dict)
locmod_df.to_csv(os.path.join(save_directory, 'localization_statistics.csv'), index=False)
if not locmod_df.at[reference_label, 'all candidates']:
logger.debug('Explicitly writing out peptide table for reference mass shift.')
df = mass_shift_data_dict[reference_label][1]
io.save_df(reference_label, df, save_directory, params_dict)
for reader in spectra_dict.values():
reader.close()
else:
locmod_df = None
io.save_peptides(mass_shift_data_dict, save_directory, params_dict)
logger.info('No spectrum files. MS/MS localization is not performed.')
logger.info('Plotting mass shift figures...')
for ms_label, data in figure_data.items():
if locmod_df is not None:
localizations = locmod_df.at[ms_label, 'localization']
sumof = locmod_df.at[ms_label, 'sum of mass shifts']
else:
localizations = None
sumof = None
stats.plot_figure(ms_label, *data, params_dict, save_directory, localizations, sumof)
logger.info('AA_stat results saved to %s', os.path.abspath(args.dir))
utils.internal('Data dict: \n%s', mass_shift_data_dict)
recommended_fix_mods = recommendations.determine_fixed_mods(figure_data, table, locmod_df, mass_shift_data_dict, params_dict)
logger.debug('Recommended fixed mods: %s', recommended_fix_mods)
if recommended_fix_mods:
logger.info('Recommended fixed modifications: %s.', utils.format_mod_dict_str(recommended_fix_mods))
else:
logger.info('Fixed modifications not recommended.')
recommended_var_mods = recommendations.determine_var_mods(
figure_data, table, locmod_df, mass_shift_data_dict, params_dict, recommended_fix_mods)
logger.debug('Recommended variable mods: %s', recommended_var_mods)
if recommended_var_mods:
logger.info('Recommended variable modifications: %s.', utils.format_mod_list(recommended_var_mods))
else:
logger.info('Variable modifications not recommended.')
combinations = utils.get_varmod_combinations(recommended_var_mods, ms_labels, params_dict['prec_acc'])
logger.debug('Found combinations in recommended variable mods: %s', combinations)
opposite = utils.get_opposite_mods(
params_dict['fix_mod'], recommended_fix_mods, recommended_var_mods, ms_labels, params_dict['prec_acc'])
logger.debug('Opposite modifications: %s', utils.format_mod_list([recommended_var_mods[i] for i in opposite]))
html.render_html_report(table, mass_shift_data_dict, locmod_df, params_dict,
recommended_fix_mods, recommended_var_mods, combinations, opposite, save_directory, ms_labels, step=step)
return figure_data, table, locmod_df, mass_shift_data_dict, recommended_fix_mods, recommended_var_mods


# ---- AA_stat/io.py ----
import matplotlib
matplotlib.use('Agg')
import pylab as plt
from matplotlib.backends.backend_pdf import PdfPages
import sys
import ast
import os
import glob
from configparser import ConfigParser
import multiprocessing as mp
from collections import defaultdict
import logging
import re
import numpy as np
import pandas as pd
from pyteomics import pepxml, mgf, mzml
from . import utils, stats
AA_STAT_PARAMS_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'default.cfg')
logger = logging.getLogger(__name__)
def sanitize_df(df, params_dict):
# drop unneeded columns
column_keys = ['proteins_column', 'peptides_column', 'mass_shifts_column', 'score_column', 'measured_mass_column',
'calculated_mass_column', 'rt_column', 'next_aa_column', 'prev_aa_column', 'spectrum_column', 'charge_column', 'mods_column']
needed = {params_dict[k] for k in column_keys}
to_drop = [c for c in df.columns if c not in needed]
old_size = df.shape[1]
df.drop(to_drop, axis=1, inplace=True)
logger.debug('Kept %d and dropped %d out of %d initial columns.', df.shape[1], len(to_drop), old_size)
# TODO: simplify and sanitize columns here
return df
def preprocess_df(df, filename, params_dict):
'''
Preprocesses DataFrame.
Parameters
----------
df: DataFrame
Open search result df.
filename: str
Path of initial (pepxml or csv) file
params_dict: dict
Dict with all input parameters
Returns
-------
DataFrame
'''
logger.debug('Preprocessing %s', filename)
pp = PdfPages(os.path.join(params_dict['output directory'], os.path.basename(filename) + '.clustering.pdf'))
window = params_dict['zero_window']
zero_bin = 0
shifts = params_dict['mass_shifts_column']
if not params_dict['decoy_prefix']:
isdddict = {}
for prefix in params_dict['decoy_prefix_list']:
is_decoy = df[params_dict['proteins_column']].apply(
lambda s: all(x.startswith(prefix) for x in s))
isd = is_decoy.sum()
logger.debug('Trying prefix %s for %s... Found %d decoys.', prefix, filename, isd)
isdddict[prefix] = isd
prefix = max(isdddict, key=isdddict.get)
logger.debug('Selected prefix %s for file %s (%d decoys)', prefix, filename, isdddict[prefix])
else:
prefix = params_dict['decoy_prefix']
df['is_decoy'] = df[params_dict['proteins_column']].apply(lambda s: all(x.startswith(prefix) for x in s))
del df[params_dict['proteins_column']]
if not df['is_decoy'].sum():
logger.error('No decoy IDs found in %s.', filename)
if not params_dict['decoy_prefix']:
logger.error('Configured decoy prefixes are: %s. Check you files or config.',
', '.join(params_dict['decoy_prefix_list']))
else:
logger.error('Configured decoy prefix is: %s. Check your files or config.', prefix)
return
ms, filtered = utils.fdr_filter_mass_shift([None, zero_bin, window], df, params_dict)
n = filtered.shape[0]
logger.debug('%d filtered peptides near zero.', n)
df[shifts] = utils.choose_correct_massdiff(
df[shifts],
df[params_dict['measured_mass_column']] - df[params_dict['calculated_mass_column']], params_dict)
if params_dict['calibration'] == 'off':
logger.info('Mass calibration is disabled. Skipping.')
elif params_dict['calibration'] != 'simple':
if n < params_dict['min_peptides_for_mass_calibration']:
logger.warning('Skipping mass calibration: not enough peptides near zero mass shift.')
else:
to_fit, unit = stats.get_fittable_series(filtered, params_dict)
# save copies of mass shift column, for use in boolean indexing
shift_copy = df[shifts].copy()
old_shifts = filtered[shifts].copy()
if params_dict['clustering']:
clustering = stats.clusters(filtered, to_fit, unit, filename, params_dict, pp)
if clustering is None:
filtered_clusters = None
else:
filtered_clusters = stats.filter_clusters(clustering, filtered, to_fit, params_dict)
if not filtered_clusters:
logger.info('Clustering was unsuccessful for %s. Calibrating masses in the whole file.', filename)
elif len(filtered_clusters) == 1:
logger.info('One large cluster found in %s. Calibrating masses in the whole file.', filename)
filtered_clusters = None
else:
logger.info('Splitting %s into %d pieces.', filename, len(filtered_clusters))
f = plt.figure()
for i in filtered_clusters:
plt.hist(filtered.loc[to_fit.index].loc[clustering.labels_ == i, shifts], label=i, alpha=0.2, bins=25, density=True)
plt.xlabel(shifts)
plt.title('Before correction')
plt.legend()
pp.savefig(f)
plt.close()
else:
filtered_clusters = None
if not filtered_clusters:
slices = [None]
titles = ['Whole file']
assigned_masks = [slice(None)]
filtered_clusters = ['<all>']
else:
slices, titles = [], []
for i in filtered_clusters:
slices.append(clustering.labels_ == i)
titles.append('Cluster {}'.format(i))
assigned_masks = stats.get_cluster_masks(filtered_clusters, clustering, df, to_fit, params_dict)
for c, slice_, title, mask in zip(filtered_clusters, slices, titles, assigned_masks):
# logger.debug('Slice size for cluster %s is: %s', c, slice_.size if slice_ is not None else None)
to_fit, unit = stats.get_fittable_series(filtered, params_dict, slice_)
popt = stats._gauss_fit_slice(to_fit, unit, filename, title, params_dict, pp)
if unit == 'Da':
shift_copy.loc[mask] -= popt[1]
elif unit == 'ppm':
shift_copy.loc[mask] -= popt[1] * df[params_dict['calculated_mass_column']] / 1e6
else:
freq_measured = 1e6 / np.sqrt(utils.measured_mz_series(df.loc[mask], params_dict)) - popt[1]
mass_corrected = (((1e6 / freq_measured) ** 2) * df.loc[mask, params_dict['charge_column']] -
utils.H * df.loc[mask, params_dict['charge_column']])
correction = mass_corrected - df.loc[mask, params_dict['measured_mass_column']]
logger.debug('Average systematic mass shift for cluster %s: %f', c, -correction.mean())
shift_copy.loc[mask] += correction
# corrected mass shifts are written back here
df[shifts] = shift_copy
filtered[shifts] = df.loc[filtered.index, shifts]
f = plt.figure()
floc = filtered.loc[old_shifts.abs() < params_dict['zero_window']]
sc = plt.scatter(floc[shifts], floc[params_dict['rt_column']],
c=clustering.labels_ if (params_dict['clustering'] and clustering) else 'k')
if params_dict['clustering'] and clustering:
plt.legend(*sc.legend_elements(), title='Clusters')
plt.xlabel(shifts)
plt.ylabel(params_dict['rt_column'])
plt.title('After correction')
pp.savefig(f)
plt.close()
if filtered_clusters != ['<all>']:
f = plt.figure()
for i in filtered_clusters:
plt.hist(floc.loc[clustering.labels_ == i, shifts], label=i, alpha=0.2, bins=25, density=True)
plt.xlabel(shifts)
plt.legend()
pp.savefig(f)
plt.close()
pp.close()
df['file'] = os.path.splitext(os.path.basename(filename))[0]
check_composition = df[params_dict['peptides_column']].apply(lambda x: utils.check_composition(x, params_dict['labels']))
del df[params_dict['measured_mass_column']]
del df[params_dict['calculated_mass_column']]
del df[params_dict['rt_column']]
return df.loc[check_composition]
def read_pepxml(fname, params_dict):
'''
Reads a pepxml file and preprocesses it.
Parameters
----------
fname: str
Path to pepxml file
params_dict: dict
Dict with all input parameters
Returns
-------
DataFrame
'''
logger.debug('Reading %s', fname)
df = pepxml.DataFrame(fname, read_schema=False)
return preprocess_df(sanitize_df(df, params_dict), fname, params_dict)
def read_csv(fname, params_dict):
"""
Reads csv file.
Parameters
----------
fname : str
Path to file name.
params_dict : dict
Dict with parameters for parsing the csv file:
`csv_delimiter`, `proteins_column`, `proteins_delimeter`
Returns
-------
A DataFrame of csv file.
"""
# logger.info('Reading %s', fname)
df = pd.read_csv(fname, sep=params_dict['csv_delimiter'])
df[params_dict['mods_column']] = df[params_dict['mods_column']].apply(ast.literal_eval)
protein = params_dict['proteins_column']
prev = params_dict['prev_aa_column']
next_ = params_dict['next_aa_column']
for c in [protein, prev, next_]:
if (df[c].str[0] == '[').all() and (df[c].str[-1] == ']').all():
df[c] = df[c].apply(ast.literal_eval)
else:
df[c] = df[c].str.split(params_dict['proteins_delimeter'])
return preprocess_df(sanitize_df(df, params_dict), fname, params_dict)
def read_mgf(file_path):
return mgf.IndexedMGF(file_path)
def read_mzml(file_path):
return mzml.PreIndexedMzML(file_path)
def read_spectra(args):
readers = {
'mgf': read_mgf,
'mzml': read_mzml,
}
out_dict = {}
for ftype, reader in readers.items():
spec_filenames = getattr(args, ftype)
if spec_filenames:
break
else:
return {}
for inp in [args.pepxml, args.csv]:
if inp:
break
if len(inp) != len(spec_filenames):
logger.critical('Numbers of input files and spectrum files do not match (%d and %d).', len(inp), len(spec_filenames))
sys.exit(1)
for i, filename in zip(inp, spec_filenames):
name = os.path.splitext(os.path.basename(i))[0]
out_dict[name] = reader(filename)
return out_dict
def read_input(args, params_dict):
"""
Reads open search output, assembles all data in one DataFrame.
"""
logger.info('Reading input files...')
readers = {
'pepxml': read_pepxml,
'csv': read_csv,
}
nproc = params_dict['processes']
if nproc == 1:
dfs = []
logger.debug('Reading files in one process.')
for ftype, reader in readers.items():
filenames = getattr(args, ftype)
logger.debug('Filenames [%s]: %s', ftype, filenames)
if filenames:
for filename in filenames:
dfs.append(reader(filename, params_dict))
else:
nfiles = 0
for ftype, reader in readers.items():
filenames = getattr(args, ftype)
if filenames:
nfiles += len(filenames)
if nproc > 0:
nproc = min(nproc, nfiles)
else:
nproc = min(nfiles, mp.cpu_count())
logger.debug('Reading files using %s processes.', nproc)
pool = mp.Pool(nproc)
results = []
for ftype, reader in readers.items():
filenames = getattr(args, ftype)
logger.debug('Filenames [%s]: %s', ftype, filenames)
if filenames:
for filename in filenames:
results.append(pool.apply_async(reader, args=(filename, params_dict)))
dfs = [r.get() for r in results]
pool.close()
pool.join()
if any(x is None for x in dfs):
logger.critical('There were errors when reading input.')
return
logger.info('Starting analysis...')
logger.debug('%d dfs collected.', len(dfs))
data = pd.concat(dfs, axis=0)
data.index = range(len(data))
data['file'] = data['file'].astype('category')
logger.debug('Memory usage:')
logger.debug(data.memory_usage(deep=True))
return data
def read_config_file(fname):
params = ConfigParser(delimiters=('=', ':'), comment_prefixes=('#'), inline_comment_prefixes=('#'))
params.read(AA_STAT_PARAMS_DEFAULT)
if fname:
if not os.path.isfile(fname):
logger.error('Configuration file not found: %s', fname)
else:
params.read(fname)
else:
logger.info('Using default parameters for AA_stat.')
return params
def get_parameters(params):
"""
Reads parameters from the cfg file into one dict.
Returns dict.
"""
params_dict = defaultdict()
# data
params_dict['decoy_prefix'] = params.get('data', 'decoy prefix')
params_dict['decoy_prefix_list'] = re.split(r',\s*', params.get('data', 'decoy prefix list'))
params_dict['FDR'] = params.getfloat('data', 'FDR')
params_dict['labels'] = params.get('data', 'labels').strip().split()
params_dict['rule'] = params.get('data', 'cleavage rule')
# csv input
params_dict['csv_delimiter'] = params.get('csv input', 'delimiter')
params_dict['proteins_delimeter'] = params.get('csv input', 'proteins delimiter')
params_dict['proteins_column'] = params.get('csv input', 'proteins column')
params_dict['peptides_column'] = params.get('csv input', 'peptides column')
params_dict['mass_shifts_column'] = params.get('csv input', 'mass shift column')
params_dict['score_column'] = params.get('csv input', 'score column')
params_dict['measured_mass_column'] = params.get('csv input', 'measured mass column')
params_dict['calculated_mass_column'] = params.get('csv input', 'calculated mass column')
params_dict['rt_column'] = params.get('csv input', 'retention time column')
params_dict['next_aa_column'] = params.get('csv input', 'next aa column')
params_dict['prev_aa_column'] = params.get('csv input', 'previous aa column')
params_dict['spectrum_column'] = params.get('csv input', 'spectrum column')
params_dict['charge_column'] = params.get('csv input', 'charge column')
params_dict['mods_column'] = params.get('csv input', 'modifications column')
params_dict['score_ascending'] = params.getboolean('csv input', 'score ascending')
# general
params_dict['bin_width'] = params.getfloat('general', 'width of bin in histogram')
params_dict['so_range'] = tuple(float(x) for x in params.get('general', 'open search range').split(','))
params_dict['walking_window'] = params.getfloat('general', 'shifting window')
params_dict['FDR_correction'] = params.getboolean('general', 'FDR correction')
params_dict['processes'] = params.getint('general', 'processes')
params_dict['zero_window'] = params.getfloat('general', 'zero peak window')
params_dict['prec_acc'] = params.getfloat('general', 'mass shift tolerance')
params_dict['zero bin tolerance'] = params.getfloat('general', 'zero shift mass tolerance')
params_dict['zero min intensity'] = params.getfloat('general', 'zero shift minimum intensity')
params_dict['min_peptides_for_mass_calibration'] = params.getint('general', 'minimum peptides for mass calibration')
params_dict['specific_mass_shift_flag'] = params.getboolean('general', 'use specific mass shift window')
params_dict['specific_window'] = [float(x) for x in params.get('general', 'specific mass shift window').split(',')]
params_dict['figsize'] = tuple(float(x) for x in params.get('general', 'figure size in inches').split(','))
params_dict['calibration'] = params.get('general', 'mass calibration')
params_dict['artefact_thresh'] = params.getfloat('general', 'artefact detection threshold')
params_dict['html_truncate'] = params.getint('general', 'html info truncation length')
#clustering
params_dict['clustering'] = params.getboolean('clustering', 'use clustering')
params_dict['eps_adjust'] = params.getfloat('clustering', 'dbscan eps factor')
params_dict['min_samples'] = params.getint('clustering', 'dbscan min_samples')
params_dict['clustered_pct_min'] = params.getfloat('clustering', 'total clustered peptide percentage minimum')
params_dict['cluster_span_min'] = params.getfloat('clustering', 'cluster span percentage minimum')
# fit
params_dict['shift_error'] = params.getint('fit', 'shift error')
params_dict['max_deviation_sigma'] = params.getfloat('fit', 'standard deviation threshold for sigma')
params_dict['max_deviation_height'] = params.getfloat('fit', 'standard deviation threshold for height')
params_dict['fit batch'] = params.getint('fit', 'batch')
# localization
params_dict['ion_types'] = tuple(params.get('localization', 'ion type').replace(' ', '').split(','))
params_dict['frag_acc'] = params.getfloat('localization', 'fragment ion mass tolerance')
params_dict['candidate threshold'] = params.getfloat('localization', 'frequency threshold')
params_dict['min_spec_matched'] = params.getint('localization', 'minimum matched peaks')
params_dict['force_term_loc'] = params.getboolean('localization', 'always try terminal localization')
params_dict['use_all_loc'] = params.getboolean('localization', 'try all localizations')
# modifications
params_dict['variable_mods'] = params.getint('modifications', 'recommend variable modifications')
params_dict['multiple_mods'] = params.getboolean('modifications', 'recommend multiple modifications on residue')
params_dict['fix_mod_zero_thresh'] = params.getfloat('modifications', 'fixed modification intensity threshold')
params_dict['min_fix_mod_pep_count_factor'] = params.getfloat('modifications', 'peptide count factor threshold')
params_dict['recommend isotope threshold'] = params.getfloat('modifications', 'isotope error abundance threshold')
params_dict['min_loc_count'] = params.getint('modifications', 'minimum localization count')
params_dict['fix_mod'] = utils.parse_mod_list(params.get('modifications', 'configured fixed modifications'), 'fixed')
params_dict['var_mod'] = utils.parse_mod_list(params.get('modifications', 'configured variable modifications'), 'variable')
return params_dict
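# Illustrative sketch (not part of the original module): load the bundled default config
# and flatten it into the params dict used throughout AA_stat (passing None keeps defaults).
def _example_default_params():
    return get_parameters(read_config_file(None))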
def set_additional_params(params_dict):
if params_dict['specific_mass_shift_flag']:
logger.info('Custom bin: %s', params_dict['specific_window'])
params_dict['so_range'] = params_dict['specific_window'][:]
elif params_dict['so_range'][1] - params_dict['so_range'][0] > params_dict['walking_window']:
window = params_dict['walking_window'] / params_dict['bin_width']
else:
window = (params_dict['so_range'][1] - params_dict['so_range'][0]) / params_dict['bin_width']
if int(window) % 2 == 0:
params_dict['window'] = int(window) + 1
else:
params_dict['window'] = int(window) # should be odd
params_dict['bins'] = np.arange(params_dict['so_range'][0],
params_dict['so_range'][1] + params_dict['bin_width'], params_dict['bin_width'])
_rule_to_enz = {
'trypsin': {'cut': 'KR', 'no_cut': 'P', 'sense': 'C'},
}
def get_params_dict(args):
logger.debug('Received args: %s', args)
fname = args.params
outdir = args.dir
params = read_config_file(fname)
params_dict = get_parameters(params)
set_additional_params(params_dict)
if args.processes is not None:
params_dict['processes'] = args.processes
params_dict['output directory'] = outdir
if args.pepxml:
fmod, vmod = utils.get_fix_var_modifications(args.pepxml[0], params_dict['labels'])
params_dict['fix_mod'] = fmod
params_dict['var_mod'] = utils.format_grouped_keys(utils.group_terminal(vmod), params_dict)
params_dict['enzyme'] = utils.get_specificity(args.pepxml[0])
else:
if args.fmods:
if '@' in args.fmods:
params_dict['fix_mod'] = utils.parse_mod_list(args.fmods, 'fixed')
else:
params_dict['fix_mod'] = ast.literal_eval(args.fmods)
elif not params_dict['fix_mod']:
logger.info('No fixed modifications specified. Use --fmods to configure them.')
if args.vmods:
if '@' in args.vmods:
params_dict['var_mod'] = utils.parse_mod_list(args.vmods, 'variable')
else:
params_dict['var_mod'] = ast.literal_eval(args.vmods)
elif not params_dict['var_mod']:
logger.info('No variable modifications specified. Use --vmods to configure them.')
if args.enzyme:
if '|' in args.enzyme:
params_dict['enzyme'] = utils.convert_tandem_cleave_rule_to_regexp(args.enzyme, params_dict)
else:
params_dict['enzyme'] = ast.literal_eval(args.enzyme)
elif params_dict['rule'] in _rule_to_enz:
params_dict['enzyme'] = _rule_to_enz[params_dict['rule']]
logger.info('Using standard specificity for %s.', params_dict['rule'])
else:
logger.info('Enzyme not specified. Use --enzyme to configure.')
params_dict['enzyme'] = None
return params_dict
_format_globs = {
'pepxml': ['*.pepXML', '*.pep.xml'],
'csv': ['*.csv'],
'mzml': ['*.mzML'],
'mgf': ['*.mgf'],
}
def resolve_filenames(args):
for fformat, gs in _format_globs.items():
value = getattr(args, fformat)
if value:
logger.debug('Received %s list: %s', fformat, value)
out = []
for val in value:
if os.path.isdir(val):
for g in gs:
files = glob.glob(os.path.join(val, g))
logger.debug('Found %d files for glob %s in %s', len(files), g, val)
out.extend(files)
else:
out.append(val)
logger.debug('Final %s list: %s', fformat, out)
setattr(args, fformat, out)
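# Illustrative sketch (not part of the original module): directories passed on the command
# line are expanded in place into matching file lists; the paths below are hypothetical.
def _example_resolve_filenames():
    import argparse
    args = argparse.Namespace(pepxml=['open_search_results/'], csv=[], mzml=['raw_spectra/'], mgf=[])
    resolve_filenames(args)
    return args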
def table_path(dir, ms):
return os.path.join(dir, ms + '.csv')
def save_df(ms, df, save_directory, params_dict):
peptide = params_dict['peptides_column']
spectrum = params_dict['spectrum_column']
prev_aa = params_dict['prev_aa_column']
next_aa = params_dict['next_aa_column']
table = df[[peptide, spectrum]].copy()
peptide1 = df.apply(utils.get_column_with_mods, axis=1, args=(params_dict,))
table[peptide] = df[prev_aa].str[0] + '.' + peptide1 + '.' + df[next_aa].str[0]
with open(table_path(save_directory, ms), 'w') as out:
table.to_csv(out, index=False, sep='\t')
def save_peptides(data, save_directory, params_dict):
for ms_label, (ms, df) in data.items():
save_df(ms_label, df, save_directory, params_dict)


# ---- AA_stat/gui/logging.py ----
import logging
import logging.handlers
import socketserver
import struct
import pickle
import socket
import tkinter as tk
class LoggingToGUI(logging.Handler):
# https://stackoverflow.com/a/18194597/1258041
def __init__(self, console):
logging.Handler.__init__(self)
self.console = console
def emit(self, message):
formattedMessage = self.format(message)
self.console.configure(state=tk.NORMAL)
self.console.insert(tk.END, formattedMessage + '\n')
self.console.configure(state=tk.DISABLED)
self.console.see(tk.END)
class LogRecordStreamHandler(socketserver.StreamRequestHandler):
"""Handler for a streaming logging request."""
def __init__(self, *args, **kwargs):
socketserver.StreamRequestHandler.__init__(self, *args, **kwargs)
def handle(self):
"""
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unPickle(chunk)
record = logging.makeLogRecord(obj)
self.handleLogRecord(record)
def unPickle(self, data):
return pickle.loads(data)
def handleLogRecord(self, record):
self._record_handler.handle(record)
class LogRecordSocketReceiver(socketserver.ThreadingTCPServer):
"""
Simple TCP socket-based logging receiver suitable for testing.
"""
allow_reuse_address = True
daemon_threads = True
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
socketserver.ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = 0
self.timeout = 1
self.logname = None
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [], self.timeout)
if rd:
self.handle_request()
abort = self.abort
tcpserver = None
def _socket_listener_worker(logger, port, handler):
global tcpserver
try:
tcpserver = LogRecordSocketReceiver(port=port, handler=handler)
except socket.error as e:
logger.error('Couldn\'t start TCP server: %s', e)
return
if port == 0:
port = tcpserver.socket.getsockname()[1]
tcpserver.serve_until_stopped()
def get_logger():
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
formatter = logging.Formatter('{levelname:>8}: {asctime} {message}',
datefmt='[%H:%M:%S]', style='{')
stream_handler.setFormatter(formatter)
logging.getLogger('matplotlib').setLevel(logging.WARNING)
tcpHandler = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
tcpHandler.setLevel(logging.INFO)
logging.getLogger('AA_stat').addHandler(tcpHandler)
return logger
def get_aastat_handler(log_txt):
class AAstatHandler(LogRecordStreamHandler):
def __init__(self, *args, **kwargs):
self._record_handler = LoggingToGUI(log_txt)
formatter = logging.Formatter('{levelname:>8}: {asctime} {message}',
datefmt='[%H:%M:%S]', style='{')
self._record_handler.setFormatter(formatter)
super().__init__(*args, **kwargs)
return AAstatHandler | AA-stat | /AA_stat-2.5.5-py3-none-any.whl/AA_stat/gui/logging.py | logging.py |
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from tkinter.filedialog import askopenfilenames, askopenfilename, askdirectory, asksaveasfilename
from functools import partial
import os
import threading
import sys
import logging
import logging.handlers
import pathlib
import webbrowser
import tempfile
from idlelib.tooltip import Hovertip
from . import logging as logutils
from .shortcut import create_shortcut
from ..version import version
from .. import AA_stat, io
AA_STAT_VERSION = version
INPUT_FILES = []
INPUT_SPECTRA = []
OUTDIR = '.'
PARAMS = None
PARAMS_TMP = None
logger = logutils.get_logger()
class Args:
"""Emulates parsed args from argparse for AA_stat"""
pepxml = mgf = mzml = csv = None
params = PARAMS
dir = '.'
verbosity = 1
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def get_input_filenames(label, activate):
fnames = askopenfilenames(title='Open search results',
filetypes=[('pepXML file', '*.pepXML'), ('XML file', '*.pep.xml'), ('CSV file', '*.[ct]sv')],
multiple=True)
if fnames:
INPUT_FILES[:] = fnames
label['text'] = f'{len(fnames)} open search files selected.' #+ '\n'.join(os.path.basename(f) for f in fnames)
activate['state'] = tk.NORMAL
def get_spectrum_filenames(label):
fnames = askopenfilenames(title='Spectrum files',
filetypes=[('mzML file', '*.mzML'), ('MGF file', '*.mgf')],
multiple=True)
if fnames:
INPUT_SPECTRA[:] = fnames
label['text'] = f'{len(fnames)} spectrum files selected.' # + '\n'.join(os.path.basename(f) for f in fnames)
def get_outdir_name(label):
global OUTDIR
dirname = askdirectory(title='Output directory')
if dirname:
OUTDIR = dirname
label['text'] = 'Output directory: ' + os.path.abspath(dirname)
def get_params(label):
global PARAMS
PARAMS = askopenfilename(title='Parameters file',
filetypes=[('Config files', '*.cfg'), ('INI files', '*.ini'), ('Text files', '*.txt'), ('All files', '*.*')])
label['text'] = "Loaded parameters: " + PARAMS
def _save_params(txt, fname):
global PARAMS
PARAMS = fname
with open(fname, 'w') as f:
f.write(txt.get('1.0', tk.END))
def save_params(txt, writeback):
global PARAMS_TMP
if PARAMS is None:
PARAMS_TMP = params = tempfile.NamedTemporaryFile(delete=False, suffix='.cfg').name
logger.debug('Saving params to a temporary file: %s', params)
writeback['text'] = "Using temporary parameters."
else:
PARAMS_TMP = None
params = PARAMS
logger.debug('Saving params to file: %s', params)
writeback['text'] = "Using edited file: " + PARAMS
_save_params(txt, params)
def save_params_as(txt, writeback):
global PARAMS
PARAMS = asksaveasfilename(title='Save params as...')
save_params(txt, writeback)
def edit_params(w, writeback):
window = tk.Toplevel(w)
window.title('AA_stat GUI: edit parameters')
window.geometry('900x600')
params_txt = tk.Text(window)
params = PARAMS or io.AA_STAT_PARAMS_DEFAULT
with open(params) as f:
for line in f:
params_txt.insert(tk.END, line)
params_txt.pack(fill=tk.BOTH, expand=True)
save_frame = tk.Frame(window)
save_btn = tk.Button(save_frame, text="Save", command=partial(save_params, params_txt, writeback))
save_btn.pack(side=tk.LEFT)
save_as_btn = tk.Button(save_frame, text="Save As...", command=partial(save_params_as, params_txt, writeback))
save_as_btn.pack(side=tk.LEFT)
save_frame.pack()
def get_aa_stat_version():
if AA_STAT_VERSION:
return 'AA_stat v' + AA_STAT_VERSION
else:
return 'AA_stat not installed.'
def get_aa_stat_args():
pepxml, csv = [], []
for f in INPUT_FILES:
ext = os.path.splitext(f)[1].lower()
if ext in {'.pepxml', '.xml'}:
pepxml.append(f)
else:
csv.append(f)
mzml, mgf = [], []
for f in INPUT_SPECTRA:
ext = os.path.splitext(f)[1].lower()
if ext == '.mzml':
mzml.append(f)
else:
mgf.append(f)
args = Args(pepxml=pepxml, mgf=mgf, csv=csv, mzml=mzml, dir=OUTDIR, params=PARAMS)
params_dict = io.get_params_dict(args)
return args, params_dict
def start_aastat(t):
t.start()
def run_aastat(run_btn, status_to, log_to):
run_btn['state'] = tk.DISABLED
status_to['text'] = 'Checking arguments...'
args, params_dict = get_aa_stat_args()
status_to['text'] = 'Running AA_stat...'
AA_stat.AA_stat(params_dict, args)
status_to['text'] = 'Done.'
run_btn['state'] = tk.NORMAL
run_btn['text'] = 'View report'
run_btn['command'] = partial(view_report, run_btn)
def view_report(btn):
url = (pathlib.Path(os.path.abspath(OUTDIR)) / 'report.html').as_uri()
webbrowser.open(url)
def main():
if len(sys.argv) == 2 and sys.argv[1] == '--create-shortcut':
create_shortcut()
return
window = tk.Tk()
window.title('AA_stat GUI')
window.geometry('900x600')
try:
try:
window.tk.call('tk_getOpenFile', '-foobarbaz')
except tk.TclError:
pass
window.tk.call('set', '::tk::dialog::file::showHiddenBtn', '1')
window.tk.call('set', '::tk::dialog::file::showHiddenVar', '0')
except:
pass
top_frame = tk.Frame()
input_frame = tk.Frame(master=top_frame)
spectra_frame = tk.Frame(master=top_frame)
selected_spectra_lbl = tk.Label(master=spectra_frame, text="(optional)", justify='left')
get_spectra_btn = tk.Button(master=spectra_frame, text="Select mzML or MGF files",
command=partial(get_spectrum_filenames, selected_spectra_lbl), width=20)
spectra_tip_text = ("If you provide original mzML or MGF files,\n"
"AA_stat will perform MS/MS-based localization of mass shifts\nand recommend variable modifications.")
Hovertip(spectra_frame, text=spectra_tip_text)
get_spectra_btn.pack(side=tk.LEFT, anchor=tk.E)
selected_spectra_lbl.pack(side=tk.LEFT, padx=15, anchor=tk.W)
dir_frame = tk.Frame(master=top_frame)
dir_lbl = tk.Label(master=dir_frame, text="Output directory: " + os.path.abspath(OUTDIR), justify='left')
get_dir_btn = tk.Button(master=dir_frame, text="Select output directory",
command=partial(get_outdir_name, dir_lbl), width=20)
get_dir_btn.pack(side=tk.LEFT, anchor=tk.E)
dir_lbl.pack(side=tk.LEFT, anchor=tk.W, padx=15)
main_frame = tk.Frame()
run_btn = tk.Button(master=main_frame, text='Run AA_stat', state=tk.DISABLED)
status_lbl = tk.Label(master=main_frame, text=get_aa_stat_version())
log_txt = ScrolledText(master=main_frame, state=tk.DISABLED)
t = threading.Thread(target=run_aastat, args=(run_btn, status_lbl, log_txt), name='aastat-runner')
t.daemon = True
run_btn['command'] = partial(start_aastat, t)
AAstatHandler = logutils.get_aastat_handler(log_txt)
log_t = threading.Thread(target=logutils._socket_listener_worker,
args=(logger, logging.handlers.DEFAULT_TCP_LOGGING_PORT, AAstatHandler),
name='aastat-listener')
log_t.start()
logger.debug('AA_stat logging initiated.')
log_txt.pack(fill=tk.BOTH, expand=True)
run_btn.pack()
status_lbl.pack()
selected_os_lbl = tk.Label(master=input_frame, text="No files selected", justify='left')
get_os_files_btn = tk.Button(master=input_frame, text="Select open search files",
command=partial(get_input_filenames, selected_os_lbl, run_btn), width=20)
get_os_files_btn.pack(side=tk.LEFT, anchor=tk.E)
selected_os_lbl.pack(side=tk.LEFT, padx=15, anchor=tk.W)
Hovertip(input_frame, text="Specify open search results in pepXML or CSV format.")
params_frame = tk.Frame(master=top_frame)
params_lbl = tk.Label(master=params_frame, text="Using default parameters.")
load_params_btn = tk.Button(master=params_frame, width=10, padx=4, text="Load params",
command=partial(get_params, params_lbl))
edit_params_btn = tk.Button(master=params_frame, width=10, padx=4, text="Edit params",
command=partial(edit_params, window, params_lbl))
load_params_btn.pack(side=tk.LEFT, fill=tk.X, anchor=tk.E)
edit_params_btn.pack(side=tk.LEFT, fill=tk.X, anchor=tk.E)
params_lbl.pack(side=tk.LEFT, fill=tk.X, anchor=tk.W, padx=15)
input_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
spectra_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
dir_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
params_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
top_frame.pack()
main_frame.pack(fill=tk.BOTH, expand=True)
if not AA_STAT_VERSION:
for btn in [get_spectra_btn, get_os_files_btn, get_dir_btn]:
btn['state'] = tk.DISABLED
window.mainloop()
if PARAMS_TMP:
logger.debug('Removing temporary file %s', PARAMS_TMP)
os.remove(PARAMS_TMP)
logutils.tcpserver.abort = 1
logutils.tcpserver.server_close()
sys.exit() # needed because there are still working (daemon) threads
if __name__ == '__main__':
main() | AA-stat | /AA_stat-2.5.5-py3-none-any.whl/AA_stat/gui/gui.py | gui.py |
### AAFTF - Automatic Assembly For The Fungi
*Jason Stajich and Jon Palmer*

Requirements
===================
- BBTools - https://jgi.doe.gov/data-and-tools/bbtools/
- Trimmomatic - http://www.usadellab.org/cms/?page=trimmomatic (Optional)
- bowtie2 - http://bowtie-bio.sourceforge.net/bowtie2/index.shtml (Optional)
- bwa - https://github.com/lh3/bwa
- Pilon - https://github.com/broadinstitute/pilon/wiki
- sourmash (>=v3.5)- https://sourmash.readthedocs.io/ (install via conda/pip)
- NCBI BLAST+ - ftp://ftp.ncbi.nlm.nih.gov/blast/executables/LATEST
- minimap2 - https://github.com/lh3/minimap2
Assemblers
- SPAdes - http://cab.spbu.ru/software/spades/
- megahit - https://github.com/voutcn/megahit
- dipspades - (SPAdes 3.11.1 - note it is not part of later SPAdes packages) http://cab.spbu.ru/files/release3.11.1/dipspades_manual.html
- NOVOplasty - https://github.com/ndierckx/NOVOPlasty
Authors
============
* Jason Stajich [@hyphaltip](https://github.com/hyphaltip) - http://lab.stajich.org
* Jon Palmer [@nextgenusfs](https://github.com/nextgenusfs) - https://twitter.com/jonpalmer2013
Install
===========
We are working on simplifying the install, ie getting on Pypi and bioconda. Currently you could create conda environment and install like this:
```
conda create -n aaftf -c bioconda "python>=3.6" bbmap trimmomatic bowtie2 bwa pilon sourmash \
blast minimap2 spades megahit novoplasty biopython fastp
```
And then install this repo with git/pip:
```
$ conda activate aaftf
$ python -m pip install git+https://github.com/stajichlab/AAFTF.git
```
Notes
===========
This is partially a python re-write of [JAAWS](https://github.com/nextgenusfs/jaaws) which was a unix shell based cleanup and assembly tool written by Jon.
Steps / Procedures
==================
1. trim Trim FASTQ input reads - with BBMap
2. mito De novo assemble mitochondrial genome
3. filter Filter contaminanting reads - with BBMap
4. assemble Assemble reads - with SPAdes
5. vecscreen Vector and Contaminant Screening of assembled contigs - with BlastN based method to replicate NCBI screening
6. sourpurge Purge contigs based on sourmash results - with sourmash
7. rmdup Remove duplicate contigs - using minimap2 to find duplicates
8. pilon Polish contig sequences with Pilon - uses Pilon
9. sort Sort contigs by length and rename FASTA headers
10. assess Assess completeness of genome assembly
11. pipeline Run AAFTF pipeline all in one go.
# Typical runs
## Trimming and Filtering
Trimming options spelled out:
```
usage: AAFTF trim [-h] [-q] [-o BASENAME] [-c cpus] [-ml MINLEN] -l LEFT
[-r RIGHT] [-v] [--pipe] [--method {bbduk,trimmomatic}]
[-m MEMORY] [--trimmomatic trimmomatic_jar]
[--trimmomatic_adaptors TRIMMOMATIC_ADAPTORS]
[--trimmomatic_clip TRIMMOMATIC_CLIP]
[--trimmomatic_leadingwindow TRIMMOMATIC_LEADINGWINDOW]
[--trimmomatic_trailingwindow TRIMMOMATIC_TRAILINGWINDOW]
[--trimmomatic_slidingwindow TRIMMOMATIC_SLIDINGWINDOW]
[--trimmomatic_quality TRIMMOMATIC_QUALITY]
This command trims reads in FASTQ format to remove low quality reads and trim
adaptor sequences
optional arguments:
-h, --help show this help message and exit
-q, --quiet Do not output warnings to stderr
-o BASENAME, --out BASENAME
Output basename, default to base name of --left reads
-c cpus, --cpus cpus Number of CPUs/threads to use.
-ml MINLEN, --minlen MINLEN
Minimum read length after trimming, default: 75
-l LEFT, --left LEFT left/forward reads of paired-end FASTQ or single-end
FASTQ.
-r RIGHT, --right RIGHT
right/reverse reads of paired-end FASTQ.
-v, --debug Provide debugging messages
--pipe AAFTF is running in pipeline mode
--method {bbduk,trimmomatic}
Program to use for adapter trimming
-m MEMORY, --memory MEMORY
Max Memory (in GB)
--trimmomatic trimmomatic_jar, --jar trimmomatic_jar
Trimmomatic JAR path
Trimmomatic options:
Trimmomatic trimming options
--trimmomatic_adaptors TRIMMOMATIC_ADAPTORS
Trimmomatic adaptor file, default: TruSeq3-PE.fa
--trimmomatic_clip TRIMMOMATIC_CLIP
Trimmomatic clipping, default:
ILLUMINACLIP:TruSeq3-PE.fa:2:30:10
--trimmomatic_leadingwindow TRIMMOMATIC_LEADINGWINDOW
Trimmomatic window processing arguments, default:
LEADING:3
--trimmomatic_trailingwindow TRIMMOMATIC_TRAILINGWINDOW
Trimmomatic window processing arguments, default:
TRAILING:3
--trimmomatic_slidingwindow TRIMMOMATIC_SLIDINGWINDOW
Trimmomatic window processing arguments, default:
SLIDINGWINDOW:4:15
--trimmomatic_quality TRIMMOMATIC_QUALITY
Trimmomatic quality encoding -phred33 or phred64
```
Example usage:
```
MEM=128 # 128gb
BASE=STRAINX
READSDIR=reads
TRIMREAD=reads_trimmed
CPU=8
AAFTF trim --method bbduk --memory $MEM -c $CPU \
--left $READSDIR/${BASE}_R1.fq.gz --right $READSDIR/${BASE}_R2.fq.gz \
-o $TRIMREAD/${BASE}
# this step make take a lot of memory depending on how many filtering libraries you use
AAFTF filter -c $CPU --memory $MEM --aligner bbduk \
-o $TRIMREAD/${BASE} --left $TRIMREAD/${BASE}_1P.fastq.gz --right $TRIMREAD/${BASE}_2P.fastq.gz
```
## Assembly
The specified assembler can be made through the `--method` option.
The full set of options are below.
```
usage: AAFTF assemble [-h] [-q] [--method METHOD] -o OUT [-w WORKDIR]
[-c cpus] [-m MEMORY] [-l LEFT] [-r RIGHT] [-v]
[--tmpdir TMPDIR] [--assembler_args ASSEMBLER_ARGS]
[--haplocontigs] [--pipe]
Run assembler on cleaned reads
optional arguments:
-h, --help show this help message and exit
-q, --quiet Do not output warnings to stderr
--method METHOD Assembly method: spades, dipspades, megahit
-o OUT, --out OUT Output assembly FASTA
-w WORKDIR, --workdir WORKDIR
assembly output directory
-c cpus, --cpus cpus Number of CPUs/threads to use.
-m MEMORY, --memory MEMORY
Memory (in GB) setting for SPAdes. Default is 32
-l LEFT, --left LEFT Left (Forward) reads
-r RIGHT, --right RIGHT
Right (Reverse) reads
-v, --debug Print Spades stdout to terminal
--tmpdir TMPDIR Assembler temporary dir
--assembler_args ASSEMBLER_ARGS
Additional SPAdes/Megahit arguments
--haplocontigs For dipSPAdes take the haplocontigs file
--pipe AAFTF is running in pipeline mode
```
```
CPU=24
MEM=96
LEFT=$TRIMREAD/${BASE}_filtered_1.fastq.gz
RIGHT=$TRIMREAD/${BASE}_filtered_2.fastq.gz
WORKDIR=working_AAFTF
OUTDIR=genomes
ASMFILE=$OUTDIR/${BASE}.spades.fasta
mkdir -p $WORKDIR $OUTDIR
AAFTF assemble -c $CPU --mem $MEM \
--left $LEFT --right $RIGHT \
-o $ASMFILE -w $WORKDIR/spades_$BASE
```
## vectrim
```
CPU=16
MEM=16
LEFT=$TRIMREAD/${BASE}_filtered_1.fastq.gz
RIGHT=$TRIMREAD/${BASE}_filtered_2.fastq.gz
WORKDIR=working_AAFTF
OUTDIR=genomes
ASMFILE=$OUTDIR/${BASE}.spades.fasta
VECTRIM=$OUTDIR/${BASE}.vecscreen.fasta
mkdir -p $WORKDIR $OUTDIR
AAFTF vecscreen -c $CPU -i $ASMFILE -o $VECTRIM
```
| AAFTF | /AAFTF-0.4.1.tar.gz/AAFTF-0.4.1/README.md | README.md |
import argparse
import os
import sys
import logging
from Crypto.PublicKey import RSA
from PyQt5.QtWidgets import QApplication, QMessageBox
from common.settings import DEFAULT_IP_ADDRESS, DEFAULT_PORT
from common.decos import log
from common.errors import ServerError
from client.database import ClientDatabase
from client.transport import ClientTransport
from client.main_window import ClientMainWindow
from client.start_dialog import UserNameDialog
# Инициализация клиентского логера:
CLIENT_LOGGER = logging.getLogger('client')
# Парсер аргументов коммандной строки
@log
def args_handler():
'''
Парсер аргументов командной строки, возвращает кортеж из 4 элементов
адрес сервера, порт, имя пользователя, пароль.
Выполняет проверку на корректность номера порта.
'''
parser = argparse.ArgumentParser()
parser.add_argument('addr', default=DEFAULT_IP_ADDRESS, nargs='?')
parser.add_argument('port', type=int, default=DEFAULT_PORT, nargs='?')
parser.add_argument('-n', '--name', default=None, nargs='?')
parser.add_argument('-p', '--password', default='', nargs='?')
args = parser.parse_args()
server_address = args.addr
server_port = args.port
client_name = args.name
client_passwd = args.password
# Проверим подходит ли номер порта:
if not 1023 < server_port < 65535:
CLIENT_LOGGER.critical(f'Попытка запуска с неподходящим номером '
f'порта: {server_port}. Номер порта должен '
f'находиться в диапозоне от 1024 до 65535')
exit(1)
return server_address, server_port, client_name, client_passwd
# Основная функция клиента
if __name__ == '__main__':
print('Консольный мессенджер. Клиентский модуль')
# Загружаем параметы коммандной строки:
server_address, server_port, client_name, client_passwd = args_handler()
# Создаём клиентокое приложение
client_app = QApplication(sys.argv)
# Если имя пользователя не было указано в командной строке,
# то запросим его:
start_dialog = UserNameDialog()
if not client_name or not client_passwd:
client_app.exec_()
# Если пользователь ввёл имя и нажал ОК, то сохраняем ведённое и
# удаляем объект, иначе выходим:
if start_dialog.ok_pressed:
client_name = start_dialog.client_name.text()
client_passwd = start_dialog.client_passwd.text()
else:
exit(0)
CLIENT_LOGGER.info(f'Клиент запущен с параметрами: '
f'IP сервера: {server_address}, '
f'порт сервера: {server_port}, '
f'имя пользователя {client_name}')
# Загружаем ключи с файла, если же файла нет, то генерируем новую пару:
# dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.getcwd()
key_file = os.path.join(dir_path, f'{client_name}.key')
if not os.path.exists(key_file):
keys = RSA.generate(2048, os.urandom)
with open(key_file, 'wb') as key:
key.write(keys.export_key())
else:
with open(key_file, 'rb') as key:
keys = RSA.import_key(key.read())
keys.publickey().export_key()
# Создаём объект базы данных
database = ClientDatabase(client_name)
# Создаём объект - транспорт и запускаем транспортный поток:
try:
transport = ClientTransport(
server_port,
server_address,
database,
client_name,
client_passwd,
keys)
except ServerError as error:
message = QMessageBox()
message.critical(start_dialog, 'Ошибка сервера', error.text)
exit(1)
transport.setDaemon(True)
transport.start()
# Удалим объект диалога за ненадобностью
del start_dialog
# Создаём GUI
main_window = ClientMainWindow(database, transport, keys)
main_window.make_connection(transport)
main_window.setWindowTitle(f'Messenger - alpha release - {client_name}')
client_app.exec_()
# Раз графическая оболочка закрылась, закрываем транспорт
transport.transport_shutdown()
transport.join() | AASMessenger_Client | /AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client.py | client.py |
import logging
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton
from PyQt5.QtCore import Qt
# Инициализация клиентского логера:
CLIENT_LOGGER = logging.getLogger('client')
# Диалог выбора контакта для добавления:
class AddContactDialog(QDialog):
'''
Диалог добавления пользователя в список контактов.
Предлагает пользователю список возможных контактов и
добавляет выбранный в контакты.
'''
def __init__(self, transport, database):
super().__init__()
self.transport = transport
self.database = database
self.setFixedSize(350, 120)
self.setWindowTitle('Выберите контакт для добавления:')
self.setAttribute(Qt.WA_DeleteOnClose)
self.setModal(True)
self.selector_label = QLabel('Выберите контакт для добавления:', self)
self.selector_label.setFixedSize(200, 20)
self.selector_label.move(10, 0)
self.selector = QComboBox(self)
self.selector.setFixedSize(200, 20)
self.selector.move(10, 30)
self.btn_refresh = QPushButton('Обновить список', self)
self.btn_refresh.setFixedSize(100, 30)
self.btn_refresh.move(60, 60)
self.btn_ok = QPushButton('Добавить', self)
self.btn_ok.setFixedSize(100, 30)
self.btn_ok.move(230, 20)
self.btn_cancel = QPushButton('Отмена', self)
self.btn_cancel.setFixedSize(100, 30)
self.btn_cancel.move(230, 60)
self.btn_cancel.clicked.connect(self.close)
# Заполняем список возможных контактов:
self.possible_contacts_update()
# Назначаем действие на кнопку обновить:
self.btn_refresh.clicked.connect(self.update_possible_contacts)
def possible_contacts_update(self):
'''
Метод заполнения списка возможных контактов.
Создаёт список всех зарегистрированных пользователей
за исключением уже добавленных в контакты и самого себя.
'''
self.selector.clear()
# Множества всех контактов и контактов клиента:
contacts_list = set(self.database.get_contacts())
users_list = set(self.database.get_users())
# Удалим сами себя из списка пользователей,
# чтобы нельзя было добавить самого себя:
users_list.remove(self.transport.username)
# Добавляем список возможных контактов:
self.selector.addItems(users_list - contacts_list)
def update_possible_contacts(self):
'''
Метод обновления списка возможных контактов. Запрашивает с сервера
список известных пользователей и обносляет содержимое окна.
'''
try:
self.transport.user_list_update()
except OSError:
pass
else:
CLIENT_LOGGER.debug('Обновление списка пользователей с сервера '
'выполнено')
self.possible_contacts_update() | AASMessenger_Client | /AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/add_contact.py | add_contact.py |
import socket
import time
import logging
import json
import threading
import hashlib
import hmac
import binascii
from PyQt5.QtCore import pyqtSignal, QObject
from common.utils import send_message, recv_message
from common.settings import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, \
PUBLIC_KEY, RESPONSE, ERROR, DATA, RESPONSE_511, MESSAGE_TEXT, \
DESTINATION, SENDER, MESSAGE, GET_CONTACTS, LIST_INFO, USERS_REQUEST, \
PUBLIC_KEY_REQUEST, ADD_CONTACT, REMOVE_CONTACT, EXIT
from common.errors import ServerError
# Логер и объект блокировки для работы с сокетом:
logger = logging.getLogger('client')
socket_lock = threading.Lock()
class ClientTransport(threading.Thread, QObject):
'''
Класс реализующий транспортную подсистему клиентского
модуля. Отвечает за взаимодействие с сервером.
'''
# Сигналы новое сообщение и потеря соединения:
new_message = pyqtSignal(dict)
message_205 = pyqtSignal()
connection_lost = pyqtSignal()
def __init__(self, port, ip_address, database, username, passwd, keys):
# Вызываем конструктор предка:
threading.Thread.__init__(self)
QObject.__init__(self)
# Класс База данных - работа с базой:
self.database = database
# Имя пользователя:
self.username = username
# Пароль:
self.password = passwd
# Сокет для работы с сервером:
self.transport = None
# Набор ключей для шифрования:
self.keys = keys
# Устанавливаем соединение:
self.connection_init(port, ip_address)
# Обновляем таблицы известных пользователей и контактов:
try:
self.user_list_update()
self.contacts_list_update()
except OSError as err:
if err.errno:
logger.critical(f'Потеряно соединение с сервером.')
raise ServerError('Потеряно соединение с сервером!')
logger.error(
'Timeout соединения при обновлении списков пользователей.')
except json.JSONDecodeError:
logger.critical(f'Потеряно соединение с сервером.')
raise ServerError('Потеряно соединение с сервером!')
# Флаг продолжения работы транспорта:
self.running = True
# Функция инициализации соединения с сервером:
def connection_init(self, port, ip):
# Инициализация сокета и сообщение серверу о нашем появлении:
self.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Таймаут необходим для освобождения сокета:
self.transport.settimeout(5)
# Соединяемся, 5 попыток соединения,
# флаг успеха ставим в True если удалось:
connected = False
for i in range(5):
logger.info(f'Попытка подключения №{i + 1}')
try:
self.transport.connect((ip, port))
except (OSError, ConnectionRefusedError):
pass
else:
connected = True
break
time.sleep(1)
# Если соединится не удалось - исключение
if not connected:
logger.critical('Не удалось установить соединение с сервером')
raise ServerError('Не удалось установить соединение с сервером')
logger.debug('Установлено соединение с сервером')
# Запускаем процедуру авторизации.
# Получаем хэш пароля:
passwd_bytes = self.password.encode('utf-8')
salt = self.username.lower().encode('utf-8')
passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000)
passwd_hash_string = binascii.hexlify(passwd_hash)
# Получаем публичный ключ и декодируем его из байтов:
pubkey = self.keys.publickey().export_key().decode('ascii')
# Авторизируемся на сервере:
with socket_lock:
presense = {
ACTION: PRESENCE,
TIME: time.time(),
USER: {
ACCOUNT_NAME: self.username,
PUBLIC_KEY: pubkey
}
}
# Отправляем серверу приветственное сообщение:
try:
send_message(self.transport, presense)
ans = recv_message(self.transport)
# Если сервер вернул ошибку, бросаем исключение:
if RESPONSE in ans:
if ans[RESPONSE] == 400:
raise ServerError(ans[ERROR])
elif ans[RESPONSE] == 511:
# Если всё нормально, то продолжаем процедуру
# авторизации:
ans_data = ans[DATA]
hash = hmac.new(
passwd_hash_string, ans_data.encode('utf-8'))
digest = hash.digest()
my_ans = RESPONSE_511
my_ans[DATA] = binascii.b2a_base64(
digest).decode('ascii')
send_message(self.transport, my_ans)
self.process_server_ans(recv_message(self.transport))
except (OSError, json.JSONDecodeError):
raise ServerError('Сбой соединения в процессе авторизации.')
def process_server_ans(self, message):
'''Метод обработчик поступающих сообщений с сервера.'''
logger.debug(f'Разбор сообщения от сервера: {message}')
# Если это подтверждение чего-либо:
if RESPONSE in message:
if message[RESPONSE] == 200:
return
elif message[RESPONSE] == 400:
raise ServerError(f'{message[ERROR]}')
elif message[RESPONSE] == 205:
self.user_list_update()
self.contacts_list_update()
self.message_205.emit()
else:
logger.error(
f'Принят неизвестный код подтверждения {message[RESPONSE]}')
# Если это сообщение от пользователя добавляем в базу,
# даём сигнал о новом сообщении:
elif ACTION in message and message[ACTION] == MESSAGE \
and SENDER in message and DESTINATION in message \
and MESSAGE_TEXT in message \
and message[DESTINATION] == self.username:
logger.debug(
f'Получено сообщение от пользователя '
f'{message[SENDER]}:{message[MESSAGE_TEXT]}')
self.new_message.emit(message)
def contacts_list_update(self):
'''Метод обновляющий с сервера список контактов.'''
self.database.contacts_clear()
logger.debug(f'Запрос контакт листа для пользователся {self.name}')
req = {
ACTION: GET_CONTACTS,
TIME: time.time(),
USER: self.username
}
logger.debug(f'Сформирован запрос {req}')
with socket_lock:
send_message(self.transport, req)
ans = recv_message(self.transport)
logger.debug(f'Получен ответ {ans}')
if RESPONSE in ans and ans[RESPONSE] == 202:
for contact in ans[LIST_INFO]:
self.database.add_contact(contact)
else:
logger.error('Не удалось обновить список контактов.')
def user_list_update(self):
'''Метод обновляющий с сервера список пользователей.'''
logger.debug(f'Запрос списка известных пользователей {self.username}')
req = {
ACTION: USERS_REQUEST,
TIME: time.time(),
ACCOUNT_NAME: self.username
}
with socket_lock:
send_message(self.transport, req)
ans = recv_message(self.transport)
if RESPONSE in ans and ans[RESPONSE] == 202:
self.database.add_users(ans[LIST_INFO])
else:
logger.error('Не удалось обновить список известных пользователей.')
def key_request(self, user):
'''Метод запрашивающий с сервера публичный ключ пользователя.'''
logger.debug(f'Запрос публичного ключа для {user}')
req = {
ACTION: PUBLIC_KEY_REQUEST,
TIME: time.time(),
ACCOUNT_NAME: user
}
with socket_lock:
send_message(self.transport, req)
ans = recv_message(self.transport)
if RESPONSE in ans and ans[RESPONSE] == 511:
return ans[DATA]
else:
logger.error(f'Не удалось получить ключ собеседника{user}.')
def add_contact(self, contact):
'''Метод отправляющий на сервер сведения о добавлении контакта.'''
logger.debug(f'Создание контакта {contact}')
req = {
ACTION: ADD_CONTACT,
TIME: time.time(),
USER: self.username,
ACCOUNT_NAME: contact
}
with socket_lock:
send_message(self.transport, req)
self.process_server_ans(recv_message(self.transport))
def remove_contact(self, contact):
'''Метод отправляющий на сервер сведения о удалении контакта.'''
logger.debug(f'Удаление контакта {contact}')
req = {
ACTION: REMOVE_CONTACT,
TIME: time.time(),
USER: self.username,
ACCOUNT_NAME: contact
}
with socket_lock:
send_message(self.transport, req)
self.process_server_ans(recv_message(self.transport))
def transport_shutdown(self):
'''Метод уведомляющий сервер о завершении работы клиента.'''
self.running = False
message = {
ACTION: EXIT,
TIME: time.time(),
ACCOUNT_NAME: self.username
}
with socket_lock:
try:
send_message(self.transport, message)
except OSError:
pass
logger.debug('Транспорт завершает работу.')
time.sleep(0.5)
def send_message(self, to, message):
'''Метод отправляющий на сервер сообщения для пользователя.'''
message_dict = {
ACTION: MESSAGE,
SENDER: self.username,
DESTINATION: to,
TIME: time.time(),
MESSAGE_TEXT: message
}
logger.debug(f'Сформирован словарь сообщения: {message_dict}')
# Необходимо дождаться освобождения сокета для отправки сообщения:
with socket_lock:
send_message(self.transport, message_dict)
self.process_server_ans(recv_message(self.transport))
logger.info(f'Отправлено сообщение для пользователя {to}')
def run(self):
'''Метод содержащий основной цикл работы транспортного потока.'''
logger.debug('Запущен процесс - приёмник собщений с сервера.')
while self.running:
# Отдыхаем секунду и снова пробуем захватить сокет.
# если не сделать тут задержку, то отправка может
# достаточно долго ждать освобождения сокета:
time.sleep(1)
message = None
with socket_lock:
try:
self.transport.settimeout(0.5)
message = recv_message(self.transport)
except OSError as err:
if err.errno:
logger.critical(f'Потеряно соединение с сервером.')
self.running = False
self.connection_lost.emit()
# Проблемы с соединением
except (ConnectionError,
ConnectionAbortedError,
ConnectionResetError,
json.JSONDecodeError,
TypeError):
logger.debug(f'Потеряно соединение с сервером.')
self.running = False
self.connection_lost.emit()
finally:
self.transport.settimeout(5)
# Если сообщение получено, то вызываем функцию обработчик:
if message:
logger.debug(f'Принято сообщение с сервера: {message}')
self.process_server_ans(message) | AASMessenger_Client | /AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/transport.py | transport.py |
import json
import logging
import base64
from PyQt5.QtWidgets import QMainWindow, qApp, QMessageBox
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QBrush, QColor
from PyQt5.QtCore import pyqtSlot, Qt
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from client.main_window_conv import Ui_MainClientWindow
from client.add_contact import AddContactDialog
from client.del_contact import DelContactDialog
from common.errors import ServerError
from common.settings import MESSAGE_TEXT, SENDER
# Инициализация клиентского логера:
logger = logging.getLogger('client')
class ClientMainWindow(QMainWindow):
'''
Класс - основное окно пользователя.
Содержит всю основную логику работы клиентского модуля.
Конфигурация окна создана в QTDesigner и загружается из
конвертированого файла main_window_conv.py
'''
def __init__(self, database, transport, keys):
super().__init__()
# Основные переменные:
self.database = database
self.transport = transport
# Объект - дешифорвщик сообщений с предзагруженным ключём:
self.decrypter = PKCS1_OAEP.new(keys)
# Загружаем конфигурацию окна из дизайнера:
self.ui = Ui_MainClientWindow()
self.ui.setupUi(self)
# Кнопка "Выход":
self.ui.menu_exit.triggered.connect(qApp.exit)
# Кнопка отправить сообщение:
self.ui.btn_send.clicked.connect(self.send_message)
# Кнопка "Добавить контакт":
self.ui.btn_add_contact.clicked.connect(self.add_contact_window)
self.ui.menu_add_contact.triggered.connect(self.add_contact_window)
# Удалить контакт:
self.ui.btn_remove_contact.clicked.connect(self.delete_contact_window)
self.ui.menu_del_contact.triggered.connect(self.delete_contact_window)
# Дополнительные требующиеся атрибуты:
self.contacts_model = None
self.history_model = None
self.messages = QMessageBox()
self.current_chat = None
self.current_chat_key = None
self.encryptor = None
self.ui.list_messages.setHorizontalScrollBarPolicy(
Qt.ScrollBarAlwaysOff)
self.ui.list_messages.setWordWrap(True)
# Даблклик по листу контактов отправляется в обработчик:
self.ui.list_contacts.doubleClicked.connect(self.select_active_user)
self.clients_list_update()
self.set_disabled_input()
self.show()
def set_disabled_input(self):
''' Метод делающий поля ввода неактивными'''
# Надпись - получатель:
self.ui.label_new_message.setText(
'Для выбора получателя дважды кликните на нем в окне контактов.')
self.ui.text_message.clear()
if self.history_model:
self.history_model.clear()
# Поле ввода и кнопка отправки неактивны до выбора получателя:
self.ui.btn_clear.setDisabled(True)
self.ui.btn_send.setDisabled(True)
self.ui.text_message.setDisabled(True)
self.encryptor = None
self.current_chat = None
self.current_chat_key = None
def history_list_update(self):
'''
Метод заполняющий соответствующий QListView
историей переписки с текущим собеседником.
'''
# Получаем историю сортированную по дате:
list = sorted(
self.database.get_history(
self.current_chat),
key=lambda item: item[3])
# Если модель не создана, создадим:
if not self.history_model:
self.history_model = QStandardItemModel()
self.ui.list_messages.setModel(self.history_model)
# Очистим от старых записей:
self.history_model.clear()
# Берём не более 20 последних записей:
length = len(list)
start_index = 0
if length > 20:
start_index = length - 20
# Заполнение модели записями, так-же стоит разделить входящие
# и исходящие выравниванием и разным фоном.
# Записи в обратном порядке, поэтому выбираем их с конца
# и не более 20:
for i in range(start_index, length):
item = list[i]
if item[1] == 'in':
mess = QStandardItem(
f'Входящее от {item[3].replace(microsecond=0)}:\n '
f'{item[2]}')
mess.setEditable(False)
mess.setBackground(QBrush(QColor(255, 213, 213)))
mess.setTextAlignment(Qt.AlignLeft)
self.history_model.appendRow(mess)
else:
mess = QStandardItem(
f'Исходящее от {item[3].replace(microsecond=0)}:\n '
f'{item[2]}')
mess.setEditable(False)
mess.setTextAlignment(Qt.AlignRight)
mess.setBackground(QBrush(QColor(204, 255, 204)))
self.history_model.appendRow(mess)
self.ui.list_messages.scrollToBottom()
def select_active_user(self):
'''Метод обработчик события двойного клика по списку контактов.'''
# Выбранный пользователем (даблклик) находится
# в выделеном элементе в QListView
self.current_chat = self.ui.list_contacts.currentIndex().data()
# Вызываем основную функцию:
self.set_active_user()
def set_active_user(self):
'''Метод активации чата с собеседником.'''
# Запрашиваем публичный ключ пользователя
# и создаём объект шифрования:
try:
self.current_chat_key = self.transport.key_request(
self.current_chat)
logger.debug(f'Загружен открытый ключ для {self.current_chat}')
if self.current_chat_key:
self.encryptor = PKCS1_OAEP.new(
RSA.import_key(self.current_chat_key))
except (OSError , json.JSONDecodeError):
self.current_chat_key = None
self.encryptor = None
logger.debug(f'Не удалось получить ключ для {self.current_chat}')
# Если ключа нет то ошибка, что не удалось начать чат с пользователем:
if not self.current_chat_key:
self.messages.warning(
self,
'Ошибка',
'Для выбранного пользователя нет ключа шифрования.')
return
# Ставим надпись и активируем кнопки:
self.ui.label_new_message.setText(
f'Введите сообщенние для {self.current_chat}:')
self.ui.btn_clear.setDisabled(False)
self.ui.btn_send.setDisabled(False)
self.ui.text_message.setDisabled(False)
# Заполняем окно историю сообщений по требуемому пользователю:
self.history_list_update()
def clients_list_update(self):
'''Метод обновляющий список контактов.'''
contacts_list = self.database.get_contacts()
self.contacts_model = QStandardItemModel()
for i in sorted(contacts_list):
item = QStandardItem(i)
item.setEditable(False)
self.contacts_model.appendRow(item)
self.ui.list_contacts.setModel(self.contacts_model)
def add_contact_window(self):
'''Метод создающий окно - диалог добавления контакта'''
global select_dialog
select_dialog = AddContactDialog(self.transport, self.database)
select_dialog.btn_ok.clicked.connect(
lambda: self.add_contact_action(select_dialog))
select_dialog.show()
def add_contact_action(self, item):
'''Метод обработчк нажатия кнопки "Добавить"'''
new_contact = item.selector.currentText()
self.add_contact(new_contact)
item.close()
def add_contact(self, new_contact):
'''
Метод добавляющий контакт в серверную и клиентсткую BD.
После обновления баз данных обновляет и содержимое окна.
'''
try:
self.transport.add_contact(new_contact)
except ServerError as err:
self.messages.critical(self, 'Ошибка сервера', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
else:
self.database.add_contact(new_contact)
new_contact = QStandardItem(new_contact)
new_contact.setEditable(False)
self.contacts_model.appendRow(new_contact)
logger.info(f'Успешно добавлен контакт {new_contact}')
self.messages.information(self, 'Успех', 'Контакт успешно добавлен.')
def delete_contact_window(self):
'''Метод создающий окно удаления контакта.'''
global remove_dialog
remove_dialog = DelContactDialog(self.database)
remove_dialog.btn_ok.clicked.connect(
lambda: self.delete_contact(remove_dialog))
remove_dialog.show()
def delete_contact(self, item):
'''
Метод удаляющий контакт из серверной и клиентсткой BD.
После обновления баз данных обновляет и содержимое окна.
'''
selected = item.selector.currentText()
try:
self.transport.remove_contact(selected)
except ServerError as err:
self.messages.critical(self, 'Ошибка сервера', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
else:
self.database.del_contact(selected)
self.clients_list_update()
logger.info(f'Успешно удалён контакт {selected}')
self.messages.information(self, 'Успех', 'Контакт успешно удалён.')
item.close()
# Если удалён активный пользователь, то деактивируем поля ввода:
if selected == self.current_chat:
self.current_chat = None
self.set_disabled_input()
def send_message(self):
'''
Функция отправки сообщения текущему собеседнику.
Реализует шифрование сообщения и его отправку.
'''
# Текст в поле, проверяем что поле не пустое
# затем забирается сообщение и поле очищается:
message_text = self.ui.text_message.toPlainText()
self.ui.text_message.clear()
if not message_text:
return
# Шифруем сообщение ключом получателя и упаковываем в base64:
message_text_encrypted = self.encryptor.encrypt(
message_text.encode('utf8'))
message_text_encrypted_base64 = base64.b64encode(
message_text_encrypted)
try:
self.transport.send_message(
self.current_chat,
message_text_encrypted_base64.decode('ascii'))
pass
except ServerError as err:
self.messages.critical(self, 'Ошибка', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
except (ConnectionResetError, ConnectionAbortedError):
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
else:
self.database.save_message(self.current_chat, 'out', message_text)
logger.debug(
f'Отправлено сообщение для {self.current_chat}: {message_text}')
self.history_list_update()
@pyqtSlot(dict)
def message(self, message):
'''
Слот обработчик поступаемых сообщений, выполняет дешифровку
поступаемых сообщений и их сохранение в истории сообщений.
Запрашивает пользователя если пришло сообщение не от текущего
собеседника. При необходимости меняет собеседника.
'''
# Получаем строку байтов:
encrypted_message = base64.b64decode(message[MESSAGE_TEXT])
# Декодируем строку, при ошибке выдаём сообщение и завершаем функцию
try:
decrypted_message = self.decrypter.decrypt(encrypted_message)
except (ValueError , TypeError):
self.messages.warning(
self, 'Ошибка', 'Не удалось декодировать сообщение.')
return
# Сохраняем сообщение в базу и обновляем историю сообщений
# или открываем новый чат:
self.database.save_message(
self.current_chat, 'in', decrypted_message.decode('utf8'))
sender = message[SENDER]
if sender == self.current_chat:
self.history_list_update()
else:
# Проверим есть ли такой пользователь у нас в контактах:
if self.database.check_contact(sender):
# Если есть, спрашиваем и желании открыть с ним чат
# и открываем при желании:
if self.messages.question(
self,
'Новое сообщение',
f'Получено новое сообщение от {sender}, '
f'открыть чат с ним?',
QMessageBox.Yes,
QMessageBox.No) == QMessageBox.Yes:
self.current_chat = sender
self.set_active_user()
else:
print('NO')
# Раз нету,спрашиваем хотим ли добавить юзера в контакты:
if self.messages.question(
self, 'Новое сообщение',
f'Получено новое сообщение от {sender}.\n '
f'Данного пользователя нет в вашем контакт-листе.\n '
f'Добавить в контакты и открыть чат с ним?',
QMessageBox.Yes,
QMessageBox.No) == QMessageBox.Yes:
self.add_contact(sender)
self.current_chat = sender
# Нужно заново сохранить сообщение,
# иначе оно будет потеряно,
# т.к. на момент предыдущего вызова контакта не было:
self.database.save_message(
self.current_chat,
'in', decrypted_message.decode('utf8'))
self.set_active_user()
@pyqtSlot()
def connection_lost(self):
'''
Слот обработчик потери соеднинения с сервером.
Выдаёт окно предупреждение и завершает работу приложения.
'''
self.messages.warning(
self, 'Сбой соединения', 'Потеряно соединение с сервером. ')
self.close()
@pyqtSlot()
def sig_205(self):
'''
Слот выполняющий обновление баз данных по команде сервера.
'''
if self.current_chat and not self.database.check_user(
self.current_chat):
self.messages.warning(
self,
'Сочувствую',
'К сожалению собеседник был удалён с сервера.')
self.set_disabled_input()
self.current_chat = None
self.clients_list_update()
def make_connection(self, trans_obj):
'''Метод обеспечивающий соединение сигналов и слотов.'''
trans_obj.new_message.connect(self.message)
trans_obj.connection_lost.connect(self.connection_lost)
trans_obj.message_205.connect(self.sig_205) | AASMessenger_Client | /AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/main_window.py | main_window.py |
import datetime
import os
from sqlalchemy import create_engine, Column, String, Text, DateTime, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
class ClientDatabase:
'''
Класс - оболочка для работы с базой данных клиента.
Использует SQLite базу данных, реализован с помощью
SQLAlchemy ORM и используется декларативный подход.
'''
Base = declarative_base()
class KnowUsers(Base):
'''
Класс - отображение для таблицы всех пользователей.
'''
__tablename__ = 'know_users'
id = Column(Integer, primary_key=True)
username = Column(String, unique=True)
def __init__(self, user):
self.id = None
self.username = user
class MessageHistory(Base):
'''
Класс - отображение для таблицы статистики переданных сообщений.
'''
__tablename__ = 'message_history'
id = Column(Integer, primary_key=True)
contact = Column(String)
direction = Column(String)
message = Column(Text)
date = Column(DateTime)
def __init__(self, contact, direction, message):
self.id = None
self.contact = contact
self.direction = direction
self.message = message
self.date = datetime.datetime.now()
class Contacts(Base):
'''
Класс - отображение для таблицы контактов.
'''
__tablename__ = 'contacts'
id = Column(Integer, primary_key=True)
name = Column(String, unique=True)
def __init__(self, contact):
self.id = None
self.name = contact
# Конструктор класса:
def __init__(self, name):
# Создаём движок базы данных, поскольку разрешено несколько
# клиентов одновременно, каждый должен иметь свою БД
# Поскольку клиент мультипоточный необходимо отключить
# проверки на подключения с разных потоков,
# иначе sqlite3.ProgrammingError.
# path = os.path.dirname(os.path.realpath(__file__))
path = os.getcwd()
filename = f'client_{name}.db3'
self.engine = create_engine(f'sqlite:///{os.path.join(path, filename)}',
echo=False,
pool_recycle=7200,
connect_args={'check_same_thread': False})
# Создаём таблицы:
self.Base.metadata.create_all(self.engine)
# Создаём сессию:
Session = sessionmaker(bind=self.engine)
self.session = Session()
# Очистка таблицы контактов, для подгрузки контактов с сервера:
self.session.query(self.Contacts).delete()
self.session.commit()
def add_contact(self, contact):
'''Метод добавляющий контакт в базу данных.'''
if not self.session.query(
self.Contacts).filter_by(
name=contact).count():
contact_row = self.Contacts(contact)
self.session.add(contact_row)
self.session.commit()
def contacts_clear(self):
'''Метод очищающий таблицу со списком контактов.'''
self.session.query(self.Contacts).delete()
def del_contact(self, contact):
'''Метод удаляющий определённый контакт.'''
self.session.query(self.Contacts).filter_by(name=contact).delete()
def add_users(self, users_list):
'''Метод заполняющий таблицу известных пользователей.'''
self.session.query(self.KnowUsers).delete()
for user in users_list:
user_row = self.KnowUsers(user)
self.session.add(user_row)
self.session.commit()
def save_message(self, contact, direction, message):
'''Метод сохраняющий сообщение в базе данных.'''
message_row = self.MessageHistory(contact, direction, message)
self.session.add(message_row)
self.session.commit()
def get_contacts(self):
'''Метод возвращающий список всех контактов.'''
return [contact[0]
for contact in self.session.query(self.Contacts.name).all()]
def get_users(self):
'''Метод возвращающий список всех известных пользователей.'''
return [user[0]
for user in self.session.query(self.KnowUsers.username).all()]
def check_user(self, user):
'''Метод проверяющий существует ли пользователь.'''
if self.session.query(self.KnowUsers).filter_by(username=user).count():
return True
else:
return False
def check_contact(self, contact):
'''Метод проверяющий существует ли контакт.'''
if self.session.query(self.Contacts).filter_by(name=contact).count():
return True
else:
return False
def get_history(self, contact):
'''Метод возвращающий историю сообщений с определённым пользователем.'''
query = self.session.query(
self.MessageHistory).filter_by(
contact=contact)
return [(history_row.contact,
history_row.direction,
history_row.message,
history_row.date) for history_row in query.all()]
# Отладка:
if __name__ == '__main__':
test_db = ClientDatabase('test1')
# for i in ['test3', 'test4', 'test5']:
# test_db.add_contact(i)
# test_db.add_contact('test4')
# test_db.add_users(['test1', 'test2', 'test3', 'test4', 'test5'])
# test_db.save_message('test2', 'in', f'Привет! я тестовое сообщение от {datetime.datetime.now()}!')
# test_db.save_message('test2', 'out', f'Привет! я другое тестовое сообщение от {datetime.datetime.now()}!')
# print(test_db.get_contacts())
# print(test_db.get_users())
# print(test_db.check_user('test1'))
# print(test_db.check_user('test10'))
print(sorted(test_db.get_history('test2'), key=lambda item: item[3]))
# test_db.del_contact('test4')
# print(test_db.get_contacts()) | AASMessenger_Client | /AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/client/database.py | database.py |
import sys
import logging
import socket
sys.path.append('../')
# Метод определения модуля, источника запуска:
if sys.argv[0].find('client') == -1:
LOGGER = logging.getLogger('server')
else:
LOGGER = logging.getLogger('client')
def log(func_for_log):
'''
Декоратор, выполняющий логирование вызовов функций.
Сохраняет события типа debug, содержащие
информацию о имени вызываемой функиции, параметры с которыми
вызывается функция, и модуль, вызывающий функцию.
'''
def log_create(*args, **kwargs):
res = func_for_log(*args, **kwargs)
LOGGER.debug(f'Вызвана функция {func_for_log.__name__} с параматреми '
f'{args}, {kwargs}. Вызов из модуля '
f'{func_for_log.__module__}')
return res
return log_create
def login_required(func):
'''
Декоратор, проверяющий, что клиент авторизован на сервере.
Проверяет, что передаваемый объект сокета находится в
списке авторизованных клиентов.
За исключением передачи словаря-запроса
на авторизацию. Если клиент не авторизован,
генерирует исключение TypeError
'''
def checker(*args, **kwargs):
# Проверяем, что первый аргумент - экземпляр MessageProcessor
# Импортировать необходимо тут, иначе ошибка рекурсивного импорта.
from server.server.core import MessageProcessor
from server.common.settings import ACTION, PRESENCE
if isinstance(args[0], MessageProcessor):
found = False
for arg in args:
if isinstance(arg, socket.socket):
# Проверяем, что данный сокет есть в списке names
# класса MessageProcessor
for client in args[0].names:
if args[0].names[client] == arg:
found = True
# Теперь надо проверить, что передаваемые аргументы
# не presence сообщение
for arg in args:
if isinstance(arg, dict):
if ACTION in arg and arg[ACTION] == PRESENCE:
found = True
# Если не авторизован и не сообщение начала авторизации,
# то вызываем исключение.
if not found:
raise TypeError
return func(*args, **kwargs)
return checker | AASMessenger_Client | /AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/decos.py | decos.py |
import dis
class ServerVerifier(type):
'''
Метакласс, проверяющий что в результирующем классе нет клиентских
вызовов таких как: connect. Также проверяется, что серверный
сокет является TCP и работает по IPv4 протоколу.
'''
def __init__(self, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
# Атрибуты, вызываемые функциями классов:
attrs = []
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение:
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы и
# атрибуты.
for instr in ret:
if instr.opname == 'LOAD_GLOBAL':
if instr.argval not in methods:
methods.append(instr.argval)
elif instr.opname == 'LOAD_ATTR':
if instr.argval not in attrs:
attrs.append(instr.argval)
# Если обнаружено использование недопустимого метода connect,
# генерируем исключение:
if 'connect' in methods:
raise TypeError('Использование метода connect недопустимо '
'в серверной части приложения')
# Если сокет не инициализировался константами SOCK_STREAM(TCP)
# AF_INET(IPv4), тоже исключение.
if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs):
raise TypeError('Некорректная инициализация сокета.')
super().__init__(clsname, bases, clsdict)
class ClientVerifier(type):
'''
Метакласс, проверяющий что в результирующем классе нет серверных
вызовов таких как: accept, listen. Также проверяется, что сокет не
создаётся внутри конструктора класса.
'''
def __init__(self, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение:
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы:
for instr in ret:
if instr.opname == 'LOAD_GLOBAL':
if instr.argval not in methods:
methods.append(instr.argval)
# Если обнаружено использование недопустимого метода accept, listen,
# socket бросаем исключение:
for command in ('accept', 'listen', 'socket'):
if command in methods:
raise TypeError(
'Обнаружено использование недопустимого метода')
# Вызов get_message или send_message из utils считаем корректным
# использованием сокетов
if 'recv_message' in methods or 'send_message' in methods:
pass
else:
raise TypeError(
'Отсутствуют вызовы функций, работающих с сокетами')
super().__init__(clsname, bases, clsdict) | AASMessenger_Client | /AASMessenger_Client-1.0.1.tar.gz/AASMessenger_Client-1.0.1/client/common/metaclasses.py | metaclasses.py |
import argparse
import configparser
import os
import sys
import logging
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
from common.settings import DEFAULT_PORT
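# server_log_config is imported for its side effect: it presumably sets up
# the configuration of the 'server' logger used below.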
from log import server_log_config
from server.core import MessageProcessor
from server.database import ServerDataBase
from server.main_window import MainWindow
from common.decos import log
# Initialise logging
SERVER_LOGGER = logging.getLogger('server')
@log
def args_handler(default_port, default_address):
    '''Command-line argument parser.'''
SERVER_LOGGER.debug(
f'Инициализация парсера аргументов коммандной строки: {sys.argv}')
parser = argparse.ArgumentParser()
parser.add_argument('-p', dest='port', type=int,
default=default_port, nargs='?')
parser.add_argument('-a', dest='ip', default=default_address, nargs='?')
parser.add_argument('--no_gui', action='store_true')
args = parser.parse_args()
listen_address = args.ip
listen_port = args.port
gui_flag = args.no_gui
SERVER_LOGGER.debug('Аргументы успешно загружены.')
return listen_address, listen_port, gui_flag
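# Example invocation of the parser defined in args_handler() above
# (the port and address values are illustrative only):
#   python server.py -p 7777 -a 127.0.0.1 --no_gui
# When -p or -a are omitted, the defaults taken from server.ini are used.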
@log
def config_load():
    '''Parser for the server's ini configuration file.'''
config = configparser.ConfigParser()
# dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.getcwd()
config.read(f"{dir_path}/{'server.ini'}")
    # If the config file was loaded correctly, use it,
    # otherwise fall back to the default configuration.
if 'SETTINGS' in config:
return config
else:
config.add_section('SETTINGS')
config.set('SETTINGS', 'Default_port', str(DEFAULT_PORT))
config.set('SETTINGS', 'Listen_Address', '')
config.set('SETTINGS', 'Database_path', '')
config.set('SETTINGS', 'Database_file', 'server_database.db3')
return config
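# For reference, config_load() above works with a server.ini laid out as
# follows (the values shown are illustrative):
#   [SETTINGS]
#   Default_port = 7777
#   Listen_Address =
#   Database_path =
#   Database_file = server_database.db3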
@log
def main():
    '''Entry point that launches the server application.'''
    # Load the server configuration file:
config = config_load()
    # Load command-line parameters; if none are given,
    # use the default values:
listen_address, listen_port, gui_flag = args_handler(
config['SETTINGS']['Default_port'],
config['SETTINGS']['Listen_Address'])
    # Initialise the database
database = ServerDataBase(
os.path.join(
config['SETTINGS']['Database_path'],
config['SETTINGS']['Database_file']))
    # Create the server class instance and start it:
server = MessageProcessor(listen_address, listen_port, database)
    server.daemon = True
server.start()
    # If the no-GUI flag is set, run the console input handler:
if gui_flag:
while True:
command = input('Введите exit для завершения работы сервера.')
            # On 'exit', stop the server's main loop:
if command == 'exit':
server.running = False
server.join()
break
    # Otherwise, if GUI mode is not disabled, start the GUI:
else:
server_app = QApplication(sys.argv)
server_app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
main_window = MainWindow(database, server, config)
        # Start the GUI event loop:
server_app.exec_()
        # When the windows are closed, stop the message processor:
server.running = False
if __name__ == '__main__':
main() | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server.py | server.py |
from PyQt5.QtWidgets import QDialog, QLabel, QLineEdit, QPushButton, \
QFileDialog, QMessageBox
from PyQt5.QtCore import Qt
import os
class ConfigWindow(QDialog):
    '''Server settings window class.'''
def __init__(self, config):
super().__init__()
self.config = config
self.initUI()
def initUI(self):
        '''Window settings and widget layout.'''
self.setFixedSize(365, 260)
self.setWindowTitle('Настройки сервера')
self.setAttribute(Qt.WA_DeleteOnClose)
self.setModal(True)
        # Label for the database file path:
self.db_path_label = QLabel('Путь до файла базы данных: ', self)
self.db_path_label.move(10, 10)
self.db_path_label.setFixedSize(240, 15)
        # Line edit with the database path:
self.db_path = QLineEdit(self)
self.db_path.setFixedSize(250, 20)
self.db_path.move(10, 30)
self.db_path.setReadOnly(True)
        # Button to browse for the path:
self.db_path_select = QPushButton('Обзор...', self)
self.db_path_select.move(275, 28)
# Метка с именем поля файла базы данных:
self.db_file_label = QLabel('Имя файла базы данных: ', self)
self.db_file_label.move(10, 68)
self.db_file_label.setFixedSize(180, 15)
# Поле для ввода имени файла:
self.db_file = QLineEdit(self)
self.db_file.move(200, 66)
self.db_file.setFixedSize(150, 20)
# Метка с номером порта:
self.port_label = QLabel('Номер порта для соединений:', self)
self.port_label.move(10, 108)
self.port_label.setFixedSize(180, 15)
# Поле для ввода номера порта:
self.port = QLineEdit(self)
self.port.move(200, 108)
self.port.setFixedSize(150, 20)
# Метка с адресом для соединений:
self.ip_label = QLabel('С какого IP принимаем соединения:', self)
self.ip_label.move(10, 148)
self.ip_label.setFixedSize(180, 15)
# Метка с напоминанием о пустом поле:
self.ip_label_note = QLabel(' оставьте это поле пустым, чтобы\n принимать соединения с любых адресов.', self)
self.ip_label_note.move(10, 168)
self.ip_label_note.setFixedSize(500, 30)
# Поле для ввода ip:
self.ip = QLineEdit(self)
self.ip.move(200, 148)
self.ip.setFixedSize(150, 20)
# Кнопка сохранения настроек:
self.save_btn = QPushButton('Сохранить', self)
self.save_btn.move(190, 220)
# Кнопка закрытия окна:
self.close_button = QPushButton('Закрыть', self)
self.close_button.move(275, 220)
self.close_button.clicked.connect(self.close)
self.db_path_select.clicked.connect(self.open_file_dialog)
self.show()
self.db_path.insert(self.config['SETTINGS']['Database_path'])
self.db_file.insert(self.config['SETTINGS']['Database_file'])
self.port.insert(self.config['SETTINGS']['Default_port'])
self.ip.insert(self.config['SETTINGS']['Listen_Address'])
self.save_btn.clicked.connect(self.save_server_config)
def open_file_dialog(self):
'''Метод обработчик открытия окна выбора папки.'''
global dialog
dialog = QFileDialog(self)
path = dialog.getExistingDirectory()
path = path.replace('/', '\\')
self.db_path.clear()
self.db_path.insert(path)
def save_server_config(self):
'''
Метод сохранения настроек.
Проверяет правильность введённых данных и
если всё правильно сохраняет ini файл.
'''
global config_window
message = QMessageBox()
self.config['SETTINGS']['Database_path'] = self.db_path.text()
self.config['SETTINGS']['Database_file'] = self.db_file.text()
try:
port = int(self.port.text())
except ValueError:
message.warning(self, 'Ошибка', 'Порт должен быть числом')
else:
self.config['SETTINGS']['Listen_Address'] = self.ip.text()
if 1023 < port < 65536:
self.config['SETTINGS']['Default_port'] = str(port)
# dir_path = os.path.dirname(os.path.realpath(__file__))
                # Пишем туда же, откуда читает config_load - в текущую директорию:
                dir_path = os.getcwd()
                with open(f"{dir_path}/{'server.ini'}", 'w') as conf:
self.config.write(conf)
message.information(self, 'OK',
'Настройки успешно сохранены!')
else:
                message.warning(self, 'Ошибка',
                                'Порт должен быть числом от 1024 до 65535')
from PyQt5.QtWidgets import QDialog, QPushButton, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt
class StatWindow(QDialog):
'''
Класс - окно со статистикой пользователей
'''
def __init__(self, database):
super().__init__()
self.database = database
self.initUI()
def initUI(self):
# Настройки окна:
self.setWindowTitle('Статистика клиентов')
self.setFixedSize(600, 700)
self.setAttribute(Qt.WA_DeleteOnClose)
        # Кнопка закрытия окна
self.close_button = QPushButton('Закрыть', self)
self.close_button.move(250, 650)
self.close_button.clicked.connect(self.close)
# Лист с собственно статистикой
self.stat_table = QTableView(self)
self.stat_table.move(10, 10)
self.stat_table.setFixedSize(580, 620)
self.create_stat_model()
def create_stat_model(self):
'''Метод реализующий заполнение таблицы статистикой сообщений.'''
# Список записей из базы
stat_list = self.database.message_history()
# Объект модели данных:
        stat_model = QStandardItemModel()
        stat_model.setHorizontalHeaderLabels(
['Имя Клиента',
'Последний раз входил',
'Сообщений отправлено',
'Сообщений получено'])
for row in stat_list:
user, last_seen, sent, recvd = row
user = QStandardItem(user)
user.setEditable(False)
last_seen = QStandardItem(str(last_seen.replace(microsecond=0)))
last_seen.setEditable(False)
sent = QStandardItem(str(sent))
sent.setEditable(False)
recvd = QStandardItem(str(recvd))
recvd.setEditable(False)
            stat_model.appendRow([user, last_seen, sent, recvd])
        self.stat_table.setModel(stat_model)
self.stat_table.resizeColumnsToContents()
self.stat_table.resizeRowsToContents() | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/stat_window.py | stat_window.py |
import threading
import logging
import select
import socket
import json
import hmac
import binascii
import os
from common.descriptors import ServerPort, ServerAddress
from common.settings import MAX_CONNECTIONS, DESTINATION, SENDER, ACTION, \
PRESENCE, MESSAGE, MESSAGE_TEXT, USER, TIME, ACCOUNT_NAME, ERROR, \
RESPONSE_200, RESPONSE_400, EXIT, GET_CONTACTS, LIST_INFO, ADD_CONTACT, \
RESPONSE_202, REMOVE_CONTACT, PUBLIC_KEY_REQUEST, RESPONSE_511, DATA, \
USERS_REQUEST, RESPONSE, RESPONSE_205, PUBLIC_KEY
from common.utils import send_message, recv_message
from common.decos import login_required
# Инициализация логирования
SERVER_LOGGER = logging.getLogger('server')
class MessageProcessor(threading.Thread):
'''
    Основной класс сервера. Принимает соединения, словари - пакеты
от клиентов, обрабатывает поступающие сообщения.
Работает в качестве отдельного потока.
'''
port = ServerPort()
addr = ServerAddress()
def __init__(self, listen_address, listen_port, database):
        # Параметры подключения:
self.addr = listen_address
self.port = listen_port
# База данных сервера:
self.database = database
# Сокет, через который будет осуществляться работа:
self.sock = None
# Список подключённых клиентов:
self.clients = []
# Сокеты:
self.listen_sockets = None
self.error_sockets = None
# Флаг продолжения работы:
self.running = True
# Словарь содержащий сопоставленные имена и соответствующие им сокеты:
self.names = dict()
# Конструктор родителя:
super().__init__()
def run(self):
'''Метод основной цикл потока.'''
# Инициализация Сокета:
self.init_socket()
# Основной цикл программы сервера:
while self.running:
# Ждём подключения, если таймаут вышел, ловим исключение:
try:
client, client_address = self.sock.accept()
except OSError:
pass
else:
SERVER_LOGGER.info(f'Установлено соединение с адресом: '
f'{client_address}')
client.settimeout(5)
self.clients.append(client)
recv_msg_lst = []
send_msg_lst = []
err_lst = []
# Проверяем на наличие ждущих клиентов:
try:
if self.clients:
recv_msg_lst, self.listen_sockets, \
self.error_sockets = select.select(
self.clients, self.clients, [], 0)
except OSError as err:
SERVER_LOGGER.error(f'Ошибка работы с сокетами: {err.errno}')
# Принимаем сообщения и если ошибка, исключаем клиента:
if recv_msg_lst:
for client_with_msg in recv_msg_lst:
try:
self.client_msg_handler(
recv_message(client_with_msg), client_with_msg)
except (OSError, json.JSONDecodeError, TypeError):
self.remove_client(client_with_msg)
def remove_client(self, client):
'''
Метод обработчик клиента с которым прервана связь.
Ищет клиента и удаляет его из списков и базы:
'''
SERVER_LOGGER.info(f'Клиент {client.getpeername()} '
f'отключился от сервера.')
for name in self.names:
if self.names[name] == client:
                self.database.user_logout(name)
del self.names[name]
break
self.clients.remove(client)
client.close()
def init_socket(self):
'''Метод инициализатор сокета.'''
# Готовим сокет:
transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.bind((self.addr, self.port))
transport.settimeout(0.5)
# Начинаем слушать сокет:
self.sock = transport
self.sock.listen(MAX_CONNECTIONS)
SERVER_LOGGER.info(f'Запущен сервер с портом для подключений: '
f'{self.port}, '
f'адрес с которого принимаются подключения: '
f'{self.addr}. '
f'Если адрес не указан, соединения будут приниматься '
f'с любых адресов')
print('Сервер запущен')
def proccess_message(self, message):
'''
Метод отправки сообщения клиенту.
'''
if message[DESTINATION] in self.names \
and self.names[message[DESTINATION]] in self.listen_sockets:
try:
send_message(self.names[message[DESTINATION]], message)
SERVER_LOGGER.info(f'Отправлено сообщение пользователю '
f'{message[DESTINATION]} от пользователя '
f'{message[SENDER]}.')
except OSError:
                self.remove_client(self.names[message[DESTINATION]])
elif message[DESTINATION] in self.names \
and self.names[message[DESTINATION]] not in self.listen_sockets:
SERVER_LOGGER.error(f'Связь с клиентом {message[DESTINATION]} '
f'была потеряна. Соединение закрыто, '
f'доставка невозможна.')
self.remove_client(self.names[message[DESTINATION]])
else:
SERVER_LOGGER.error(f'Пользователь {message[DESTINATION]} '
f'не зарегистрирован на сервере, '
f'отправка сообщения невозможна.')
@login_required
def client_msg_handler(self, message, client):
        '''Метод обработчик поступающих сообщений.'''
SERVER_LOGGER.debug(f'Обработка сообщения от клиента: {message}')
# Если сообщение о присутствии, принимаем и отвечаем
if ACTION in message and message[ACTION] == PRESENCE \
and TIME in message and USER in message:
# Если сообщение о присутствии то вызываем функцию авторизации:
self.autorize_user(message, client)
# Если это сообщение, то добавляем его в очередь:
elif ACTION in message and message[ACTION] == MESSAGE \
and DESTINATION in message and TIME in message \
and SENDER in message and MESSAGE_TEXT in message \
and self.names[message[SENDER]] == client:
if message[DESTINATION] in self.names:
self.database.process_message(message[SENDER],
message[DESTINATION])
self.proccess_message(message)
try:
send_message(client, RESPONSE_200)
except OSError:
self.remove_client(client)
else:
response = RESPONSE_400
response[ERROR] = 'Пользователь не зарегистрирован на сервере.'
try:
send_message(client, response)
except OSError:
pass
return
# Если клиент выходит:
elif ACTION in message and message[ACTION] == EXIT \
and ACCOUNT_NAME in message \
and self.names[message[ACCOUNT_NAME]] == client:
self.remove_client(client)
# Если запрос контакт листа:
elif ACTION in message and message[ACTION] == GET_CONTACTS \
and USER in message and self.names[message[USER]] == client:
response = RESPONSE_202
response[LIST_INFO] = self.database.get_contacts(message[USER])
try:
send_message(client, response)
except OSError:
self.remove_client(client)
        # Если добавление контакта:
elif ACTION in message and message[ACTION] == ADD_CONTACT and \
ACCOUNT_NAME in message and USER in message \
and self.names[message[USER]] == client:
self.database.add_contact(message[USER], message[ACCOUNT_NAME])
try:
send_message(client, RESPONSE_200)
except OSError:
self.remove_client(client)
# Если удаление контакта:
elif ACTION in message and message[ACTION] == REMOVE_CONTACT \
and ACCOUNT_NAME in message and USER in message \
and self.names[message[USER]] == client:
self.database.remove_contact(message[USER], message[ACCOUNT_NAME])
try:
send_message(client, RESPONSE_200)
except OSError:
self.remove_client(client)
# Если запрос известных пользователей:
elif ACTION in message and message[ACTION] == USERS_REQUEST \
and ACCOUNT_NAME in message \
and self.names[message[ACCOUNT_NAME]] == client:
response = RESPONSE_202
response[LIST_INFO] = [user[0]
for user in self.database.users_list()]
try:
send_message(client, response)
except OSError:
self.remove_client(client)
# Если это запрос публичного ключа пользователя:
elif ACTION in message and message[ACTION] == PUBLIC_KEY_REQUEST \
and ACCOUNT_NAME in message:
response = RESPONSE_511
response[DATA] = self.database.get_pubkey(message[ACCOUNT_NAME])
# может быть, что ключа ещё нет (пользователь никогда
# не логинился, тогда шлём 400)
if response[DATA]:
try:
send_message(client, response)
except OSError:
self.remove_client(client)
else:
response = RESPONSE_400
response[ERROR] = 'Нет публичного ключа ' \
'для данного пользователя'
try:
send_message(client, response)
except OSError:
self.remove_client(client)
# Иначе - Bad Request:
else:
response = RESPONSE_400
response[ERROR] = 'Запрос некорректен.'
try:
send_message(client, response)
except OSError:
self.remove_client(client)
return
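    # For reference, a chat message accepted by the MESSAGE branch above is a
    # dict of the form below (key names are the constants from common.settings;
    # their literal string values are defined there, not assumed here):
    # {ACTION: MESSAGE, TIME: <timestamp>, SENDER: 'user1',
    #  DESTINATION: 'user2', MESSAGE_TEXT: '<message body>'}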
def autorize_user(self, message, sock):
'''Метод реализующий авторизцию пользователей.'''
# Если имя пользователя уже занято то возвращаем 400:
if message[USER][ACCOUNT_NAME] in self.names.keys():
response = RESPONSE_400
response[ERROR] = 'Имя пользователя уже занято.'
try:
send_message(sock, response)
except OSError:
pass
self.clients.remove(sock)
sock.close()
# Проверяем что пользователь зарегистрирован на сервере:
elif not self.database.check_user(message[USER][ACCOUNT_NAME]):
response = RESPONSE_400
response[ERROR] = 'Пользователь не зарегистрирован.'
try:
send_message(sock, response)
except OSError:
pass
self.clients.remove(sock)
sock.close()
else:
# Иначе отвечаем 511 и проводим процедуру авторизации
# Словарь - заготовка:
message_auth = RESPONSE_511
# Набор байтов в hex представлении:
random_str = binascii.hexlify(os.urandom(64))
# В словарь байты нельзя, декодируем (json.dumps -> TypeError):
message_auth[DATA] = random_str.decode('ascii')
# Создаём хэш пароля и связки с рандомной строкой,
# сохраняем серверную версию ключа:
            # digestmod указываем явно (MD5 - прежнее значение по умолчанию),
            # иначе на Python 3.8+ hmac.new() падает с TypeError:
            hash = hmac.new(
                self.database.get_hash(message[USER][ACCOUNT_NAME]),
                random_str, 'MD5')
digest = hash.digest()
try:
# Обмен с клиентом:
send_message(sock, message_auth)
ans = recv_message(sock)
except OSError:
sock.close()
return
client_digest = binascii.a2b_base64(ans[DATA])
# Если ответ клиента корректный, то сохраняем его
# в список пользователей:
if RESPONSE in ans and ans[RESPONSE] == 511 \
and hmac.compare_digest(digest, client_digest):
self.names[message[USER][ACCOUNT_NAME]] = sock
client_ip, client_port = sock.getpeername()
try:
send_message(sock, RESPONSE_200)
except OSError:
                    self.remove_client(sock)
# Добавляем пользователя в список активных и если
# у него изменился открытый ключ сохраняем новый:
self.database.user_login(message[USER][ACCOUNT_NAME],
client_ip, client_port,
message[USER][PUBLIC_KEY])
else:
response = RESPONSE_400
response[ERROR] = 'Неверный пароль.'
try:
send_message(sock, response)
except OSError:
pass
self.clients.remove(sock)
sock.close()
# Функция - отправляет сервисное сообщение 205 с требованием клиентам
# обновить списки:
def service_update_lists(self):
'''Метод реализующий отправки сервисного сообщения 205 клиентам.'''
for client in self.names:
try:
send_message(self.names[client], RESPONSE_205)
except OSError:
self.remove_client(self.names[client]) | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/core.py | core.py |
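# A minimal sketch (not part of the server) of what the client side of the
# authorization exchange in autorize_user() is expected to do. It assumes the
# client derives the same PBKDF2 hash that add_user.py stores in the database
# and that both sides use MD5 as the HMAC digest, as in the code above.
def _demo_client_auth_digest(account_name, password, server_random_bytes):
    import hashlib  # local import keeps the sketch self-contained
    # Same derivation as add_user.RegisterUser.save_data():
    passwd_hash = binascii.hexlify(hashlib.pbkdf2_hmac(
        'sha512', password.encode('utf-8'),
        account_name.lower().encode('utf-8'), 10000))
    # server_random_bytes is the DATA field of the server's 511 response,
    # encoded back to ASCII bytes by the client.
    digest = hmac.new(passwd_hash, server_random_bytes, 'MD5').digest()
    # The client returns this value in the DATA field of its own 511 answer.
    return binascii.b2a_base64(digest)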
from PyQt5.QtWidgets import QDialog, QPushButton, QLineEdit, QApplication, \
QLabel, QMessageBox
from PyQt5.QtCore import Qt
import hashlib
import binascii
class RegisterUser(QDialog):
'''Класс диалог регистрации пользователя на сервере.'''
def __init__(self, database, server):
super().__init__()
self.database = database
self.server = server
self.setWindowTitle('Регистрация')
self.setFixedSize(175, 183)
self.setModal(True)
self.setAttribute(Qt.WA_DeleteOnClose)
self.label_username = QLabel('Введите имя пользователя:', self)
self.label_username.move(10, 10)
self.label_username.setFixedSize(150, 15)
self.client_name = QLineEdit(self)
self.client_name.setFixedSize(154, 20)
self.client_name.move(10, 30)
self.label_passwd = QLabel('Введите пароль:', self)
self.label_passwd.move(10, 55)
self.label_passwd.setFixedSize(150, 15)
self.client_passwd = QLineEdit(self)
self.client_passwd.setFixedSize(154, 20)
self.client_passwd.move(10, 75)
self.client_passwd.setEchoMode(QLineEdit.Password)
self.label_conf = QLabel('Введите подтверждение:', self)
self.label_conf.move(10, 100)
self.label_conf.setFixedSize(150, 15)
self.client_conf = QLineEdit(self)
self.client_conf.setFixedSize(154, 20)
self.client_conf.move(10, 120)
self.client_conf.setEchoMode(QLineEdit.Password)
self.btn_ok = QPushButton('Сохранить', self)
self.btn_ok.move(10, 150)
self.btn_ok.clicked.connect(self.save_data)
self.btn_cancel = QPushButton('Выход', self)
self.btn_cancel.move(90, 150)
self.btn_cancel.clicked.connect(self.close)
self.messages = QMessageBox()
self.show()
def save_data(self):
'''
Метод проверки правильности ввода и сохранения в базу
нового пользователя.
'''
if not self.client_name.text():
self.messages.critical(self, 'Ошибка',
'Не указано имя пользователя.')
return
elif self.client_passwd.text() != self.client_conf.text():
self.messages.critical(self, 'Ошибка',
'Введённые пароли не совпадают.')
return
elif self.database.check_user(self.client_name.text()):
self.messages.critical(self, 'Ошибка',
'Пользователь уже существует.')
return
else:
passwd_bytes = self.client_passwd.text().encode('utf-8')
salt = self.client_name.text().lower().encode('utf-8')
passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes,
salt, 10000)
self.database.add_user(self.client_name.text(),
binascii.hexlify(passwd_hash))
self.messages.information(self, 'Успех',
'Пользователь успешно зарегистрирован.')
# Рассылаем клиентам сообщение
            # о необходимости обновить справочники
self.server.service_update_lists()
self.close()
if __name__ == '__main__':
app = QApplication([])
app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
dial = RegisterUser(None)
app.exec_() | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/add_user.py | add_user.py |
from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QLabel, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import QTimer
from server.stat_window import StatWindow
from server.config_window import ConfigWindow
from server.add_user import RegisterUser
from server.remove_user import DelUserDialog
class MainWindow(QMainWindow):
'''Класс - основное окно сервера.'''
def __init__(self, database, server, config):
# Конструктор предка
super().__init__()
# База данных сервера
self.database = database
self.server_thread = server
self.config = config
# Ярлык выхода
self.exitAction = QAction('Выход', self)
self.exitAction.setShortcut('Ctrl+Q')
self.exitAction.triggered.connect(qApp.quit)
# Кнопка обновить список клиентов
self.refresh_button = QAction('Обновить список', self)
# Кнопка настроек сервера
self.config_btn = QAction('Настройки сервера', self)
# Кнопка регистрации пользователя
self.register_btn = QAction('Регистрация пользователя', self)
# Кнопка удаления пользователя
        self.remove_btn = QAction('Удаление пользователя', self)
# Кнопка вывести историю сообщений
self.show_history_button = QAction('История клиентов', self)
# Статусбар
self.statusBar()
self.statusBar().showMessage('Server Working')
# Тулбар
self.toolbar = self.addToolBar('MainBar')
self.toolbar.addAction(self.exitAction)
self.toolbar.addAction(self.refresh_button)
self.toolbar.addAction(self.show_history_button)
self.toolbar.addAction(self.config_btn)
self.toolbar.addAction(self.register_btn)
self.toolbar.addAction(self.remove_btn)
# Настройки геометрии основного окна
self.setFixedSize(800, 600)
self.setWindowTitle('Messaging Server alpha release')
# Надпись о том, что ниже список подключённых клиентов
self.label = QLabel('Список подключённых клиентов:', self)
self.label.setFixedSize(240, 15)
self.label.move(10, 25)
# Окно со списком подключённых клиентов.
self.active_clients_table = QTableView(self)
self.active_clients_table.move(10, 45)
self.active_clients_table.setFixedSize(780, 400)
# Таймер, обновляющий список клиентов 1 раз в секунду
self.timer = QTimer()
self.timer.timeout.connect(self.create_users_model)
self.timer.start(1000)
# Связываем кнопки с процедурами
self.refresh_button.triggered.connect(self.create_users_model)
self.show_history_button.triggered.connect(self.show_statistics)
self.config_btn.triggered.connect(self.server_config)
self.register_btn.triggered.connect(self.reg_user)
self.remove_btn.triggered.connect(self.rem_user)
# Последним параметром отображаем окно.
self.show()
def create_users_model(self):
'''Метод заполняющий таблицу активных пользователей.'''
list_users = self.database.active_users_list()
        users_model = QStandardItemModel()
        users_model.setHorizontalHeaderLabels(
            ['Имя Клиента', 'IP Адрес', 'Порт', 'Время подключения'])
        for row in list_users:
            user, ip, port, time = row
            user = QStandardItem(user)
            user.setEditable(False)
            ip = QStandardItem(ip)
            ip.setEditable(False)
            port = QStandardItem(str(port))
            port.setEditable(False)
            # Уберём миллисекунды из строки времени,
            # т.к. такая точность не требуется.
            time = QStandardItem(str(time.replace(microsecond=0)))
            time.setEditable(False)
            users_model.appendRow([user, ip, port, time])
        self.active_clients_table.setModel(users_model)
self.active_clients_table.resizeColumnsToContents()
self.active_clients_table.resizeRowsToContents()
def show_statistics(self):
'''Метод создающий окно со статистикой клиентов.'''
global stat_window
stat_window = StatWindow(self.database)
stat_window.show()
def server_config(self):
'''Метод создающий окно с настройками сервера.'''
global config_window
# Создаём окно и заносим в него текущие параметры
config_window = ConfigWindow(self.config)
def reg_user(self):
'''Метод создающий окно регистрации пользователя.'''
global reg_window
        reg_window = RegisterUser(self.database, self.server_thread)
reg_window.show()
def rem_user(self):
'''Метод создающий окно удаления пользователя.'''
global rem_window
        rem_window = DelUserDialog(self.database, self.server_thread)
rem_window.show() | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/main_window.py | main_window.py |
import datetime
from sqlalchemy import create_engine, Column, Integer, String, \
ForeignKey, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
class ServerDataBase:
'''
Класс - оболочка для работы с базой данных сервера.
Использует SQLite базу данных, реализован с помощью
SQLAlchemy ORM и используется декларативный подход.
'''
Base = declarative_base()
class AllUsers(Base):
'''Класс - отображение таблицы всех пользователей.'''
__tablename__ = 'all_users'
id = Column(Integer, primary_key=True)
name = Column(String, unique=True)
last_login = Column(DateTime)
passwd_hash = Column(String)
pubkey = Column(Text)
def __init__(self, username, passwd_hash):
self.name = username
self.last_login = datetime.datetime.now()
self.passwd_hash = passwd_hash
self.pubkey = None
self.id = None
class ActiveUsers(Base):
'''Класс - отображение таблицы активных пользователей.'''
__tablename__ = 'active_users'
id = Column(Integer, primary_key=True)
user = Column(String, ForeignKey('all_users.id'), unique=True)
ip_address = Column(String)
port = Column(Integer)
login_time = Column(DateTime)
def __init__(self, user_id, ip_address, port, login_time):
self.user = user_id
self.ip_address = ip_address
self.port = port
self.login_time = login_time
self.id = None
class LoginHistory(Base):
'''Класс - отображение таблицы истории входов.'''
__tablename__ = 'login_history'
id = Column(Integer, primary_key=True)
name = Column(String, ForeignKey('all_users.id'))
date_time = Column(DateTime)
ip = Column(String)
port = Column(String)
def __init__(self, name, date, ip, port):
self.id = None
self.name = name
self.date_time = date
self.ip = ip
self.port = port
class UsersContacts(Base):
'''Класс - отображение таблицы контактов пользователей.'''
__tablename__ = 'contacts'
id = Column(Integer, primary_key=True)
user = Column(ForeignKey('all_users.id'))
contact = Column(ForeignKey('all_users.id'))
def __init__(self, user, contact):
self.id = None
self.user = user
self.contact = contact
class UsersHistory(Base):
'''Класс - отображение таблицы истории действий.'''
__tablename__ = 'history'
id = Column(Integer, primary_key=True)
user = Column(ForeignKey('all_users.id'))
sent = Column(Integer)
accepted = Column(Integer)
def __init__(self, user):
self.id = None
self.user = user
self.sent = 0
self.accepted = 0
def __init__(self, path):
# Создаём движок базы данных:
self.engine = create_engine(f'sqlite:///{path}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
self.Base.metadata.create_all(self.engine)
# Создаём сессию:
Session = sessionmaker(bind=self.engine)
self.session = Session()
# Если в таблице активных пользователей есть записи,
# то их необходимо удалить
self.session.query(self.ActiveUsers).delete()
self.session.commit()
def user_login(self, username, ip_address, port, key):
'''
Метод выполняющийся при входе пользователя,
записывает в базу факт входа.
Обновляет открытый ключ пользователя при его изменении.
'''
# Запрос в таблицу пользователей на наличие там пользователя с таким
# именем
rez = self.session.query(self.AllUsers).filter_by(name=username)
# Если имя пользователя уже присутствует в таблице,
# обновляем время последнего входа и проверяем корректность ключа.
# Если клиент прислал новый ключ, сохраняем его:
if rez.count():
user = rez.first()
user.last_login = datetime.datetime.now()
if user.pubkey != key:
user.pubkey = key
# Если нету, то генерируем исключение:
else:
            raise ValueError('Пользователь не зарегистрирован.')
# Теперь можно создать запись в таблицу активных пользователей
# о факте входа:
new_active_user = self.ActiveUsers(user.id, ip_address, port,
datetime.datetime.now())
self.session.add(new_active_user)
# И сохранить в историю входов:
history = self.LoginHistory(user.id, datetime.datetime.now(),
ip_address, port)
self.session.add(history)
self.session.commit()
def add_user(self, name, passwd_hash):
'''
Метод регистрации пользователя.
Принимает имя и хэш пароля, создаёт запись в таблице статистики.
'''
user_row = self.AllUsers(name, passwd_hash)
self.session.add(user_row)
self.session.commit()
history_row = self.UsersHistory(user_row.id)
self.session.add(history_row)
self.session.commit()
def remove_user(self, name):
'''Метод удаляющий пользователя из базы.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
        self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
self.session.query(self.LoginHistory).filter_by(name=user.id).delete()
self.session.query(self.UsersContacts).filter_by(user=user.id).delete()
self.session.query(
self.UsersContacts).filter_by(contact=user.id).delete()
self.session.query(self.UsersHistory).filter_by(user=user.id).delete()
self.session.query(self.AllUsers).filter_by(name=name).delete()
self.session.commit()
def get_hash(self, name):
'''Метод получения хэша пароля пользователя.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.passwd_hash
def get_pubkey(self, name):
'''Метод получения публичного ключа пользователя.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.pubkey
def check_user(self, name):
'''Метод проверяющий существование пользователя.'''
if self.session.query(self.AllUsers).filter_by(name=name).count():
return True
else:
return False
def user_logout(self, username):
'''Метод фиксирующий отключения пользователя.'''
        # Запрашиваем пользователя, который покидает нас:
user = self.session.query(self.AllUsers).filter_by(
name=username).first()
# Удаляем его из таблицы активных пользователей:
self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
self.session.commit()
def process_message(self, sender, recipient):
'''Метод записывающий в таблицу статистики факт передачи сообщения.'''
# Получаем ID отправителя и получателя:
sender = self.session.query(self.AllUsers).filter_by(
name=sender).first().id
recipient = self.session.query(self.AllUsers).filter_by(
name=recipient).first().id
# Запрашиваем строки из истории и увеличиваем счётчики:
sender_row = self.session.query(self.UsersHistory).filter_by(
user=sender).first()
sender_row.sent += 1
recipient_row = self.session.query(self.UsersHistory).filter_by(
user=recipient).first()
recipient_row.accepted += 1
self.session.commit()
# Функция добавляет контакт для пользователя:
def add_contact(self, user, contact):
'''Метод добавления контакта для пользователя.'''
# Получаем ID пользователей
user = self.session.query(self.AllUsers).filter_by(
name=user).first()
contact = self.session.query(self.AllUsers).filter_by(
name=contact).first()
# Проверяем что не дубль и что контакт может существовать (полю
# пользователь мы доверяем):
if not contact or self.session.query(self.UsersContacts).filter_by(
user=user.id, contact=contact.id).count():
return
# Создаём объект и заносим его в базу:
contact_row = self.UsersContacts(user.id, contact.id)
self.session.add(contact_row)
self.session.commit()
def remove_contact(self, user, contact):
'''Метод удаления контакта пользователя.'''
# Получаем ID пользователей:
user = self.session.query(self.AllUsers).filter_by(
name=user).first()
contact = self.session.query(self.AllUsers).filter_by(
name=contact).first()
# Проверяем что контакт может существовать (полю пользователь мы
# доверяем):
if not contact:
return
# Удаляем требуемое:
self.session.query(self.UsersContacts).filter(
self.UsersContacts.user == user.id,
self.UsersContacts.contact == contact.id
).delete()
self.session.commit()
def users_list(self):
'''
Метод возвращающий список известных пользователей
со временем последнего входа.
'''
# Запрос строк таблицы пользователей:
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login
)
# Возвращаем список кортежей:
return query.all()
def active_users_list(self):
'''Метод возвращающий список активных пользователей.'''
# Запрашиваем соединение таблиц
# и собираем кортежи имя, адрес, порт, время:
query = self.session.query(
self.AllUsers.name,
self.ActiveUsers.ip_address,
self.ActiveUsers.port,
self.ActiveUsers.login_time
).join(self.AllUsers)
# Возвращаем список кортежей:
return query.all()
def login_history(self, username=None):
'''Метод возвращающий историю входов.'''
# Запрашиваем историю входа:
query = self.session.query(
self.AllUsers.name,
self.LoginHistory.date_time,
self.LoginHistory.ip,
self.LoginHistory.port,
).join(self.AllUsers)
# Если было указано имя пользователя, то фильтруем по нему:
if username:
query = query.filter(self.AllUsers.name == username)
# Возвращаем список кортежей:
return query.all()
def get_contacts(self, username):
'''Метод возвращающий список контактов пользователя.'''
        # Запрашиваем указанного пользователя:
user = self.session.query(self.AllUsers).filter_by(
name=username).one()
# Запрашиваем его список контактов:
query = self.session.query(self.UsersContacts, self.AllUsers.name). \
filter_by(user=user.id). \
join(self.AllUsers, self.UsersContacts.contact == self.AllUsers.id)
# Выбираем только имена пользователей и возвращаем их:
return [contact[1] for contact in query.all()]
def message_history(self):
'''Метод возвращающий статистику сообщений.'''
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login,
self.UsersHistory.sent,
self.UsersHistory.accepted
).join(self.AllUsers)
# Возвращаем список кортежей:
return query.all()
# Отладка:
if __name__ == '__main__':
test_db = ServerDataBase('../../server_database.db3')
test_db.user_login('test1', '192.168.1.113', 8080, 'jhgfd')
test_db.user_login('test2', '192.168.1.113', 8081, 'kjfyd')
print(test_db.users_list())
# print(test_db.active_users_list())
# test_db.user_logout('McG')
# print(test_db.login_history('re'))
# test_db.add_contact('test2', 'test1')
# test_db.add_contact('test1', 'test3')
# test_db.add_contact('test1', 'test6')
# test_db.remove_contact('test1', 'test3')
test_db.process_message('test1', 'test2')
print(test_db.message_history()) | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/database.py | database.py |
import sys
import logging
import socket
sys.path.append('../')
# Метод определения модуля, источника запуска:
if sys.argv[0].find('client') == -1:
LOGGER = logging.getLogger('server')
else:
LOGGER = logging.getLogger('client')
def log(func_for_log):
'''
Декоратор, выполняющий логирование вызовов функций.
Сохраняет события типа debug, содержащие
информацию о имени вызываемой функиции, параметры с которыми
вызывается функция, и модуль, вызывающий функцию.
'''
def log_create(*args, **kwargs):
res = func_for_log(*args, **kwargs)
        LOGGER.debug(f'Вызвана функция {func_for_log.__name__} с параметрами '
f'{args}, {kwargs}. Вызов из модуля '
f'{func_for_log.__module__}')
return res
return log_create
def login_required(func):
'''
Декоратор, проверяющий, что клиент авторизован на сервере.
Проверяет, что передаваемый объект сокета находится в
списке авторизованных клиентов.
За исключением передачи словаря-запроса
на авторизацию. Если клиент не авторизован,
генерирует исключение TypeError
'''
def checker(*args, **kwargs):
# Проверяем, что первый аргумент - экземпляр MessageProcessor
# Импортировать необходимо тут, иначе ошибка рекурсивного импорта.
from server.core import MessageProcessor
from common.settings import ACTION, PRESENCE
if isinstance(args[0], MessageProcessor):
found = False
for arg in args:
if isinstance(arg, socket.socket):
# Проверяем, что данный сокет есть в списке names
# класса MessageProcessor
for client in args[0].names:
if args[0].names[client] == arg:
found = True
# Теперь надо проверить, что передаваемые аргументы
# не presence сообщение
for arg in args:
if isinstance(arg, dict):
if ACTION in arg and arg[ACTION] == PRESENCE:
found = True
# Если не авторизован и не сообщение начала авторизации,
# то вызываем исключение.
if not found:
raise TypeError
return func(*args, **kwargs)
return checker | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/decos.py | decos.py |
import dis
class ServerVerifier(type):
'''
Метакласс, проверяющий что в результирующем классе нет клиентских
вызовов таких как: connect. Также проверяется, что серверный
сокет является TCP и работает по IPv4 протоколу.
'''
def __init__(self, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
# Атрибуты, вызываемые функциями классов:
attrs = []
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение:
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы и
# атрибуты.
for instr in ret:
if instr.opname == 'LOAD_GLOBAL':
if instr.argval not in methods:
methods.append(instr.argval)
elif instr.opname == 'LOAD_ATTR':
if instr.argval not in attrs:
attrs.append(instr.argval)
# Если обнаружено использование недопустимого метода connect,
# генерируем исключение:
if 'connect' in methods:
raise TypeError('Использование метода connect недопустимо '
'в серверной части приложения')
# Если сокет не инициализировался константами SOCK_STREAM(TCP)
# AF_INET(IPv4), тоже исключение.
if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs):
raise TypeError('Некорректная инициализация сокета.')
super().__init__(clsname, bases, clsdict)
class ClientVerifier(type):
'''
Метакласс, проверяющий что в результирующем классе нет серверных
вызовов таких как: accept, listen. Также проверяется, что сокет не
создаётся внутри конструктора класса.
'''
def __init__(self, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение:
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы:
for instr in ret:
if instr.opname == 'LOAD_GLOBAL':
if instr.argval not in methods:
methods.append(instr.argval)
# Если обнаружено использование недопустимого метода accept, listen,
# socket бросаем исключение:
for command in ('accept', 'listen', 'socket'):
if command in methods:
raise TypeError(
'Обнаружено использование недопустимого метода')
# Вызов get_message или send_message из utils считаем корректным
# использованием сокетов
if 'recv_message' in methods or 'send_message' in methods:
pass
else:
raise TypeError(
'Отсутствуют вызовы функций, работающих с сокетами')
super().__init__(clsname, bases, clsdict) | AASMessenger_Server | /AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/common/metaclasses.py | metaclasses.py |
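# A small, self-contained illustration of how a class opts into these checks
# (it only runs when this module is executed directly and is not used by the
# rest of the package):
if __name__ == '__main__':
    import socket

    class _DemoServer(metaclass=ServerVerifier):
        '''Passes ServerVerifier: TCP/IPv4 socket and no connect() calls.'''

        def init_socket(self):
            transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            transport.listen(5)
            return transport

    print('ServerVerifier accepted', _DemoServer.__name__)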
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
#
# import warnings
# import copy
import time
import pickle
import numpy as np
from .layer.fully_connected import FullyConnected
from .layer.dropout import Dropout
from .layer.batch_normalization import BatchNormalization
from .layer.rnn import RNN
from .layer.lstm import LSTM
from .activation.relu import Relu
from .activation.sigmoid import Sigmoid
from .activation.tanh import Tanh
from .loss.softmax import SoftMax
from .loss.svm import SVM
from .layer.convolutional import Convolutional
from .layer.pooling import Pooling
from .layer.flatten import Flatten
from .config import get_default_config
class AADeepLearning:
"""
入口
"""
config = None
# 损失值
loss = []
# 训练数据 shape: (60000, 28, 28, 1) (样本数, 宽, 高, 通道数)
train_data = []
# 训练数据标签
train_label = []
    # 测试数据
    test_data = []
    # 测试数据标签
    test_lable = []
    # 输入数据形状
    input_shape = 0
# 学习率
learning_rate = 0
# 神经网络层数
layer_number = 0
# 神经网络参数 weight和bias
net = {}
# 缓存loss
loss_list = []
# 缓存准确率
accuracy_list = []
def __init__(self, net={}, config={}):
"""
初始化
:param net: 网络结构
:param config: 配置项
"""
# 合并配置文件,后者覆盖前者
self.config = {**get_default_config(), **config}
# 网络结构和定义层一致
self.net = net
self.learning_rate = self.config['learning_rate']
self.net = self.init_net(net)
self.is_load_model = False
if self.config["load_model"] != "":
# 加载模型,进行预测或者继续训练
self.reload(self.config["load_model"])
self.is_load_model = True
def init_net(self, net):
"""
初始化网络所需的对象,方便后期调用,不用每次都重复判断
:param net: 网络结构
:return: 网络结构
"""
for i, layer in enumerate(net):
if layer['type'] == 'convolutional':
net[i]['object'] = Convolutional()
elif layer['type'] == 'pooling':
net[i]['object'] = Pooling()
elif layer['type'] == 'flatten':
net[i]['object'] = Flatten()
elif layer['type'] == 'fully_connected':
net[i]['object'] = FullyConnected()
elif layer['type'] == 'dropout':
net[i]['object'] = Dropout()
elif layer['type'] == 'batch_normalization':
net[i]['object'] = BatchNormalization()
elif layer['type'] == 'relu':
net[i]['object'] = Relu()
elif layer['type'] == 'sigmoid':
net[i]['object'] = Sigmoid()
elif layer['type'] == 'tanh':
net[i]['object'] = Tanh()
elif layer['type'] == 'rnn':
net[i]['object'] = RNN()
elif layer['type'] == 'lstm':
net[i]['object'] = LSTM()
elif layer['type'] == 'softmax':
net[i]['object'] = SoftMax()
elif layer['type'] == 'svm':
net[i]['object'] = SVM()
return net
def train(self, x_train=None, y_train=None, is_train=True):
"""
训练
:param x_train: 数据
:param y_train: 标签
:param is_train: 是否是训练模式
"""
if len(x_train.shape) == 4:
# 训练立方体数据 例如图片数据 宽*高*通道数
flow_data_shape = {
"batch_size": self.config['batch_size'],
"channel": x_train.shape[1],
"height": x_train.shape[2],
"width": x_train.shape[3]
}
else:
# 训练序列数据 样本 * 序列个数 * 序列长度
flow_data_shape = {
"batch_size": self.config['batch_size'],
"sequence_number": x_train.shape[1],
"sequence_length": x_train.shape[2]
}
# 1,初始化网络参数
if self.is_load_model == False:
# 没有载入已训练好的模型,则初始化
self.net = self.init_parameters(flow_data_shape)
for iteration in range(1, self.config['number_iteration'] + 1):
x_train_batch, y_train_batch = self.next_batch(x_train, y_train, self.config['batch_size'])
# 2,前向传播
flow_data = self.forward_pass(self.net, x_train_batch, is_train=is_train)
# loss = self.compute_cost(flow_data, y_train_batch)
# 3,调用最后一层的计算损失函数,计算损失
loss = self.net[len(self.net)-1]['object'].compute_cost(flow_data, self.net[len(self.net)-1], y_train_batch)
self.loss_list.append(loss)
# 4,反向传播,求梯度
self.net = self.backward_pass(self.net, flow_data, y_train_batch)
# 梯度检验
# self.gradient_check(x=x_train_batch, y=y_train_batch, net=self.net, layer_name='convolutional_1', weight_key='W', gradient_key='dW')
# exit()
# 5,根据梯度更新一次参数
self.net = self.update_parameters(self.net, iteration)
if iteration % self.config["display"] == 0:
# self.check_weight(self.net)
_, accuracy = self.predict(x_train_batch, y_train_batch, is_train=is_train)
self.accuracy_list.append(accuracy)
now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print(now_time, ' iteration:', iteration, ' loss:', loss, ' accuracy:', accuracy)
if self.config["save_model"] != "" and iteration % self.config["save_iteration"] == 0:
print('saving model...')
self.save(self.config["save_model"] + "-" + str(iteration) + '.model')
def init_parameters(self, flow_data_shape):
"""
初始化权重和偏置项
:param flow_data_shape: 流动数据形状
:return: 网络结构
"""
net = self.net
for i, layer in enumerate(net):
net[i], flow_data_shape = layer['object'].init(layer=layer, flow_data_shape=flow_data_shape,
config=self.config)
return net
def forward_pass(self, net, x, is_train=False):
"""
前向传播
:param net: 网络结构
:param x: 数据
:param is_train: 是否是训练模式
:return: 流动数据
"""
# 流动数据,一层一层的计算,并向后流动
flow_data = x
for i, layer in enumerate(net):
# 缓存当前层的输入
net[i]['input'] = flow_data
flow_data, net[i] = layer["object"].forword(flow_data=flow_data, layer=layer, is_train=is_train)
# 缓存当前层的输出
net[i]['output'] = flow_data
return flow_data
def backward_pass(self, net, flow_data, train_label):
"""
反向传播
:param net: 网络结构
:param flow_data: 前向传播最后一层输出
:param train_label: 标签
:return: 包含梯度的网络结构
"""
layer_number = len(net)
for i in reversed(range(0, layer_number)):
layer = net[i]
if i == len(net)-1:
# 最后一层
flow_data = layer["object"].backword(flow_data=flow_data, layer=layer, label=train_label)
else:
flow_data, net[i] = layer["object"].backword(flow_data=flow_data, layer=layer, config=self.config)
return net
def update_parameters(self, net, iteration):
"""
更新权重,偏置项
:param net: 网络结构
:param iteration: 迭代次数
:return: 更新权重,偏置项后的网络结构
"""
for i, layer in enumerate(net):
net[i] = layer['object'].update_parameters(layer=layer, config=self.config, iteration=iteration)
return net
def save(self, path="AA.model"):
"""
保存模型
:param path: 路径
"""
with open(path, "wb") as f:
pickle.dump(self.net, f)
def reload(self, path="AA.model"):
"""
载入模型
:param path: 路径
"""
with open(path, "rb") as f:
self.net = pickle.load(f)
def predict(self, x_test=None, y_test=None, is_train=False):
"""
预测
:param x_test: 预测数据
:param y_test: 预测标签
:param is_train: 是否是训练模式
:return: 概率分布矩阵,准确率
"""
# if x_test.shape[0] > 500:
# print("Verify the accuracy on " + str(x_test.shape[0]) + " test set, please wait a moment.")
flow_data = self.forward_pass(self.net, x_test, is_train)
flow_data = np.array(flow_data).T
batch_size = y_test.shape[0]
right = 0
for i in range(0, batch_size):
index = np.argmax(flow_data[i])
if y_test[i][index] == 1:
right += 1
accuracy = right / batch_size
return flow_data, accuracy
def next_batch(self, train_data, train_label, batch_size):
"""
随机获取下一批数据
:param train_data:
:param train_label:
:param batch_size:
:return:
"""
index = [i for i in range(0, len(train_label))]
        # 随机打乱索引,以便每个批次随机采样
np.random.shuffle(index)
batch_data = []
batch_label = []
for i in range(0, batch_size):
batch_data.append(train_data[index[i]])
batch_label.append(train_label[index[i]])
batch_data = np.array(batch_data)
batch_label = np.array(batch_label)
return batch_data, batch_label
def visualization_loss(self):
"""
画出损失曲线
:return:
"""
import matplotlib.pyplot as plt
plt.plot(self.loss_list, 'r')
plt.xlabel("iteration")
plt.ylabel("loss")
plt.show()
def visualization_accuracy(self):
"""
画出正确率曲线
:return:
"""
import matplotlib.pyplot as plt
plt.plot(self.accuracy_list, 'g')
plt.xlabel("display")
plt.ylabel("accuracy")
plt.show()
def check_weight(self, net):
"""
检查权重,查看小于1e-8的比例
:param net:
:return:
"""
for i, layer in enumerate(net):
if layer['type'] == 'fully_connected':
print(layer["name"], ":dW|<1e-8 :", np.sum(abs(layer['dW']) < 1e-8), "/",
layer['dW'].shape[0] * layer['dW'].shape[1])
print(layer['name'] + ":db|<1e-8 :", np.sum(abs(layer['db']) < 1e-8), "/",
layer['db'].shape[0] * layer['db'].shape[1])
elif layer['type'] == 'convolutional':
print(layer["name"], ":dW|<1e-8 :", np.sum(abs(layer['dW']) < 1e-8), "/",
layer['dW'].shape[0] * layer['dW'].shape[1] * layer['dW'].shape[2] * layer['dW'].shape[3])
print(layer['name'] + ":db|<1e-8 :", np.sum(abs(layer['db']) < 1e-8), "/",
layer['db'].shape[0] * layer['db'].shape[1] * layer['db'].shape[2])
elif layer['type'] == 'rnn':
print(layer['name'] + ":weight_U_gradient" + str(i) + "|<1e-8 :",
np.sum(abs(layer['weight_U_gradient']) < 1e-8), "/",
layer['weight_U_gradient'].shape[0] * layer['weight_U_gradient'].shape[1])
print(layer['name'] + ":weight_W_gradient" + str(i) + "|<1e-8 :",
np.sum(abs(layer['weight_W_gradient']) < 1e-8), "/",
layer['weight_W_gradient'].shape[0] * layer['weight_W_gradient'].shape[1])
print(layer['name'] + ":weight_V_gradient" + str(i) + "|<1e-8 :",
np.sum(abs(layer['weight_V_gradient']) < 1e-8), "/",
layer['weight_V_gradient'].shape[0] * layer['weight_V_gradient'].shape[1])
elif layer['type'] == 'lstm':
print(layer['name'] + ":dWf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dWf']) < 1e-8), "/",
layer['dWf'].shape[0] * layer['dWf'].shape[1])
print(layer['name'] + ":dUf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dUf']) < 1e-8), "/",
layer['dUf'].shape[0] * layer['dUf'].shape[1])
print(layer['name'] + ":dbf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dbf']) < 1e-8), "/",
layer['dbf'].shape[0] * layer['dbf'].shape[1])
print(layer['name'] + ":dWi" + str(i) + "|<1e-8 :", np.sum(abs(layer['dWi']) < 1e-8), "/",
layer['dWi'].shape[0] * layer['dWi'].shape[1])
print(layer['name'] + ":dUf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dUf']) < 1e-8), "/",
layer['dUi'].shape[0] * layer['dUi'].shape[1])
print(layer['name'] + ":dbf" + str(i) + "|<1e-8 :", np.sum(abs(layer['dbf']) < 1e-8), "/",
layer['dbi'].shape[0] * layer['dbi'].shape[1])
def gradient_check(self, x, y, net, layer_name, weight_key, gradient_key, epsilon=1e-4):
"""
梯度检验
:param x: 数据
:param y: 标签
:param net: 网络结构
:param layer_name: 需要检验的层名称
:param weight_key: 需要检验的权重键名
:param gradient_key: 需要检验的梯度键名
:param epsilon: 数值逼近的x长度
"""
# 1,要检验的梯度展成一列
layer_number = -1 # 第几层
for j, layer in enumerate(net):
if layer['name'] == layer_name:
layer_number = j
break
assert layer_number != -1
# 梯度字典转列向量(n,1)
gradient_vector = np.reshape(net[layer_number][gradient_key], (-1, 1))
# 参数字典转列向量(n,1)
weight_vector = np.reshape(net[layer_number][weight_key], (-1, 1))
# 数值逼近求得的梯度
gradient_vector_approach = np.zeros(gradient_vector.shape)
lenght = weight_vector.shape[0]
# 遍历,每次求权重一个数据点的梯度,然后串联起来
for i in range(lenght):
if i % 10 == 0:
print("gradient checking i/len=", i, "/", lenght)
weight_vector_plus = np.copy(weight_vector)
weight_vector_plus[i][0] = weight_vector_plus[i][0] + epsilon
net[layer_number][weight_key] = np.reshape(weight_vector_plus, net[layer_number][weight_key].shape)
# 2,前向传播
flow_data = self.forward_pass(net=net, x=x)
# 3,计算损失
# J_plus_epsilon = self.compute_cost(flow_data, y)
J_plus_epsilon = net[len(net) - 1]['object'].compute_cost(flow_data, net[len(net) - 1], y)
weight_vector_minus = np.copy(weight_vector)
weight_vector_minus[i][0] = weight_vector_minus[i][0] - epsilon
net[layer_number][weight_key] = np.reshape(weight_vector_minus, net[layer_number][weight_key].shape)
# 2,前向传播
flow_data = self.forward_pass(net=net, x=x)
# 3,计算损失
# J_minus_epsilon = self.compute_cost(flow_data, y)
J_minus_epsilon = net[len(net) - 1]['object'].compute_cost(flow_data, net[len(net) - 1], y)
# 数值逼近求得梯度
gradient_vector_approach[i][0] = (J_plus_epsilon - J_minus_epsilon) / (epsilon * 2)
# 和解析解求得的梯度做欧式距离
diff = np.sqrt(np.sum((gradient_vector - gradient_vector_approach) ** 2)) / (
np.sqrt(np.sum((gradient_vector) ** 2)) + np.sqrt(np.sum((gradient_vector_approach) ** 2)))
# 错误阈值
if diff > 1e-4:
print("Maybe a mistake in your bakeward pass!!! diff=", diff)
else:
print("No problem in your bakeward pass!!! diff=", diff) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/aadeeplearning.py | aadeeplearning.py |
import numpy as np
class SVM:
"""
SVM损失层,又称为Hinge损失函数,一般用于最后一层分类
"""
@staticmethod
def init(layer, flow_data_shape, config):
"""
初始化, 这里无操作
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param flow_data_shape: 流动数据的形状
:param config:配置
:return: 更新后的层, 流动数据的形状
"""
return layer, flow_data_shape
@staticmethod
def forword(flow_data, layer, is_train):
"""
前向传播,这里没有操作,直接计算损失
:param flow_data: 流动数据
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param is_train: 是否是训练模式
:return: 流动数据, 更新后的层
"""
# for i in range(config['batch_size']):
# for i in range(flow_data.shape[1]):
# flow_data[:, i] = np.exp(flow_data[:, i]) / np.sum(np.exp(flow_data[:, i]))
return flow_data, layer
@staticmethod
def compute_cost(flow_data, layer, label):
"""
计算代价(SVM损失,又称为Hinge损失)
:param flow_data: 前向传播最后一层输出
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param label: 标签
:return: 损失
"""
delta = 0.2
if 'delta' in layer.keys():
delta = layer['delta']
flow_data = flow_data.T
batch_size = label.shape[0]
loss = 0.0
for i in range(batch_size):
# loss = max(0, 错误得分 - 正确得分 + delta)
# 正确类别索引
right_index = np.argmax(label[i])
# # 正确类别值
positive_x = flow_data[i][right_index]
# 代入hinge loss公式
temp = flow_data[i] - positive_x + delta
# 剔除正确类里面的值
temp[right_index] = 0
# 小于零就转换为0, 大于零不变 相当于:temp=max(0, temp)
temp = temp * np.array(temp > 0)
loss += np.sum(temp)
loss = loss / batch_size
return loss
@staticmethod
def backword(flow_data, layer, label):
"""
反向传播
:param flow_data: 流动数据
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param label: 标签
:return: 流动数据, 更新后的层
"""
delta = 0.2
if 'delta' in layer.keys():
delta = layer['delta']
flow_data = flow_data.T
batch_size = label.shape[0]
output = np.zeros(flow_data.shape)
for i in range(batch_size):
# loss += -np.sum(np.dot(batch_label[i], np.log(flow_data[:, i])))
# loss = max(0, 错误得分 - 正确得分 + delta)
# 正确类别索引
right_index = np.argmax(label[i])
# # 正确类别值
positive_x = flow_data[i][right_index]
# 代入hinge loss公式
temp = flow_data[i] - positive_x + delta
# 剔除正确类里面的值
temp[right_index] = 0
# 小于零就转换为0, 大于零转行为1, 0 1掩码
temp = np.ones(temp.shape) * np.array(temp > 0)
# 正确位置的梯度
temp[right_index] = -np.sum(temp)
output[i] = temp
# 获取最末层误差信号,反向传播
# print(output[0])
# print(output.shape)
# exit()
return output.T
@staticmethod
def update_parameters(layer, config, iteration):
"""
更新权重和偏置项, 这里无操作
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param config:配置
:param iteration:迭代次数
:return: 更新后的层
"""
return layer | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/loss/svm.py | svm.py |
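# A small self-contained check of the hinge loss above (illustrative numbers;
# 0.2 is the default margin compute_cost() uses when no 'delta' is configured):
if __name__ == '__main__':
    scores = np.array([[3.0, 1.0],    # class-0 scores for samples 0 and 1
                       [2.9, 4.0],    # class-1 scores
                       [0.5, 3.9]])   # class-2 scores
    labels = np.array([[1, 0, 0],     # sample 0 -> class 0
                       [0, 1, 0]])    # sample 1 -> class 1
    # sample 0: max(0, 2.9-3.0+0.2) + max(0, 0.5-3.0+0.2) = 0.1
    # sample 1: max(0, 1.0-4.0+0.2) + max(0, 3.9-4.0+0.2) = 0.1
    # average loss = 0.1
    print(SVM.compute_cost(scores, {}, labels))  # prints ~0.1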
import numpy as np
from .activation.tanh import Tanh
from .activation.sigmoid import Sigmoid
class LSTM:
@staticmethod
def init(layer, flow_data_shape):
sequence_length = int(flow_data_shape["sequence_length"])
# forget 遗忘门
layer["weight_f"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"])
# input
layer["weight_i"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"])
# current inputstate
layer["weight_c"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"])
# output
layer["weight_o"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"])
layer["bias_f"] = np.zeros((layer['neurons_number'], 1))
layer["bias_i"] = np.zeros((layer['neurons_number'], 1))
layer["bias_c"] = np.zeros((layer['neurons_number'], 1))
layer["bias_o"] = np.zeros((layer['neurons_number'], 1))
flow_data_shape = {
"flatten_size": flow_data_shape["sequence_length"],
"batch_size": flow_data_shape["batch_size"]
}
return layer, flow_data_shape
@staticmethod
def forword(layer, flow_data):
# flow_data = flow_data[0]
ht = np.zeros((layer['neurons_number'], flow_data.shape[0]))
ct = np.zeros((layer['neurons_number'], flow_data.shape[0]))
for i in range(flow_data.shape[1]):
xt = flow_data[:, i]
            ft = Sigmoid.forword(np.dot(layer["weight_f"], np.concatenate((ht, xt))) + layer['bias_f'])
            it = Sigmoid.forword(np.dot(layer["weight_i"], np.concatenate((ht, xt))) + layer['bias_i'])
            _ct = Tanh.forword(np.dot(layer["weight_c"], np.concatenate((ht, xt))) + layer['bias_c'])
            ct = ft * ct + it * _ct
            ot = Sigmoid.forword(np.dot(layer["weight_o"], np.concatenate((ht, xt))) + layer['bias_o'])
ht = ot * Tanh.forword(ct)
# 缓存该层的输入
# todo 可能还有 weight_V
# layer["weight_V_input"] = h
# flow_data = np.dot( layer["weight_V"],h) + layer["bias_V"]
# print(flow_data.shape)
# exit()
# print(flow_data.shape)
# exit()
return flow_data, layer
@staticmethod
def backword(flow_data, layer, config):
output_all = np.zeros(layer["input"].shape)
# print(output_all.shape)
# exit()
layer["weight_W_gradient"] = np.zeros(layer["weight_W"].shape)
layer["weight_U_gradient"] = np.zeros(layer["weight_U"].shape)
layer["bias_W_gradient"] = np.zeros(layer["bias_W"].shape)
# todo 可能要列相加
layer["bias_V_gradient"] = flow_data
layer["weight_V_gradient"] = np.dot(flow_data, layer['weight_V_input'].T)
h = np.dot(layer["weight_V"].T, flow_data)
for i in reversed(range(0, layer['input'].shape[1])):
h = Tanh.backword(h, layer)
layer["bias_W_gradient"] += np.sum(h, axis=1, keepdims=True)
# print(h.shape)
# print(layer["weight_W_input_"+str(i)].T.shape)
# print(layer["weight_W_gradient"].shape)
# print("----------")
# exit()
layer["weight_W_gradient"] += np.dot(h, layer["weight_W_input_" + str(i)].T)
layer["weight_U_gradient"] += np.dot(h, layer["weight_U_input_" + str(i)])
output_all[:, i] = np.dot(h.T, layer["weight_U"])
h = np.dot(layer["weight_W"].T, h)
# print(output_all.shape)
# exit()
return layer, output_all | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/lstm_3.py | lstm_3.py |
import numpy as np
from ..optimizer.adam import Adam
from ..optimizer.momentum import Momentum
from ..optimizer.rmsprop import Rmsprop
from ..optimizer.sgd import Sgd
class Convolutional:
"""
卷积层
"""
@staticmethod
def init(layer, flow_data_shape, config):
"""
初始化
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param flow_data_shape: 流动数据的形状
:param config:配置
:return: 更新后的层, 流动数据的形状
"""
# 何凯明初始化,主要针对relu激活函数
if layer["weight_init"] == 'msra':
layer["W"] = np.random.randn(layer['kernel_number'], flow_data_shape['channel'], layer['kernel_height'],
layer['kernel_width']) * (
np.sqrt(2 / (flow_data_shape['channel'] * layer['kernel_height'] * layer['kernel_width'])))
# xavier,主要针对tanh激活函数
elif layer["weight_init"] == 'xavier':
layer["W"] = np.random.randn(layer['kernel_number'], flow_data_shape['channel'], layer['kernel_height'],
layer['kernel_width']) * (
np.sqrt(1 / (flow_data_shape['channel'] * layer['kernel_height'] * layer['kernel_width'])))
else:
layer["W"] = np.random.randn(layer['kernel_number'], flow_data_shape['channel'], layer['kernel_height'],
layer['kernel_width']) * 0.01
layer["b"] = np.zeros((layer['kernel_number'], 1, 1, 1))
flow_data_shape = {
"batch_size": flow_data_shape['batch_size'],
"channel": layer['kernel_number'],
"height": ((flow_data_shape['height'] + layer['padding'] * 2 - layer['kernel_height'])) // layer[
'stride'] + 1,
"width": ((flow_data_shape['width'] + layer['padding'] * 2 - layer['kernel_width']) // layer['stride']) + 1
}
print(layer['name'] + ",W.shape:", layer["W"].shape)
print(layer['name'] + ",b.shape:", layer["b"].shape)
return layer, flow_data_shape
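    # Worked example of the output-shape formula in init() above (numbers are
    # illustrative): a 32x32 input with a 3x3 kernel, padding=1, stride=1 gives
    # (32 + 2*1 - 3) // 1 + 1 = 32 ("same" size); with stride=2 it gives
    # (32 + 2*1 - 3) // 2 + 1 = 16.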
@staticmethod
# 多核也没问题
def forword(flow_data, layer, is_train):
"""
前向传播
:param flow_data: 流动数据
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param is_train: 是否是训练模式
:return: 流动数据, 更新后的层
"""
padding = layer['padding']
if padding != 0:
flow_data = Convolutional.padding(flow_data, padding)
layer['padding_input'] = flow_data
kernel_height = layer['kernel_height']
kernel_width = layer['kernel_width']
batch_size = flow_data.shape[0]
output_height = ((flow_data.shape[2] - kernel_width) // layer['stride']) + 1
output_width = ((flow_data.shape[3] - kernel_height) // layer['stride']) + 1
# 卷积输出
output = np.zeros((batch_size, layer['kernel_number'], output_height, output_width))
# 开始卷积
for channel in range(output.shape[1]): # 遍历输出的通道数,输出的通道数等于卷积核的个数
for height in range(output.shape[2]): # 遍历输出的高
for width in range(output.shape[3]): # 遍历输出的宽
# 滑动窗口截取部分
sliding_window = flow_data[:,:,
height * layer['stride']:height * layer['stride'] + kernel_height,
width * layer['stride']:width * layer['stride'] + kernel_width
]
output[:,channel,height,width] = np.sum(np.sum(np.sum((sliding_window * layer["W"][channel]) + layer["b"][channel], axis=2), axis=2), axis=1)
return output, layer
@staticmethod
def backword(flow_data, layer, config):
"""
反向传播
:param flow_data: 流动数据
:param layer: 层,包含该层的权重、偏置项、梯度、前向输入输出缓存、实例化对象等信息
:param config:配置
:return: 流动数据, 更新后的层
"""
layer["dW"] = np.zeros(layer['W'].shape)
layer["db"] = np.zeros(layer['b'].shape)
kernel_height = layer['kernel_height']
kernel_width = layer['kernel_width']
if layer['padding'] != 0:
forword_input = layer['padding_input']
else:
forword_input = layer['input']
output = np.zeros((forword_input.shape))
        for channel in range(flow_data.shape[1]):  # iterate over the incoming gradient's channels; one channel per kernel
            for height in range(flow_data.shape[2]):  # iterate over the incoming gradient's height
                for width in range(flow_data.shape[3]):  # iterate over the incoming gradient's width
                    # sliding window sliced from the forward-pass input
                    sliding_window = forword_input[:, :,
                                     height * layer['stride']:height * layer['stride'] + kernel_height,
                                     width * layer['stride']:width * layer['stride'] + kernel_width
                                     ]
                    # dx
                    output[:, :,
                    height * layer['stride']:height * layer['stride'] + kernel_height,
                    width * layer['stride']:width * layer['stride'] + kernel_width
                    ] += flow_data[:, channel, height, width].reshape(flow_data.shape[0], 1, 1, 1) * layer['W'][channel]
                    # per-kernel gradient = sliding window of the forward input * gradient at the matching channel (kernel), height and width
                    layer["dW"][channel] += np.mean(flow_data[:, channel, height, width].reshape(flow_data.shape[0], 1, 1, 1) * sliding_window, axis=0)
                    layer["db"][channel][0][0][0] += np.mean(flow_data[:, channel, height, width])
if layer['padding'] != 0:
output = Convolutional.delete_padding(output, layer['padding'])
return output, layer
@staticmethod
def update_parameters(layer, config, iteration):
"""
        Update the weights and biases.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :param iteration: iteration count
        :return: updated layer
"""
        # keys of the parameters that need updating
keys = ['W', 'b']
if "optimizer" in config.keys() and config["optimizer"] == 'momentum':
layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient'])
elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop':
layer = Rmsprop.update_parameters(layer, keys, config['learning_rate'])
elif "optimizer" in config.keys() and config["optimizer"] == 'adam':
layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration)
else:
            # fall back to plain SGD by default
layer = Sgd.update_parameters(layer, keys, config['learning_rate'])
return layer
@staticmethod
def padding(flow_data, padding):
"""
        Zero padding.
        :param flow_data: data flowing through the network
        :param padding: number of rings of zeros to pad on each side
:return:
"""
padding_flow_data = np.zeros((flow_data.shape[0], flow_data.shape[1], flow_data.shape[2] + padding * 2,
flow_data.shape[3] + padding * 2))
        for batch in range(flow_data.shape[0]):  # iterate over the samples
            for channel in range(flow_data.shape[1]):  # iterate over the channels
                # pad the 2-D matrix with `padding` rings of zeros on each side
padding_flow_data[batch][channel] = np.pad(flow_data[batch][channel],
((padding, padding), (padding, padding)), 'constant')
return padding_flow_data
@staticmethod
def delete_padding(flow_data, padding):
"""
        Remove zero padding.
        :param flow_data: data flowing through the network
        :param padding: number of outer rings to strip off
:return:
"""
        # allocate the output structure
delete_padding_flow_data = np.zeros((flow_data.shape[0], flow_data.shape[1], flow_data.shape[2] - padding * 2,
flow_data.shape[3] - padding * 2))
for batch in range(flow_data.shape[0]):
for channel in range(flow_data.shape[1]):
height = flow_data[batch][channel].shape[0]
width = flow_data[batch][channel].shape[1]
                # copy over the matching region
delete_padding_flow_data[batch][channel] = flow_data[batch][channel][padding:height - padding,
padding:width - padding]
return delete_padding_flow_data | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/convolutional.py | convolutional.py |
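

# --- Usage sketch for the Convolutional layer above (illustrative only, not part of
# --- the original package). The dict keys mirror the ones read by init()/forword();
# --- the layer name, shapes and empty config here are made up for the demo. The
# --- training framework normally supplies 'config' and caches layer['input'] itself.
if __name__ == '__main__':
    import numpy as np
    demo_layer = {
        'name': 'conv_demo', 'weight_init': 'msra',
        'kernel_number': 2, 'kernel_height': 3, 'kernel_width': 3,
        'padding': 1, 'stride': 1,
    }
    demo_shape = {'batch_size': 4, 'channel': 3, 'height': 8, 'width': 8}
    demo_layer, demo_shape = Convolutional.init(demo_layer, demo_shape, config={})
    x = np.random.randn(4, 3, 8, 8)
    out, demo_layer = Convolutional.forword(x, demo_layer, is_train=True)
    print(out.shape)  # expected (4, 2, 8, 8): padding=1 and stride=1 keep the spatial size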
import numpy as np
from ..optimizer.adam import Adam
from ..optimizer.momentum import Momentum
from ..optimizer.rmsprop import Rmsprop
from ..optimizer.sgd import Sgd
class LSTM:
@staticmethod
def init(layer, flow_data_shape, config):
sequence_length = int(flow_data_shape["sequence_length"])
neurons_number = layer['neurons_number']
        # He (Kaiming / MSRA) initialization, mainly for ReLU activations
if layer["weight_init"] == 'msra':
layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(2 / neurons_number))
        # Xavier initialization, mainly for tanh activations
elif layer["weight_init"] == 'xavier':
layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(1 / neurons_number))
else:
layer["Wf"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Uf"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["Wi"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Ui"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["Wa"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Ua"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["Wo"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Uo"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["V"] = np.random.randn(sequence_length, neurons_number) * 0.01
layer["bf"] = np.zeros((neurons_number, 1))
layer["bi"] = np.zeros((neurons_number, 1))
layer["ba"] = np.zeros((neurons_number, 1))
layer["bo"] = np.zeros((neurons_number, 1))
layer["c"] = np.zeros((sequence_length, 1))
flow_data_shape = {
"flatten_size": sequence_length,
"batch_size": flow_data_shape["batch_size"]
}
return layer, flow_data_shape
@staticmethod
def forword(flow_data, layer, is_train):
ht = np.zeros((layer['neurons_number'], flow_data.shape[0]))
layer["cache_ht_-1"] = ht
ct = np.zeros((layer['neurons_number'], flow_data.shape[0]))
layer["cache_ct_-1"] = ht
for i in range(flow_data.shape[1]):
xt = flow_data[:, i]
layer["cache_xt_" + str(i)] = xt
            # forget gate
ft_1 = np.dot(layer["Wf"], ht)
layer["cache_ft_1_" + str(i)] = ft_1
ft_2 = np.dot(layer["Uf"], xt.T)
layer["cache_ft_2_" + str(i)] = ft_2
ft_3 = ft_1 + ft_2 + layer["bf"]
layer["cache_ft_3_" + str(i)] = ft_3
# ft = Sigmoid.forword(ft_3)
ft = 1 / (1 + np.exp(-ft_3))
layer["cache_ft_" + str(i)] = ft
            # input gate, part 1 (gate activation)
it_1 = np.dot(layer["Wi"], ht)
layer["cache_it_1_" + str(i)] = it_1
it_2 = np.dot(layer["Ui"], xt.T)
layer["cache_it_2_" + str(i)] = it_2
it_3 = it_1 + it_2 + layer["bi"]
layer["cache_it_3_" + str(i)] = it_3
# it = Sigmoid.forword(it_3)
it = 1 / (1 + np.exp(-it_3))
layer["cache_it_" + str(i)] = it
            # input gate, part 2 (candidate values)
at_1 = np.dot(layer["Wa"], ht)
layer["cache_at_1_" + str(i)] = at_1
at_2 = np.dot(layer["Ua"], xt.T)
layer["cache_at_2_" + str(i)] = at_2
at_3 = at_1 + at_2 + layer["ba"]
layer["cache_at_3_" + str(i)] = at_3
# at = Tanh.forword(at_3, layer, is_train)
at = np.tanh(at_3)
layer["cache_at_" + str(i)] = at
            # cell state update
ct_1 = ct * ft
layer["cache_ct_1_" + str(i)] = ct_1
ct_2 = it * at
layer["cache_ct_2_" + str(i)] = ct_2
ct = ct_1 + ct_2
layer["cache_ct_" + str(i)] = ct
ot_1 = np.dot(layer["Wo"], ht)
layer["cache_ot_1_" + str(i)] = ot_1
ot_2 = np.dot(layer["Uo"], xt.T)
layer["cache_ot_2_" + str(i)] = ot_2
ot_3 = ot_1 + ot_2 + layer["bo"]
layer["cache_ot_3_" + str(i)] = ot_3
# ot = Sigmoid.forword(ot_3)
ot = 1 / (1 + np.exp(-ot_3))
layer["cache_ot_" + str(i)] = ot
            # output gate
# ht_1 = Tanh.forword(ct)
ht_1 = np.tanh(ct)
layer["cache_ht_1_" + str(i)] = ht_1
ht = ot * ht_1
layer["cache_ht_" + str(i)] = ht
flow_data = np.dot(layer["V"], ht) + layer["c"]
return flow_data, layer
@staticmethod
def backword(flow_data, layer, config):
sequence_number = layer['input'].shape[1]
ct = np.zeros(layer["cache_ct_0"].shape)
layer["dc"] = np.sum(flow_data, axis=1, keepdims=True)
layer["dV"] = np.dot(flow_data, layer["cache_ht_" + str(sequence_number - 1)].T)
ht = np.dot(layer["V"].T, flow_data)
output = np.zeros(layer["input"].shape)
layer["dbo"] = np.zeros(layer["bo"].shape)
layer["dWo"] = np.zeros(layer["Wo"].shape)
layer["dUo"] = np.zeros(layer["Uo"].shape)
layer["dba"] = np.zeros(layer["ba"].shape)
layer["dWa"] = np.zeros(layer["Wa"].shape)
layer["dUa"] = np.zeros(layer["Ua"].shape)
layer["dbi"] = np.zeros(layer["bi"].shape)
layer["dWi"] = np.zeros(layer["Wi"].shape)
layer["dUi"] = np.zeros(layer["Ui"].shape)
layer["dbf"] = np.zeros(layer["bf"].shape)
layer["dWf"] = np.zeros(layer["Wf"].shape)
layer["dUf"] = np.zeros(layer["Uf"].shape)
for i in reversed(range(0, sequence_number)):
ht_1 = ht * layer["cache_ot_" + str(i)]
# ct = ct + Tanh.backword(ht_1, layer["cache_ht_1_" + str(i)])
# dtanh/dz = 1-a^2
ct = ct + ht_1 * (1 - np.power(layer["cache_ht_1_" + str(i)], 2))
ct_1 = ct
ct = ct_1 * layer["cache_ft_" + str(i)]
ot = ht * layer["cache_ht_1_" + str(i)]
# ot_3 = Sigmoid.backword(ot, layer["cache_ot_" + str(i)])
# dsigmoid/dz = a*(1-a)
ot_3 = ot * (layer["cache_ot_" + str(i)]*(1-layer["cache_ot_" + str(i)]))
layer["dbo"] += np.sum(ot_3, axis=1, keepdims=True)
layer["dWo"] += np.dot(ot_3, layer["cache_ht_" + str(i)].T)
layer["dUo"] += np.dot(ot_3, layer["cache_xt_" + str(i)])
ot_2 = ot_3
ot_1 = ot_3
ct_2 = ct
at = ct_2 * layer["cache_it_" + str(i)]
# at_3 = Tanh.backword(at, layer["cache_at_" + str(i)])
# dtanh/dz = 1-a^2
at_3 = at * (1 - np.power(layer["cache_at_" + str(i)], 2))
layer["dba"] += np.sum(at_3, axis=1, keepdims=True)
layer["dWa"] += np.dot(at_3, layer["cache_ht_" + str(i)].T)
layer["dUa"] += np.dot(at_3, layer["cache_xt_" + str(i)])
at_1 = at_3
at_2 = at_3
it = ct_2 * layer["cache_at_" + str(i)]
# it_3 = Sigmoid.backword(it, layer["cache_it_" + str(i)])
# dsigmoid/dz = a*(1-a)
            it_3 = it * (layer["cache_it_" + str(i)]*(1-layer["cache_it_" + str(i)]))
layer["dbi"] += np.sum(it_3, axis=1, keepdims=True)
layer["dWi"] += np.dot(it_3, layer["cache_ht_" + str(i)].T)
layer["dUi"] += np.dot(it_3, layer["cache_xt_" + str(i)])
it_2 = it_3
it_1 = it_3
            ft = ct_1 * layer["cache_ct_" + str(i - 1)]
# ft_3 = Sigmoid.backword(ft, layer["cache_ft_" + str(i)])
# dsigmoid/dz = a*(1-a)
ft_3 = ft * (layer["cache_ft_" + str(i)]*(1-layer["cache_ft_" + str(i)]))
layer["dbf"] += np.sum(ft_3, axis=1, keepdims=True)
layer["dWf"] += np.dot(ft_3, layer["cache_ht_" + str(i)].T)
layer["dUf"] += np.dot(ft_3, layer["cache_xt_" + str(i)])
ft_2 = ft_3
ft_1 = ft_3
xt = np.dot(layer["Uf"].T, ft_2) + np.dot(layer["Ui"].T, it_2) + np.dot(layer["Ua"].T, at_2) + np.dot(
layer["Uo"].T, ot_2)
ht = np.dot(layer["Wf"].T, ft_1) + np.dot(layer["Wi"].T, it_1) + np.dot(layer["Wa"].T, at_1) + np.dot(
layer["Wo"].T, ot_1)
output[:, i] = xt.T
return output, layer
# @staticmethod
# def update_parameters(layer, config, iteration):
# layer["Wf"] -= config["learning_rate"] * layer["dWf"]
# layer["Uf"] -= config["learning_rate"] * layer["dUf"]
# layer["Wi"] -= config["learning_rate"] * layer["dWi"]
# layer["Ui"] -= config["learning_rate"] * layer["dUi"]
# layer["Wa"] -= config["learning_rate"] * layer["dWa"]
# layer["Ua"] -= config["learning_rate"] * layer["dUa"]
# layer["Wo"] -= config["learning_rate"] * layer["dWo"]
# layer["Uo"] -= config["learning_rate"] * layer["dUo"]
# layer["V"] -= config["learning_rate"] * layer["dV"]
# layer["bf"] -= config["learning_rate"] * layer["dbf"]
# layer["bi"] -= config["learning_rate"] * layer["dbi"]
# layer["ba"] -= config["learning_rate"] * layer["dba"]
# layer["bo"] -= config["learning_rate"] * layer["dbo"]
# layer["c"] -= config["learning_rate"] * layer["dc"]
# return layer
@staticmethod
def update_parameters(layer, config, iteration):
"""
        Update the weights and biases.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :param iteration: iteration count
        :return: updated layer
"""
        # keys of the parameters to update
keys = ['Wf', 'Uf', 'Wi', 'Ui', 'Wa', 'Ua', 'Wo', 'Uo', 'V', 'bf', 'bi', 'ba', 'bo', 'c']
if "optimizer" in config.keys() and config["optimizer"] == 'momentum':
layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient'])
elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop':
layer = Rmsprop.update_parameters(layer, keys, config['learning_rate'])
elif "optimizer" in config.keys() and config["optimizer"] == 'adam':
layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration)
else:
            # fall back to plain SGD by default
layer = Sgd.update_parameters(layer, keys, config['learning_rate'])
return layer | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/lstm.py | lstm.py |
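

# --- Usage sketch for the LSTM layer above (illustrative only, not from the original
# --- package). It wires init()/forword() together by hand; here "sequence_length" is
# --- the per-step feature size and the middle axis of the input is the number of time
# --- steps, which is how forword() indexes the data. All concrete numbers are made up.
if __name__ == '__main__':
    import numpy as np
    demo_layer = {'name': 'lstm_demo', 'weight_init': 'xavier', 'neurons_number': 16}
    demo_shape = {'batch_size': 4, 'sequence_length': 8}
    demo_layer, demo_shape = LSTM.init(demo_layer, demo_shape, config={})
    x = np.random.randn(4, 5, 8)  # (batch, time steps, features per step)
    out, demo_layer = LSTM.forword(x, demo_layer, is_train=True)
    print(out.shape)  # (8, 4): (features, batch), ready to feed a FullyConnected layer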
import numpy as np
class Pooling:
"""
    Pooling layer
"""
@staticmethod
def init(layer, flow_data_shape, config):
"""
        Initialization.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param flow_data_shape: shape of the data flowing through the network
        :param config: configuration
        :return: updated layer, shape of the flowing data
"""
flow_data_shape = {
"batch_size": flow_data_shape['batch_size'],
"channel": flow_data_shape['channel'],
"height": (flow_data_shape['height'] - layer['kernel_height']) // layer['stride'] + 1,
"width": (flow_data_shape['width'] - layer['kernel_width']) // layer['stride'] + 1
}
return layer, flow_data_shape
@staticmethod
def forword(flow_data, layer, is_train):
"""
        Forward propagation.
        :param flow_data: data flowing through the network
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param is_train: whether we are in training mode
        :return: flowing data, updated layer
"""
kernel_height = layer['kernel_height']
kernel_width = layer['kernel_width']
batch_size = flow_data.shape[0]
channels = flow_data.shape[1]
        output_height = ((flow_data.shape[2] - kernel_height) // layer['stride']) + 1
        output_width = ((flow_data.shape[3] - kernel_width) // layer['stride']) + 1
        # pooled output
        pooling_out = np.zeros((batch_size, channels, output_height, output_width))
        # start pooling
        for batch in range(batch_size):  # iterate over the samples
            for channel in range(channels):  # iterate over the channels
                for height in range(output_height):  # iterate over the output height
                    for width in range(output_width):  # iterate over the output width
                        # slice out the sliding window
                        sliding_window = flow_data[batch][channel][
                                         height * layer['stride']:height * layer['stride'] + kernel_height,
                                         width * layer['stride']:width * layer['stride'] + kernel_width
                                         ]
                        if 'mode' in layer.keys() and layer['mode'] == 'average':
                            # average pooling
                            pooling_out[batch][channel][height][width] = np.average(sliding_window)
                        else:
                            # max pooling by default
                            pooling_out[batch][channel][height][width] = np.max(sliding_window)
return pooling_out, layer
@staticmethod
def backword(flow_data, layer, config):
"""
        Backward propagation.
        :param flow_data: data flowing through the network (gradient from the next layer)
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :return: flowing data, updated layer
"""
kernel_height = layer['kernel_height']
kernel_width = layer['kernel_width']
kernel_total = kernel_height*kernel_width
stride = layer['stride']
output = np.zeros(layer['input'].shape)
batch_size = flow_data.shape[0]
# np.savetxt("input.csv", flow_data[0][0], delimiter=',')
# np.savetxt("forward_input.csv", layer['input'][0][0], delimiter=',')
for batch in range(batch_size):
for channel in range(flow_data.shape[1]):
for height in range(flow_data.shape[2]):
for width in range(flow_data.shape[3]):
if 'mode' in layer.keys() and layer['mode'] == 'average':
                            # average pooling: spread the gradient evenly over the window
output[batch][channel][
height * stride:height * stride + kernel_height,
width * stride:width * stride + kernel_width
] += flow_data[batch][channel][height][width]/kernel_total
else:
                            # slice the sliding window out of the forward-pass input
sliding_window = layer['input'][batch][channel][
height * stride:height * stride + kernel_height,
width * stride:width * stride + kernel_width
]
                            # max pooling by default: route the gradient to the window maximum
max_height, max_width =np.unravel_index(sliding_window.argmax(), sliding_window.shape)
output[batch][channel][max_height+height * stride][max_width+width * stride] += flow_data[batch][channel][height][width]
return output, layer
@staticmethod
def update_parameters(layer, config, iteration):
"""
        Update the weights and biases; the pooling layer has no trainable parameters, so this is a no-op.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :param iteration: iteration count
        :return: updated layer
"""
return layer | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/pooling.py | pooling.py |
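

# --- Usage sketch for the Pooling layer above (illustrative only, not from the original
# --- package). With a 2x2 window and stride 2 the default max pooling keeps the largest
# --- value of each window; set layer['mode'] = 'average' for average pooling instead.
if __name__ == '__main__':
    import numpy as np
    demo_layer = {'name': 'pool_demo', 'kernel_height': 2, 'kernel_width': 2, 'stride': 2}
    x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
    out, demo_layer = Pooling.forword(x, demo_layer, is_train=True)
    print(out[0, 0])  # [[ 5.  7.]
                      #  [13. 15.]]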
import numpy as np
from ..optimizer.adam import Adam
from ..optimizer.momentum import Momentum
from ..optimizer.rmsprop import Rmsprop
from ..optimizer.sgd import Sgd
class FullyConnected:
"""
    Fully connected layer
"""
@staticmethod
def init(layer, flow_data_shape, config):
"""
        Initialization.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param flow_data_shape: shape of the data flowing through the network
        :param config: configuration
        :return: updated layer, shape of the flowing data
"""
flatten_size = int(flow_data_shape["flatten_size"])
if layer["weight_init"] == 'msra':
            # He (Kaiming / MSRA) initialization, mainly for ReLU activations
layer["W"] = np.random.randn(layer['neurons_number'],
flatten_size) * (np.sqrt(2 / flatten_size))
elif layer["weight_init"] == 'xavier':
            # Xavier initialization, mainly for tanh activations
layer["W"] = np.random.randn(layer['neurons_number'],
flatten_size) * (np.sqrt(1 / flatten_size))
else:
            # Gaussian initialization
layer["W"] = np.random.randn(layer['neurons_number'], flatten_size) * 0.01
layer["b"] = np.zeros((layer['neurons_number'], 1))
flow_data_shape = {
"flatten_size": layer['neurons_number'],
"batch_size": flow_data_shape["batch_size"]
}
print(layer['name']+",W.shape:", layer["W"].shape)
print(layer['name']+",b.shape:", layer["b"].shape)
return layer, flow_data_shape
@staticmethod
def forword(flow_data, layer, is_train):
"""
        Forward propagation.
        :param flow_data: data flowing through the network
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param is_train: whether we are in training mode
        :return: flowing data, updated layer
"""
flow_data = np.dot(layer['W'], flow_data) + layer['b']
return flow_data, layer
@staticmethod
def backword(flow_data, layer, config):
"""
        Backward propagation.
        :param flow_data: data flowing through the network (gradient from the next layer)
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :return: flowing data, updated layer
"""
layer["dW"] = (1 / config['batch_size']) * np.dot(flow_data, layer['input'].T)
layer["db"] = (1 / config['batch_size']) * np.sum(flow_data, axis=1, keepdims=True)
# dx
flow_data = np.dot(layer['W'].T, flow_data)
return flow_data, layer
@staticmethod
def update_parameters(layer, config, iteration):
"""
        Update the weights and biases.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :param iteration: iteration count
        :return: updated layer
"""
        # keys of the parameters to update
keys = ['W', 'b']
if "optimizer" in config.keys() and config["optimizer"] == 'momentum':
layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient'])
elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop':
layer = Rmsprop.update_parameters(layer, keys, config['learning_rate'])
elif "optimizer" in config.keys() and config["optimizer"] == 'adam':
layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration)
else:
# 默认使用 sgd
layer = Sgd.update_parameters(layer, keys, config['learning_rate'])
return layer | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/fully_connected.py | fully_connected.py |
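

# --- Usage sketch for the FullyConnected layer above (illustrative only, not from the
# --- original package). Note that the data flows as (features, batch) columns, so the
# --- layer computes W.dot(x) + b; the shapes below are made up for the demo.
if __name__ == '__main__':
    import numpy as np
    demo_layer = {'name': 'fc_demo', 'weight_init': 'msra', 'neurons_number': 10}
    demo_shape = {'flatten_size': 32, 'batch_size': 4}
    demo_layer, demo_shape = FullyConnected.init(demo_layer, demo_shape, config={})
    x = np.random.randn(32, 4)  # 32 features per sample, 4 samples
    out, demo_layer = FullyConnected.forword(x, demo_layer, is_train=True)
    print(out.shape)  # (10, 4)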
import numpy as np
from .activation.tanh import Tanh
from .activation.sigmoid import Sigmoid
class LSTM():
@staticmethod
def init(layer, flow_data_shape):
sequence_length = int(flow_data_shape["sequence_length"])
neurons_number = layer['neurons_number']
        # He (Kaiming / MSRA) initialization, mainly for ReLU activations
if layer["weight_init"] == 'msra':
layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(2 / neurons_number))
layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(2 / sequence_length))
layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(2 / neurons_number))
        # Xavier initialization, mainly for tanh activations
elif layer["weight_init"] == 'xavier':
layer["Wf"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Uf"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["Wi"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Ui"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["Wa"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Ua"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["Wo"] = np.random.randn(neurons_number, neurons_number) * (np.sqrt(1 / neurons_number))
layer["Uo"] = np.random.randn(neurons_number, sequence_length) * (np.sqrt(1 / sequence_length))
layer["V"] = np.random.randn(sequence_length, neurons_number) * (np.sqrt(1 / neurons_number))
else:
layer["Wf"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Uf"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["Wi"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Ui"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["Wa"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Ua"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["Wo"] = np.random.randn(neurons_number, neurons_number) * 0.01
layer["Uo"] = np.random.randn(neurons_number, sequence_length) * 0.01
layer["V"] = np.random.randn(sequence_length, neurons_number) * 0.01
layer["bf"] = np.zeros((neurons_number, 1))
layer["bi"] = np.zeros((neurons_number, 1))
layer["ba"] = np.zeros((neurons_number, 1))
layer["bo"] = np.zeros((neurons_number, 1))
layer["c"] = np.zeros((sequence_length, 1))
flow_data_shape = {
"flatten_size": sequence_length,
"batch_size": flow_data_shape["batch_size"]
}
return layer, flow_data_shape
@staticmethod
def forword(layer, flow_data):
ht = np.zeros((layer['neurons_number'], flow_data.shape[0]))
layer["cache_ht_0"] = ht
ct = np.zeros((layer['neurons_number'], flow_data.shape[0]))
layer["cache_ct_-1"] = ht
for i in range(flow_data.shape[1]):
xt = flow_data[:, i]
layer["cache_xt_" + str(i)] = xt
            # stack the previous hidden state and the current input column-wise into [h; x]
            ht_xt = np.concatenate((ht, xt.T), axis=0)
layer["cache_ht_xt_" + str(i)] = ht_xt
ft_1 = np.dot(layer["Wf"], ht_xt) + layer["bf"]
layer["cache_ft_1_" + str(i)] = ft_1
ft = Sigmoid.forword(ft_1)
layer["cache_ft_" + str(i)] = ft
it_1 = np.dot(layer["Wi"], ht_xt) + layer["bi"]
layer["cache_it_1_" + str(i)] = it_1
it = Sigmoid.forword(it_1)
layer["cache_it_" + str(i)] = it
at_1 = np.dot(layer["Wa"], ht_xt) + layer["ba"]
layer["cache_at_1_" + str(i)] = at_1
at = Tanh.forword(at_1)
layer["cache_at_" + str(i)] = at
ot_1 = np.dot(layer["Wo"], ht_xt) + layer["bo"]
layer["cache_ot_1_" + str(i)] = ot_1
ot = Sigmoid.forword(ot_1)
layer["cache_ot_" + str(i)] = ot
ct_1 = ct * ft
layer["cache_ct_1_" + str(i)] = ct_1
ct_2 = it * at
layer["cache_ct_2_" + str(i)] = ct_2
ct = ct_1 + ct_2
layer["cache_ct_" + str(i)] = ct
ht_1 = Tanh.forword(ct)
layer["cache_ht_1_" + str(i)] = ht_1
ht = ot * ht_1
layer["cache_ht_" + str(i)] = ht
yt = np.dot(layer["Wy"], ht) + layer["by"]
layer["cache_yt"] = yt
flow_data = yt
# print(flow_data.shape)
# exit()
# print(flow_data.shape)
# exit()
return flow_data, layer
@staticmethod
def backword(flow_data, layer, config):
sequence_number = layer['input'].shape[1]
layer["dy"] = flow_data
layer["dWy"] = np.dot(flow_data, layer["cache_ht_" + str(sequence_number - 1)].T)
ht = np.dot(layer["Wy"].T, flow_data)
output = np.zeros(layer["input"].shape)
layer["dbo"] = np.zeros(layer["bo"].shape)
layer["dWo"] = np.zeros(layer["Wo"].shape)
layer["dUo"] = np.zeros(layer["Uo"].shape)
layer["dba"] = np.zeros(layer["ba"].shape)
layer["dWa"] = np.zeros(layer["Wa"].shape)
layer["dUa"] = np.zeros(layer["Ua"].shape)
layer["dbi"] = np.zeros(layer["bi"].shape)
layer["dWi"] = np.zeros(layer["Wi"].shape)
layer["dUi"] = np.zeros(layer["Ui"].shape)
layer["dbf"] = np.zeros(layer["bf"].shape)
layer["dWf"] = np.zeros(layer["Wf"].shape)
layer["dUf"] = np.zeros(layer["Uf"].shape)
ct = np.zeros(layer["cache_ct_0"].shape)
for i in reversed(range(0, sequence_number)):
ht_1 = ht * layer["cache_ot_" + str(i)]
ct = Tanh.backword(ht_1, layer["cache_ht_1_" + str(i)]) + ct
ct_1 = ct
ct = ct_1 * layer["cache_ft_" + str(i)]
ct_2 = ct
ot = ht * layer["cache_ht_1_" + str(i)]
ot_1 = Sigmoid.backword(ot, layer["cache_ot_" + str(i)])
layer["dbo"] += np.sum(ot_1, axis=1, keepdims=True)
layer["dWo"] += np.dot(ot_1, layer["cache_ht_xt_" + str(i)].T)
at = ct_2 * layer["cache_it_" + str(i)]
at_1 = Tanh.backword(at, layer["cache_at_" + str(i)])
layer["dba"] += np.sum(at_1, axis=1, keepdims=True)
layer["dWa"] += np.dot(at_1, layer["cache_ht_xt_" + str(i)].T)
it = ct_2 * layer["cache_at_" + str(i)]
it_1 = Sigmoid.backword(it, layer["cache_it_" + str(i)])
layer["dbi"] += np.sum(it_1, axis=1, keepdims=True)
layer["dWi"] += np.dot(it_1, layer["cache_ht_xt_" + str(i)].T)
ft = ct_1 * layer["cache_ct_" + str(i - 1)]
ft_1 = Sigmoid.backword(ft, layer["cache_ft_" + str(i)])
layer["dbf"] += np.sum(ft_1, axis=1, keepdims=True)
layer["dWf"] += np.dot(ft_1, layer["cache_ht_xt_" + str(i)].T)
# ht_xt = np.dot(layer["Uf"].T, ft_2) + np.dot(layer["Ui"].T, it_2) + np.dot(layer["Ua"].T, at_2) + np.dot(
# layer["Uo"].T, ot_2)
ht_xt = np.dot(layer["Wf"].T, ft_1) + np.dot(layer["Wi"].T, it_1) + np.dot(layer["Wa"].T, at_1) + np.dot(
layer["Wo"].T, ot_1)
ht = ht_xt[:ht.shape[0]]
xt = ht_xt[ht.shape[0]:]
output[:, i] = xt.T
return layer, output
    # activation function for the output unit
@staticmethod
def softmax(x):
x = np.array(x)
max_x = np.max(x)
return np.exp(x - max_x) / np.sum(np.exp(x - max_x)) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/lstm_2.py | lstm_2.py |
import numpy as np
class BatchNormalization:
"""
    Batch normalization
"""
@staticmethod
def init(layer, flow_data_shape, config):
"""
        Initialization.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param flow_data_shape: shape of the data flowing through the network
        :param config: configuration
        :return: updated layer, shape of the flowing data
"""
flatten_size = int(flow_data_shape["flatten_size"])
layer["gamma"] = np.ones((flatten_size, 1))
layer["beta"] = np.zeros((flatten_size, 1))
print(layer['name'] + ",gamma.shape:", layer["gamma"].shape)
print(layer['name'] + ",beta.shape:", layer["beta"].shape)
return layer, flow_data_shape
@staticmethod
def forword(flow_data, layer, is_train):
"""
        Forward propagation.
        :param flow_data: data flowing through the network
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param is_train: whether we are in training mode
        :return: flowing data, updated layer
"""
epsilon = 1e-8
layer['mean'] = np.mean(flow_data, axis=1, keepdims=True)
layer['std'] = np.std(flow_data, axis=1, keepdims=True)
layer['norm'] = (flow_data - layer['mean']) / (layer['std'] + epsilon)
flow_data = layer["gamma"] * layer['norm'] + layer["beta"]
return flow_data, layer
@staticmethod
def backword(flow_data, layer, config):
"""
        Backward propagation.
        :param flow_data: data flowing through the network (gradient from the next layer)
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :return: flowing data, updated layer
"""
epsilon = 1e-8
        # gradient of gamma
layer["dgamma"] = np.sum(flow_data * layer['norm'], axis=1, keepdims=True)
        # gradient of beta
layer["dbeta"] = np.sum(flow_data, axis=1, keepdims=True)
flow_data = (layer["gamma"] / (layer['std'] + epsilon)) * (
flow_data - layer["dgamma"] * layer['norm'] / config['batch_size'] - np.mean(flow_data, axis=1,
keepdims=True))
return flow_data, layer
@staticmethod
def update_parameters(layer, config, iteration):
"""
        Update the parameters.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :param iteration: iteration count
        :return: updated layer
"""
layer["gamma"] -= config["learning_rate"] * layer["dgamma"]
layer["beta"] -= config["learning_rate"] * layer["dbeta"]
return layer | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/batch_normalization.py | batch_normalization.py |
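

# --- Usage sketch for the BatchNormalization layer above (illustrative only, not from
# --- the original package). It checks the defining property of the forward pass: each
# --- feature row is rescaled to roughly zero mean and unit variance across the batch
# --- (gamma starts at 1 and beta at 0, so the affine part is initially the identity).
if __name__ == '__main__':
    import numpy as np
    demo_layer = {'name': 'bn_demo'}
    demo_layer, _ = BatchNormalization.init(demo_layer, {'flatten_size': 8, 'batch_size': 4}, config={})
    x = np.random.randn(8, 4) * 3.0 + 5.0  # (features, batch) with non-zero mean and scale
    out, demo_layer = BatchNormalization.forword(x, demo_layer, is_train=True)
    print(np.round(out.mean(axis=1), 6))  # approximately 0 for every feature
    print(np.round(out.std(axis=1), 6))   # approximately 1 for every feature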
import numpy as np
from ..optimizer.adam import Adam
from ..optimizer.momentum import Momentum
from ..optimizer.rmsprop import Rmsprop
from ..optimizer.sgd import Sgd
class RNN:
@staticmethod
def init(layer, flow_data_shape, config):
sequence_length = int(flow_data_shape["sequence_length"])
        # He (Kaiming / MSRA) initialization, mainly for ReLU activations
if layer["weight_init"] == 'msra':
layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * (
np.sqrt(2 / sequence_length))
layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * (
np.sqrt(2 / layer['neurons_number']))
layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * (
np.sqrt(2 / layer['neurons_number']))
        # Xavier initialization, mainly for tanh activations
elif layer["weight_init"] == 'xavier':
layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * (
np.sqrt(1 / sequence_length))
layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * (
np.sqrt(1 / layer['neurons_number']))
layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * (
np.sqrt(1 / layer['neurons_number']))
else:
layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * 0.01
layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * 0.01
layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * 0.01
layer["bW"] = np.zeros((layer['neurons_number'], 1))
layer["bV"] = np.zeros((flow_data_shape["sequence_length"], 1))
flow_data_shape = {
"flatten_size": flow_data_shape["sequence_length"],
"batch_size": flow_data_shape["batch_size"]
}
return layer, flow_data_shape
@staticmethod
    def forword(flow_data, layer, is_train):
# flow_data = flow_data[0]
h = np.zeros((layer['neurons_number'], flow_data.shape[0]))
for i in range(flow_data.shape[1]):
sequence = flow_data[:, i]
layer["U_input_" + str(i)] = sequence
U_multiply_X = np.dot(layer["U"], sequence.T)
layer["W_input_" + str(i)] = h
W_multiply_h = np.dot(layer["W"], h)
h = U_multiply_X + W_multiply_h
h = h + layer["bW"]
h = np.tanh(h)
layer["tanh_output"] = h
        # cache the input to the output projection V
layer["V_input"] = h
flow_data = np.dot(layer["V"], h) + layer["bV"]
return flow_data, layer
@staticmethod
def backword(flow_data, layer, config):
output_all = np.zeros(layer["input"].shape)
layer["dW"] = np.zeros(layer["W"].shape)
layer["dU"] = np.zeros(layer["U"].shape)
layer["dbW"] = np.zeros(layer["bW"].shape)
layer["dbV"] = np.sum(flow_data, axis=1, keepdims=True)
layer["dV"] = np.dot(flow_data, layer['V_input'].T)
h = np.dot(layer["V"].T, flow_data)
for i in reversed(range(0, layer['input'].shape[1])):
            # gradient through tanh
h = h * (1 - np.power(layer["tanh_output"], 2))
layer["dbW"] += np.sum(h, axis=1, keepdims=True)
layer["dW"] += np.dot(h, layer["W_input_" + str(i)].T)
layer["dU"] += np.dot(h, layer["U_input_" + str(i)])
output_all[:, i] = np.dot(h.T, layer["U"])
h = np.dot(layer["W"].T, h)
return output_all, layer
@staticmethod
def update_parameters(layer, config, iteration):
"""
        Update the weights and biases.
        :param layer: layer dict (weights, biases, gradients, cached forward inputs/outputs, layer instances, etc.)
        :param config: configuration
        :param iteration: iteration count
        :return: updated layer
"""
        # keys of the parameters to update
keys = ['U', 'W', 'V', 'bW', 'bV']
if "optimizer" in config.keys() and config["optimizer"] == 'momentum':
layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient'])
elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop':
layer = Rmsprop.update_parameters(layer, keys, config['learning_rate'])
elif "optimizer" in config.keys() and config["optimizer"] == 'adam':
layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration)
else:
            # fall back to plain SGD by default
layer = Sgd.update_parameters(layer, keys, config['learning_rate'])
return layer | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/rnn.py | rnn.py |
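

# --- Usage sketch for the RNN layer above (illustrative only, not from the original
# --- package). The recurrence computed in forword() is h_t = tanh(U·x_t + W·h_{t-1} + bW)
# --- followed by an output projection V·h_T + bV over the last hidden state; the shapes
# --- below are made up for the demo.
if __name__ == '__main__':
    import numpy as np
    demo_layer = {'name': 'rnn_demo', 'weight_init': 'xavier', 'neurons_number': 16}
    demo_shape = {'batch_size': 4, 'sequence_length': 8}
    demo_layer, demo_shape = RNN.init(demo_layer, demo_shape, config={})
    x = np.random.randn(4, 5, 8)  # (batch, time steps, features per step)
    out, demo_layer = RNN.forword(flow_data=x, layer=demo_layer, is_train=True)
    print(out.shape)  # (8, 4)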
"""Reuters topic classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .data_utils import get_file
# from ..preprocessing.sequence import _remove_long_seq
import numpy as np
import json
import warnings
def load_data(path='reuters.npz', num_words=None, skip_top=0,
maxlen=None, test_split=0.2, seed=113,
start_char=1, oov_char=2, index_from=3, **kwargs):
"""Loads the Reuters newswire classification dataset.
# Arguments
path: where to cache the data (relative to `~/.aadeeplearning/dataset`).
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: truncate sequences after this length.
test_split: Fraction of the dataset to be used as test data.
seed: random seed for sample shuffling.
start_char: The start of a sequence will be marked with this character.
Set to 1 because 0 is usually the padding character.
oov_char: words that were cut out because of the `num_words`
or `skip_top` limit will be replaced with this character.
index_from: index actual words with this index and higher.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
Note that the 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
# Legacy support
if 'nb_words' in kwargs:
warnings.warn('The `nb_words` argument in `load_data` '
'has been renamed `num_words`.')
num_words = kwargs.pop('nb_words')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
path = get_file(path,
origin='https://s3.amazonaws.com/text-datasets/reuters.npz',
file_hash='87aedbeb0cb229e378797a632c1997b6')
with np.load(path) as f:
xs, labels = f['x'], f['y']
np.random.seed(seed)
indices = np.arange(len(xs))
np.random.shuffle(indices)
xs = xs[indices]
labels = labels[indices]
if start_char is not None:
xs = [[start_char] + [w + index_from for w in x] for x in xs]
elif index_from:
xs = [[w + index_from for w in x] for x in xs]
# if maxlen:
# xs, labels = _remove_long_seq(maxlen, xs, labels)
if not num_words:
num_words = max([max(x) for x in xs])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs]
else:
xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
idx = int(len(xs) * (1 - test_split))
x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])
return (x_train, y_train), (x_test, y_test)
def get_word_index(path='reuters_word_index.json'):
"""Retrieves the dictionary mapping words to word indices.
# Arguments
path: where to cache the data (relative to `~/.aadeeplearning/dataset`).
# Returns
The word index dictionary.
"""
path = get_file(
path,
origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json',
file_hash='4d44cc38712099c9e383dc6e5f11a921')
f = open(path)
data = json.load(f)
f.close()
return data | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/reuters.py | reuters.py |
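

# --- Usage sketch for the Reuters loader above (illustrative only, not from the original
# --- package). The first call downloads the archive from S3 and caches it under
# --- ~/.aadeeplearning/datasets; num_words and test_split below are just example values.
if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = load_data(num_words=10000, test_split=0.2)
    print(len(x_train), 'train sequences,', len(x_test), 'test sequences')
    print('first label:', y_train[0])
    print('number of classes:', int(max(y_train)) + 1)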
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import multiprocessing as mp
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import warnings
import zipfile
from abc import abstractmethod
from contextlib import closing
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from .generic_utils import Progbar
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrive` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
# Arguments
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
    if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
        if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
        if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.aadeeplearning`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.aadeeplearning/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the AADeepLearning cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [AADeepLearning Directory](/faq/#where-is-the-aadeeplearning-configuration-filed-stored).
# Returns
Path to the downloaded file
""" # noqa
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.aadeeplearning')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.aadeeplearning')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
                if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from aadeeplearning.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
    # 'auto' defaults to sha256 here, since there is no reference hash to inspect
    if algorithm in ('sha256', 'auto'):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
# Arguments
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
Whether the file is valid
"""
    if ((algorithm == 'sha256') or
            (algorithm == 'auto' and len(file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`. The method `__getitem__` should return a complete batch.
# Notes
`Sequence` are a safer way to do multiprocessing. This structure guarantees
that the network will only train once on each sample per epoch which is not
the case with generators.
# Examples
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
# Arguments
index: position of the batch in the Sequence.
# Returns
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
# Returns
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
# Arguments
uid: int, Sequence identifier
i: index
# Returns
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
# Examples
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.close()
```
The `enqueuer.get()` should be an infinite stream of datas.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = mp.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Send current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
sequence: A `aadeeplearning.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool,
initargs=(seqs,))
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
if random_seed is not None:
ident = mp.current_process().ident
np.random.seed(random_seed + ident)
def next_sample(uid):
"""Get the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
# Arguments
uid: int, generator identifier
# Returns
The next value of generator `uid`.
"""
return six.next(_SHARED_SEQUENCES[uid])
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, sequence, use_multiprocessing=False, wait_time=None,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
self.random_seed = random_seed
if wait_time is not None:
warnings.warn('`wait_time` is not used anymore.',
DeprecationWarning)
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool_generator,
initargs=(seqs, self.random_seed))
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
list(map(lambda f: f.wait(), last_ones))
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
"Your generator is NOT thread-safe."
"AADeepLearning requires a thread-safe generator when"
"`use_multiprocessing=False, workers > 1`."
"For more information see issue #1638.")
six.reraise(*sys.exc_info()) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/data_utils.py | data_utils.py |
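

# --- Usage sketch for the Sequence / OrderedEnqueuer machinery above (illustrative only,
# --- not from the original package). A toy Sequence yields (x, 2*x) batches and the
# --- enqueuer streams them in order from a background worker thread.
if __name__ == '__main__':
    class RangeSequence(Sequence):
        def __init__(self, n, batch_size):
            self.n, self.batch_size = n, batch_size

        def __len__(self):
            return int(np.ceil(self.n / float(self.batch_size)))

        def __getitem__(self, idx):
            lo = idx * self.batch_size
            hi = min(lo + self.batch_size, self.n)
            x = np.arange(lo, hi)
            return x, 2 * x

    seq = RangeSequence(10, 3)
    enqueuer = OrderedEnqueuer(seq, use_multiprocessing=False)
    enqueuer.start(workers=1, max_queue_size=4)
    batches = enqueuer.get()
    for _ in range(len(seq)):
        x, y = next(batches)
        print(x, y)
    enqueuer.stop()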
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .data_utils import get_file
# from ..preprocessing.sequence import _remove_long_seq
import numpy as np
import json
import warnings
def load_data(path='imdb.npz', num_words=None, skip_top=0,
maxlen=None, seed=113,
start_char=1, oov_char=2, index_from=3, **kwargs):
"""Loads the IMDB dataset.
# Arguments
path: where to cache the data (relative to `~/.aadeeplearning/dataset`).
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: sequences longer than this will be filtered out.
seed: random seed for sample shuffling.
start_char: The start of a sequence will be marked with this character.
Set to 1 because 0 is usually the padding character.
oov_char: words that were cut out because of the `num_words`
or `skip_top` limit will be replaced with this character.
index_from: index actual words with this index and higher.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
# Raises
ValueError: in case `maxlen` is so low
that no input sequence could be kept.
Note that the 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
# Legacy support
if 'nb_words' in kwargs:
warnings.warn('The `nb_words` argument in `load_data` '
'has been renamed `num_words`.')
num_words = kwargs.pop('nb_words')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
path = get_file(path,
origin='https://s3.amazonaws.com/text-datasets/imdb.npz',
file_hash='599dadb1135973df5b59232a0e9a887c')
with np.load(path) as f:
x_train, labels_train = f['x_train'], f['y_train']
x_test, labels_test = f['x_test'], f['y_test']
np.random.seed(seed)
indices = np.arange(len(x_train))
np.random.shuffle(indices)
x_train = x_train[indices]
labels_train = labels_train[indices]
indices = np.arange(len(x_test))
np.random.shuffle(indices)
x_test = x_test[indices]
labels_test = labels_test[indices]
xs = np.concatenate([x_train, x_test])
labels = np.concatenate([labels_train, labels_test])
if start_char is not None:
xs = [[start_char] + [w + index_from for w in x] for x in xs]
elif index_from:
xs = [[w + index_from for w in x] for x in xs]
# if maxlen:
# xs, labels = _remove_long_seq(maxlen, xs, labels)
# if not xs:
# raise ValueError('After filtering for sequences shorter than maxlen=' +
# str(maxlen) + ', no sequence was kept. '
# 'Increase maxlen.')
if not num_words:
num_words = max([max(x) for x in xs])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [[w if (skip_top <= w < num_words) else oov_char for w in x]
for x in xs]
else:
xs = [[w for w in x if skip_top <= w < num_words]
for x in xs]
idx = len(x_train)
x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])
return (x_train, y_train), (x_test, y_test)
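# Minimal usage sketch (illustrative; run from user code, not at import time).
# The index offset of 3 matches the default `index_from` above; indices 0/1/2 are
# reserved for padding, start-of-sequence and out-of-vocabulary tokens.
#
#   (x_train, y_train), (x_test, y_test) = load_data(num_words=10000)
#   word_index = get_word_index()
#   inv_index = {v + 3: k for k, v in word_index.items()}
#   decoded_review = ' '.join(inv_index.get(i, '?') for i in x_train[0])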
def get_word_index(path='imdb_word_index.json'):
"""Retrieves the dictionary mapping words to word indices.
# Arguments
path: where to cache the data (relative to `~/.aadeeplearning/dataset`).
# Returns
The word index dictionary.
"""
path = get_file(
path,
origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json',
file_hash='bfafd718b763782e994055a2d397834f')
with open(path) as f:
return json.load(f) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/imdb.py | imdb.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import numpy as np
import time
import sys
import six
import marshal
import types as python_types
import inspect
import codecs
import collections
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
# Arguments
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
# Returns
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
# Example
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
# Returns
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_aadeeplearning_object(instance):
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
def deserialize_aadeeplearning_object(identifier, module_objects=None,
custom_objects=None,
printable_module_name='object'):
if isinstance(identifier, dict):
# In this case we are dealing with a AADeepLearning config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name +
': ' + class_name)
if hasattr(cls, 'from_config'):
custom_objects = custom_objects or {}
if has_arg(cls.from_config, 'custom_objects'):
return cls.from_config(
config['config'],
custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
raise ValueError('Unknown ' + printable_module_name +
':' + function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' +
printable_module_name + ': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
# Arguments
func: the function to serialize.
# Returns
A tuple `(code, defaults, closure)`.
"""
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
# Arguments
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
# Returns
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
# Arguments
value: Any value that needs to be casted to the cell type
# Returns
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
else:
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
code = marshal.loads(raw_code)
except (UnicodeEncodeError, binascii.Error, ValueError):
# backwards compatibility for models serialized prior to 2.1.2
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(code, globs,
name=code.co_name,
argdefs=defaults,
closure=closure)
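# Round-trip sketch (illustrative): a function serialized with `func_dump` can be
# rebuilt with `func_load`, which is how custom functions survive model saving.
#
#   f = lambda x: x * 2
#   code, defaults, closure = func_dump(f)
#   g = func_load(code, defaults, closure)
#   assert g(3) == 6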
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
For Python 2, checks if there is an argument with the given name.
For Python 3, checks if there is an argument with the given name, and
also whether this argument can be called with a keyword (i.e. if it is
not a positional-only argument).
# Arguments
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument.
# Returns
bool, whether `fn` accepts a `name` keyword argument.
"""
if sys.version_info < (3,):
arg_spec = inspect.getargspec(fn)
if accept_all and arg_spec.keywords is not None:
return True
return (name in arg_spec.args)
elif sys.version_info < (3, 3):
arg_spec = inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return (name in arg_spec.args or
name in arg_spec.kwonlyargs)
else:
signature = inspect.signature(fn)
parameter = signature.parameters.get(name)
if parameter is None:
if accept_all:
for param in signature.parameters.values():
if param.kind == inspect.Parameter.VAR_KEYWORD:
return True
return False
return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY))
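# Example behaviour of `has_arg` (illustrative):
#
#   def f(x, y=1, **kwargs):
#       return x + y
#
#   has_arg(f, 'y')                    # True
#   has_arg(f, 'z')                    # False
#   has_arg(f, 'z', accept_all=True)   # True, because f accepts **kwargs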
class Progbar(object):
"""Displays a progress bar.
# Arguments
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05,
stateful_metrics=None):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
self._values = collections.OrderedDict()
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%d [' % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = ('%d:%02d:%02d' %
(eta // 3600, (eta % 3600) // 60, eta % 60))
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1:
info += ' %.0fs/step' % time_per_unit
elif time_per_unit >= 1e-3:
info += ' %.0fms/step' % (time_per_unit * 1e3)
else:
info += ' %.0fus/step' % (time_per_unit * 1e6)
for k in self._values:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
for k in self._values:
info += ' - %s:' % k
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1]))
                    if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
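# Typical `Progbar` usage sketch (illustrative): call `update` once per step with
# the metrics you want displayed.
#
#   progbar = Progbar(target=100)
#   for step in range(100):
#       loss = 1.0 / (step + 1)
#       progbar.update(step + 1, values=[('loss', loss)])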
def to_list(x, allow_tuple=False):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
# Arguments
x: target object to be normalized.
allow_tuple: If False and x is a tuple,
it will be converted into a list
with a single element (the tuple).
Else converts the tuple to a list.
# Returns
A list.
"""
if isinstance(x, list):
return x
if allow_tuple and isinstance(x, tuple):
return list(x)
return [x]
def unpack_singleton(x):
"""Gets the first element if the iterable has only one value.
Otherwise return the iterable.
# Argument:
x: A list or tuple.
# Returns:
The same iterable or the first element.
"""
if len(x) == 1:
return x[0]
return x
def object_list_uid(object_list):
object_list = to_list(object_list)
return ', '.join([str(abs(id(x))) for x in object_list])
def is_all_none(iterable_or_element):
iterable = to_list(iterable_or_element, allow_tuple=True)
for element in iterable:
if element is not None:
return False
return True
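# Behaviour of the small helpers above (illustrative):
#
#   to_list('a')                 # ['a']
#   to_list(('a', 'b'))          # [('a', 'b')]  (a tuple becomes one element by default)
#   unpack_singleton(['a'])      # 'a'
#   is_all_none([None, None])    # True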
def slice_arrays(arrays, start=None, stop=None):
"""Slices an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
# Arguments
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
# Returns
A slice of the array(s).
"""
if arrays is None:
return [None]
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
else:
return [None if x is None else x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
elif hasattr(start, '__getitem__'):
return arrays[start:stop]
else:
return [None]
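# Usage sketch for `slice_arrays` (illustrative): works on a single array or a list
# of arrays, with either a start/stop pair or a list of indices.
#
#   x = np.arange(10)
#   slice_arrays(x, 2, 5)             # array([2, 3, 4])
#   slice_arrays([x, x * 2], [0, 3])  # [array([0, 3]), array([0, 6])]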
def transpose_shape(shape, target_format, spatial_axes):
"""Converts a tuple or a list to the correct `data_format`.
It does so by switching the positions of its elements.
# Arguments
shape: Tuple or list, often representing shape,
corresponding to `'channels_last'`.
target_format: A string, either `'channels_first'` or `'channels_last'`.
spatial_axes: A tuple of integers.
Correspond to the indexes of the spatial axes.
For example, if you pass a shape
representing (batch_size, timesteps, rows, cols, channels),
then `spatial_axes=(2, 3)`.
# Returns
A tuple or list, with the elements permuted according
to `target_format`.
# Example
```python
>>> from aadeeplearning.utils.generic_utils import transpose_shape
>>> transpose_shape((16, 128, 128, 32),'channels_first', spatial_axes=(1, 2))
(16, 32, 128, 128)
>>> transpose_shape((16, 128, 128, 32), 'channels_last', spatial_axes=(1, 2))
(16, 128, 128, 32)
>>> transpose_shape((128, 128, 32), 'channels_first', spatial_axes=(0, 1))
(32, 128, 128)
```
# Raises
ValueError: if `value` or the global `data_format` invalid.
"""
if target_format == 'channels_first':
new_values = shape[:spatial_axes[0]]
new_values += (shape[-1],)
new_values += tuple(shape[x] for x in spatial_axes)
if isinstance(shape, list):
return list(new_values)
return new_values
elif target_format == 'channels_last':
return shape
else:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(target_format)) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/generic_utils.py | generic_utils.py |
import numpy as np
np.random.seed(1337)
from aa_deep_learning.AADeepLearning import AADeepLearning
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "sgd",
# 训练多少次
"number_iteration": 5000,
# 每次用多少个样本训练
"batch_size": 2,
# 迭代多少次打印一次信息
"display": 500,
}
net = [
{
# 层名
"name": "rnn_1",
# 层类型,循环神经网络层 目前只支持 n->1 输出
"type": "rnn",
# 神经元个数
"neurons_number": 128,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax",
# 层类型
"type": "softmax"
}
]
def str2onehot(str):
"""
字符串转 one hot 向量
:param str: 字符串
:return: one hot 向量
"""
word2int = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9, 'k': 10
, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19,
'u': 20, 'v': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}
element_vector = [0] * 26
element_vector[word2int[str]] = 1
return element_vector
def onehot2str(element_vector):
"""
one hot 向量转字符串
:param element_vector:one hot 向量
:return: 字符串
"""
int2word = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i',
9: 'j', 10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r',
18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}
i = np.argmax(element_vector)
return int2word[i]
def list2vector(x, y):
"""
列表转one hot 向量
:param x: 数据
:param y: 标签
:return: one hot 向量
"""
x_vector = np.zeros((len(x), len(x[0]), 26))
y_vector = np.zeros((len(y), 26))
for i, value in enumerate(x):
j = 0
for letter in value:
x_vector[i][j] = str2onehot(letter)
j = j + 1
y_vector[i] = str2onehot(y[i])
return x_vector, y_vector
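# Quick self-check of the encoding helpers above (illustrative; uses only what is
# defined in this file):
assert onehot2str(str2onehot('a')) == 'a'
assert int(np.argmax(str2onehot('z'))) == 25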
x = ['abc', 'bcd', 'cde', 'def', 'efg', 'fgh', 'ghi', 'hij', 'ijk', 'jkl', 'klm', 'lmn', 'mno', 'nop', 'opq', 'pqr',
'qrs', 'rst', 'stu', 'tuv', 'uvw', 'vwx', 'wxy', 'xyz', 'yza', 'zab']
y = ['d', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'a', 'b', 'c']
# 训练数据
x_train, y_train = list2vector(x, y)
# 预测数据
x = ['bcd']
y = ['e']
x_test, y_test = list2vector(x, y)
print("x_train.shape:",x_train.shape)
print("y_train.shape:",y_train.shape)
print("x_test.shape:",x_test.shape)
print("y_test.shape:",y_test.shape)
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
result, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("accuracy:", accuracy)
print("test letter: " + x[0])
print("true letter: " + y[0])
print("predict letter: " + onehot2str(result[0])) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_rnn_letter.py | aa_rnn_letter.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# import numpy as np
# np.random.seed(0)
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 画出损失曲线
AA.visualization_loss()
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_visualization_loss.py | aa_visualization_loss.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# import numpy as np
# np.random.seed(0)
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 10,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 画出正确率曲线
AA.visualization_accuracy()
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_visualization_accuracy.py | aa_visualization_accuracy.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import cifar10
from aa_deep_learning.AADeepLearning.datasets import np_utils
# 载入数据,如果不存在则自动下载
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 500,
# 每次用多少个样本训练
"batch_size": 16,
# 迭代多少次打印一次信息
"display": 10,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名
"name": "convolutional_1",
# 层类型,卷积层
"type": "convolutional",
# 卷积核个数
"kernel_number": 2,
# 卷积核高
"kernel_height": 2,
# 卷积核宽
"kernel_width": 2,
# 填充数,1:在图片最外层填充1圈0,2:填充2圈0,以此类推
"padding": 1,
# 滑动步长,1:水平或垂直方向滑动步长都为1,2:水平或垂直方向滑动步长都为2,以此类推
"stride": 1,
# 权重初始化 gaussian/xavier/msra
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型, 激活函数层
"type": "relu"
},
{
# 层名
"name": "pooling_1",
# 层类型,池化层
"type": "pooling",
# 模式 max(最大池化)/average(平均池化)
"mode": "max",
# 池化核高
"kernel_height": 2,
# 池化核宽
"kernel_width": 2,
# 滑动步长,1:水平或垂直方向滑动步长都为1,2:水平或垂直方向滑动步长都为2,以此类推
"stride": 1
},
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_2",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_cnn_cifar10.py | aa_cnn_cifar10.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
import numpy as np
np.random.seed(0)
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "svm_1",
        # Layer type: SVM classification layer; computes the hinge loss and outputs per-class scores
        "type": "svm",
        # Margin between wrong-class and correct-class scores: loss = max(0, wrong_score - correct_score + delta)
        # (see the hinge-loss sketch after this list)
        "delta": 0.2
}
]
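# Illustrative hinge-loss computation for a single sample, matching the "delta" above.
# This is only a sketch of the idea; the framework's own "svm" layer does the real work.
_scores = np.array([2.0, 1.5, 0.3])           # per-class scores, class 0 is the true class
_margins = np.maximum(0, _scores - _scores[0] + 0.2)
_margins[0] = 0                                # the true class contributes no loss
assert np.isclose(_margins.sum(), 0.0)         # both wrong classes are below the margin here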
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,如果是svm分类器,则返回得分值分布和准确率, score:样本在各个分类上的得分值, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy)
print(score[0])
print(score.shape) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_dnn_mnist_svm.py | aa_dnn_mnist_svm.py |
from __future__ import print_function
import numpy as np
np.random.seed(1337)
# 生产环境
# from aa_deep_learning.aadeeplearning.aadeeplearning_old import AADeepLearning
from aa_deep_learning.AADeepLearning import AADeepLearning
# print(y_test.shape)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 学习率衰减: 通常设置为 0.99
"learning_rate_decay": 0.9999,
# 优化策略: sgd/momentum/rmsprop
"optimizer": "momentum",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.95,
# rmsprop优化器的衰减系数
"rmsprop_decay": 0.95,
# 正则化系数
"reg_coefficient": 0,
# 训练多少次
"number_iteration": 10000,
# 每次用多少个样本训练
"batch_size": 2,
# 每隔几个迭代周期评估一次准确率?
"evaluate_interval": 10,
# 每隔几个迭代周期保存一次快照?
# 是否以fine_tune方式训练? true/false
# 预训练参数模型所在路径
"pre_train_model": "./iter5.gordonmodel"
}
net = [
{
# 层名
"name": "lstm_1",
# 层类型
"type": "lstm",
# 神经元个数
"neurons_number": 50,
        # Weight initialization: msra/xavier/gaussian
"weight_init": "xavier"
}
,
{
# 层名
"name": "softmax",
# 层类型
"type": "softmax"
}
]
def str2onehot(str):
word2int = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9, 'k': 10
, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19,
'u': 20, 'v': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}
int2word = {v: k for k, v in word2int.items()}
element_vector = [0] * 26
element_vector[word2int[str]] = 1
return element_vector
def onehot2str(element_vector):
int2word = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i',
9: 'j', 10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r',
18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}
i = np.argmax(element_vector)
return int2word[i]
def list2vector(x, y):
x_vector = np.zeros((len(x), len(x[0]), 26))
y_vector = np.zeros((len(y), 26))
for i, value in enumerate(x):
j = 0
for letter in value:
x_vector[i][j] = str2onehot(letter)
j = j + 1
y_vector[i] = str2onehot(y[i])
return x_vector, y_vector
# x = ['abc', 'bcd', 'cde', 'def', 'efg']
# y = ['d', 'e', 'f', 'g', 'h']
x = ['abc', 'bcd', 'cde', 'def', 'efg', 'fgh', 'ghi', 'hij', 'ijk', 'jkl', 'klm', 'lmn', 'mno', 'nop', 'opq', 'pqr',
'qrs', 'rst', 'stu', 'tuv', 'uvw', 'vwx', 'wxy', 'xyz', 'yza', 'zab']
y = ['d', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'a', 'b', 'c']
x_train, y_train = list2vector(x, y)
x = ['cde']
y = ['f']
x_test, y_test = list2vector(x, y)
print("x_train.shape:",x_train.shape)
print("y_train.shape:",y_train.shape)
print("x_test.shape:",x_test.shape)
print("y_test.shape:",y_test.shape)
AA = AADeepLearning(net=net, config=config)
AA.train(x_train=x_train, y_train=y_train)
result, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("accuracy:", accuracy)
print("test letter: " + x[0])
print("true letter: " + y[0])
print("predict letter: " + onehot2str(result[0])) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_lstm_letter.py | aa_lstm_letter.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# import numpy as np
# np.random.seed(0)
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
# {
# # 层名
# "name": "relu_1",
# # 层类型(激活层) 可选,relu,sigmoid,tanh,
# "type": "relu"
# },
{
# 层名
"name": "tanh_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "tanh"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_dnn_mnist.py | aa_dnn_mnist.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import cifar10
from aa_deep_learning.AADeepLearning.datasets import np_utils
# 载入数据,如果不存在则自动下载
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "batch_normalization_1",
# 标准化层,将上一层的数据转化为均值接近0,标准差接近1的转换。可以一定程度解决梯度消失、数据不稳定的问题
"type": "batch_normalization"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_dnn_cifar10.py | aa_dnn_cifar10.py |
from __future__ import print_function
import numpy as np
np.random.seed(1337)
# 生产环境
# from aa_deep_learning.aadeeplearning.aadeeplearning_old import AADeepLearning
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# 测试环境
# from aadeeplearning import aadeeplearning as aa
# 10分类
nb_classes = 10
# keras中的mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,按以下格式调用即可
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_test = x_test[:64]
y_test = y_test[:64]
# print(x_test[0])
# 画出minist 数字
# import matplotlib.pyplot as plt
# fig = plt.figure()
# plt.imshow(x_test[0],cmap = 'binary')#黑白显示
# plt.show()
# 后端使用tensorflow时,即tf模式下,
# 会将100张RGB三通道的16*32彩色图表示为(100,16,32,3),
# 第一个维度是样本维,表示样本的数目,
# 第二和第三个维度是高和宽,
# 最后一个维度是通道维,表示颜色通道数
# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
# input_shape = (img_rows, img_cols, 1)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 将类别向量(从0到nb_classes的整数向量)映射为二值类别矩阵,
# 相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
# 打印出相关信息
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 学习率衰减: 通常设置为 0.99
"learning_rate_decay": 0.9999,
# 优化策略: sgd/momentum/rmsprop
"optimizer": "momentum",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.95,
# rmsprop优化器的衰减系数
"rmsprop_decay": 0.95,
# 正则化系数
"reg_coefficient": 0,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 预训练参数模型所在路径
"pre_train_model": "./iter5.gordonmodel"
}
net = [
{
# 层名
"name": "rnn_1",
# 层类型
"type": "rnn",
# 神经元个数
"neurons_number": 60,
        # Weight initialization: msra/xavier/gaussian
"weight_init": "xavier"
},
{
# 层名
"name": "relu_1",
# 层类型
"type": "relu"
},
{
# 层名
"name": "fully_connected_1",
# 层类型
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax",
# 层类型
"type": "softmax"
}
]
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:",accuracy)
"""
# 输出训练好的模型在测试集上的表现
print('Test score:', score[0])
print('Test accuracy:', score[1])
# Test score: 0.032927570413
# Test accuracy: 0.9892
""" | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_rnn_mnist.py | aa_rnn_mnist.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
# 保存模型快照的名称
"save_model": "AA",
# 每隔几个迭代周期保存一次快照? 保存名称为 名称+迭代次数+.model
"save_iteration": 1000,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_save.py | aa_save.py |
from __future__ import print_function
import numpy as np
np.random.seed(1337)
# 生产环境
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import cifar10
from aa_deep_learning.AADeepLearning.datasets import cifar100
from aa_deep_learning.AADeepLearning.datasets import imdb
from aa_deep_learning.AADeepLearning.datasets import reuters
from aa_deep_learning.AADeepLearning.datasets import fashion_mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
from aa_deep_learning.AADeepLearning.datasets import mnist
import matplotlib.pyplot as plt
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Plot an MNIST digit
fig = plt.figure()
plt.imshow(x_test[0], cmap='binary')  # grayscale display
plt.show()
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
# x_train = np.transpose(x_train, (0,2,3,1))
# plt.figure(figsize=(1,1))
# plt.imshow(x_train[0])
# plt.show()
# (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
# x_train = np.transpose(x_train, (0,2,3,1))
# plt.figure(figsize=(1,1))
# plt.imshow(x_train[0])
# plt.show()
# (x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
# num_words=None,
# skip_top=0,
# maxlen=None,
# seed=113,
# start_char=1,
# oov_char=2,
# index_from=3)
# print(x_train[8])
# (x_train, y_train), (x_test, y_test) = reuters.load_data(path="reuters.npz",
# num_words=None,
# skip_top=0,
# maxlen=None,
# test_split=0.2,
# seed=113,
# start_char=1,
# oov_char=2,
# index_from=3)
# print(x_train[8])
# (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# fig = plt.figure()
# plt.imshow(x_train[10],cmap = 'binary')#黑白显示
# plt.show()
# (x_train, y_train), (x_test, y_test) = boston_housing.load_data()
# print(x_train[8])
# print('x_train shape:', x_train.shape)
# print('y_train shape:', y_train.shape)
# print('x_test shape:', x_test.shape)
# print('y_test shape:', y_test.shape) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_dataset.py | aa_dataset.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "dropout_1",
        # Layer type: dropout (random deactivation) layer
        "type": "dropout",
        # Drop rate (fraction of units zeroed out); 0.5 means 50% of the neurons are randomly
        # deactivated during training; must lie in [0, 1] (see the sketch after this list)
        "drop_rate": 0.5
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
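# Sketch of inverted dropout, the usual way a drop_rate like the one above is applied
# (illustrative only; the framework's "dropout" layer implements its own version):
#
#   import numpy as np
#   drop_rate = 0.5
#   mask = (np.random.rand(*activations.shape) >= drop_rate) / (1.0 - drop_rate)
#   activations_train = activations * mask   # at test time, activations are used unchanged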
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_dnn_mnist_drop_out.py | aa_dnn_mnist_drop_out.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
# 保存模型快照的名称
"save_model": "AA",
# 每隔几个迭代周期保存一次快照? 保存名称为 名称+迭代次数+.model
"save_iteration": 1000,
# 预训练参数模型所在路径,不为空框架会加载模型,用于预测或继续训练
"load_model": "AA-1000.model"
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_continue_train.py | aa_continue_train.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 1000,
# 每次用多少个样本训练
"batch_size": 64,
# 迭代多少次打印一次信息
"display": 100,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "batch_normalization_1",
        # Batch normalization layer: rescales the previous layer's activations to roughly zero mean
        # and unit standard deviation, which helps with vanishing gradients and unstable training
        # (see the sketch after this list)
        "type": "batch_normalization"
},
{
# 层名
"name": "relu_1",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
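# Sketch of the per-feature normalization a batch_normalization layer performs
# (illustrative; gamma/beta stand for the layer's learned scale and shift parameters):
#
#   import numpy as np
#   mean = x.mean(axis=0)
#   var = x.var(axis=0)
#   x_hat = (x - mean) / np.sqrt(var + 1e-8)
#   out = gamma * x_hat + beta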
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_dnn_mnist_batch_normalization.py | aa_dnn_mnist_batch_normalization.py |
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
import numpy as np
np.random.seed(0)
# mnist数据集已经被划分成了60,000个训练集,10,000个测试集的形式,如果数据不存在则自动下载
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop/adam
"optimizer": "adam",
# 使用动量的梯度下降算法做优化,可以设置这一项,默认值为 0.9 ,一般不需要调整
"momentum_coefficient": 0.9,
# 训练多少次
"number_iteration": 100,
# 每次用多少个样本训练
"batch_size": 16,
# 迭代多少次打印一次信息
"display": 10,
}
# 网络结构,数据将从上往下传播
net = [
{
# 层名
"name": "convolutional_1",
# 层类型,卷积层
"type": "convolutional",
# 卷积核个数
"kernel_number": 1,
# 卷积核高
"kernel_height": 2,
# 卷积核宽
"kernel_width": 2,
        # Zero padding: 1 pads one ring of zeros around the image, 2 pads two rings, and so on
        "padding": 1,
        # Stride: 1 slides the kernel one pixel at a time horizontally/vertically, 2 slides two pixels, and so on
        # (the output-size sketch after this list shows how kernel size, padding and stride interact)
        "stride": 1,
# 权重初始化 gaussian/xavier/msra
"weight_init": "msra"
},
{
# 层名
"name": "relu_1",
# 层类型, 激活函数层
"type": "relu"
},
{
# 层名
"name": "pooling_1",
# 层类型,池化层
"type": "pooling",
# 模式 max(最大池化)/average(平均池化)
"mode": "max",
# 池化核高
"kernel_height": 2,
# 池化核宽
"kernel_width": 2,
# 滑动步长,1:水平或垂直方向滑动步长都为1,2:水平或垂直方向滑动步长都为2,以此类推
"stride": 1
},
{
# 层名,无限制
"name": "flatten_1",
# 层类型,将数据展平为适合神经网络的结构,用于输入层或者卷积层和全连接层中间。 (60000, 1, 28, 28) ——> (784, 60000)
"type": "flatten"
},
{
# 层名
"name": "fully_connected_1",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数
"neurons_number": 256,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "relu_2",
# 层类型(激活层) 可选,relu,sigmoid,tanh,
"type": "relu"
},
{
# 层名
"name": "fully_connected_2",
# 层类型,全连接层
"type": "fully_connected",
# 神经元个数, 因为是10分类,所以神经元个数为10
"neurons_number": 10,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax_1",
# 层类型,分类层,最终输出十分类的概率分布
"type": "softmax"
}
]
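# Output-size sanity check for the convolution and pooling settings above, assuming the
# standard convolution arithmetic (illustrative; the framework may round differently):
#   out = (in + 2 * padding - kernel) // stride + 1
_conv_out = (28 + 2 * 1 - 2) // 1 + 1    # 28x28 input, kernel 2, padding 1, stride 1 -> 29x29
_pool_out = (_conv_out - 2) // 1 + 1     # 2x2 max pooling, stride 1 -> 28x28
assert (_conv_out, _pool_out) == (29, 28)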
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_cnn_mnist.py | aa_cnn_mnist.py |
import numpy as np
np.random.seed(1337)
from AADeepLearning import AADeepLearning
# 网络配置文件
config = {
# 初始学习率
"learning_rate": 0.001,
# 优化策略: sgd/momentum/rmsprop
"optimizer": "sgd",
# 训练多少次
"number_iteration": 5000,
# 每次用多少个样本训练
"batch_size": 2,
# 迭代多少次打印一次信息
"display": 500,
}
net = [
{
# 层名
"name": "rnn_1",
# 层类型,循环神经网络层 目前只支持 n->1 输出
"type": "rnn",
# 神经元个数
"neurons_number": 128,
# 权重初始化方式 msra/xavier/gaussian
"weight_init": "msra"
},
{
# 层名
"name": "softmax",
# 层类型
"type": "softmax"
}
]
def str2onehot(str):
"""
字符串转 one hot 向量
:param str: 字符串
:return: one hot 向量
"""
word2int = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9, 'k': 10
, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19,
'u': 20, 'v': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}
element_vector = [0] * 26
element_vector[word2int[str]] = 1
return element_vector
def onehot2str(element_vector):
"""
one hot 向量转字符串
:param element_vector:one hot 向量
:return: 字符串
"""
int2word = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i',
9: 'j', 10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r',
18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}
i = np.argmax(element_vector)
return int2word[i]
def list2vector(x, y):
"""
列表转one hot 向量
:param x: 数据
:param y: 标签
:return: one hot 向量
"""
x_vector = np.zeros((len(x), len(x[0]), 26))
y_vector = np.zeros((len(y), 26))
for i, value in enumerate(x):
j = 0
for letter in value:
x_vector[i][j] = str2onehot(letter)
j = j + 1
y_vector[i] = str2onehot(y[i])
return x_vector, y_vector
x = ['abc', 'bcd', 'cde', 'def', 'efg', 'fgh', 'ghi', 'hij', 'ijk', 'jkl', 'klm', 'lmn', 'mno', 'nop', 'opq', 'pqr',
'qrs', 'rst', 'stu', 'tuv', 'uvw', 'vwx', 'wxy', 'xyz', 'yza', 'zab']
y = ['d', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'a', 'b', 'c']
# 训练数据
x_train, y_train = list2vector(x, y)
# 预测数据
x = ['bcd']
y = ['e']
x_test, y_test = list2vector(x, y)
print("x_train.shape:",x_train.shape)
print("y_train.shape:",y_train.shape)
print("x_test.shape:",x_test.shape)
print("y_test.shape:",y_test.shape)
# 定义模型,传入网络结构和配置项
AA = AADeepLearning(net=net, config=config)
# 训练模型
AA.train(x_train=x_train, y_train=y_train)
# 使用测试集预测,返回概率分布和准确率, score:样本在各个分类上的概率, accuracy:准确率
result, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("accuracy:", accuracy)
print("test letter: " + x[0])
print("true letter: " + y[0])
print("predict letter: " + onehot2str(result[0])) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_rnn_letter.py | aa_rnn_letter.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import cifar10
from AADeepLearning.datasets import np_utils
# 载入数据,如果不存在则自动下载
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# 第一个维度是样本数目,第二维度是通道数表示颜色通道数,第三维度是高,第四个维度是宽
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# 将x_train, x_test的数据格式转为float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# 归一化,将值映射到 0到1区间
x_train /= 255
x_test /= 255
# 因为是10分类,所以将类别向量(从0到10的整数向量)映射为二值类别矩阵,相当于将向量用one-hot重新编码
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 500,
    # Number of samples per training batch
    "batch_size": 16,
    # Print progress every this many iterations
    "display": 10,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name
        "name": "convolutional_1",
        # Layer type: convolutional layer
        "type": "convolutional",
        # Number of convolution kernels
        "kernel_number": 1,
        # Kernel height
        "kernel_height": 2,
        # Kernel width
        "kernel_width": 2,
        # Padding: 1 pads one ring of zeros around the image, 2 pads two rings, and so on
        "padding": 1,
        # Stride: 1 slides by 1 in both the horizontal and vertical directions, 2 slides by 2, and so on
        "stride": 1,
        # Weight initialization: gaussian/xavier/msra
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type: activation layer
        "type": "relu"
    },
    {
        # Layer name
        "name": "pooling_1",
        # Layer type: pooling layer
        "type": "pooling",
        # Mode: max (max pooling) / average (average pooling)
        "mode": "max",
        # Pooling kernel height
        "kernel_height": 2,
        # Pooling kernel width
        "kernel_width": 2,
        # Stride: 1 slides by 1 in both the horizontal and vertical directions, 2 slides by 2, and so on
        "stride": 1
    },
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_2",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_cnn_cifar10.py | aa_cnn_cifar10.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
import numpy as np
np.random.seed(0)
# The MNIST dataset is already split into 60,000 training and 10,000 test samples; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "svm_1",
        # Layer type: SVM classification layer; computes the hinge loss and outputs the final scores for the 10 classes
        "type": "svm",
        # Score margin between the wrong class and the correct class (loss = max(0, wrong_score - correct_score + delta))
        "delta": 0.2
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; with an SVM classifier this returns the score distribution and the accuracy. score: per-class scores for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy)
print(score[0])
print(score.shape) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_dnn_mnist_svm.py | aa_dnn_mnist_svm.py |
from __future__ import print_function
import numpy as np
np.random.seed(1337)
# Production environment
# from aadeeplearning.aadeeplearning_old import AADeepLearning
from AADeepLearning import AADeepLearning
# print(y_test.shape)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Learning rate decay: usually set to 0.99
    "learning_rate_decay": 0.9999,
    # Optimization strategy: sgd/momentum/rmsprop
    "optimizer": "momentum",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.95,
    # Decay coefficient for the rmsprop optimizer
    "rmsprop_decay": 0.95,
    # Regularization coefficient
    "reg_coefficient": 0,
    # Number of training iterations
    "number_iteration": 10000,
    # Number of samples per training batch
    "batch_size": 2,
    # Evaluate accuracy every how many iterations?
    "evaluate_interval": 10,
    # Save a snapshot every how many iterations?
    # Train in fine-tune mode? true/false
    # Path to the pre-trained model
    "pre_train_model": "./iter5.gordonmodel"
}
net = [
{
        # Layer name
        "name": "lstm_1",
        # Layer type
        "type": "lstm",
        # Number of neurons
        "neurons_number": 50,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "xavier"
    },
    {
        # Layer name
        "name": "softmax",
        # Layer type
"type": "softmax"
}
]
def str2onehot(str):
word2int = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9, 'k': 10
, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19,
'u': 20, 'v': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}
int2word = {v: k for k, v in word2int.items()}
element_vector = [0] * 26
element_vector[word2int[str]] = 1
return element_vector
def onehot2str(element_vector):
int2word = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i',
9: 'j', 10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r',
18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}
i = np.argmax(element_vector)
return int2word[i]
def list2vector(x, y):
x_vector = np.zeros((len(x), len(x[0]), 26))
y_vector = np.zeros((len(y), 26))
for i, value in enumerate(x):
j = 0
for letter in value:
x_vector[i][j] = str2onehot(letter)
j = j + 1
y_vector[i] = str2onehot(y[i])
return x_vector, y_vector
# x = ['abc', 'bcd', 'cde', 'def', 'efg']
# y = ['d', 'e', 'f', 'g', 'h']
x = ['abc', 'bcd', 'cde', 'def', 'efg', 'fgh', 'ghi', 'hij', 'ijk', 'jkl', 'klm', 'lmn', 'mno', 'nop', 'opq', 'pqr',
'qrs', 'rst', 'stu', 'tuv', 'uvw', 'vwx', 'wxy', 'xyz', 'yza', 'zab']
y = ['d', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'a', 'b', 'c']
x_train, y_train = list2vector(x, y)
x = ['cde']
y = ['f']
x_test, y_test = list2vector(x, y)
print("x_train.shape:",x_train.shape)
print("y_train.shape:",y_train.shape)
print("x_test.shape:",x_test.shape)
print("y_test.shape:",y_test.shape)
AA = AADeepLearning(net=net, config=config)
AA.train(x_train=x_train, y_train=y_train)
result, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("accuracy:", accuracy)
print("test letter: " + x[0])
print("true letter: " + y[0])
print("predict letter: " + onehot2str(result[0])) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_lstm_letter.py | aa_lstm_letter.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
# The MNIST dataset is already split into 60,000 training and 10,000 test samples; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_dnn_mnist.py | aa_dnn_mnist.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import cifar10
from AADeepLearning.datasets import np_utils
# Load the data; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "batch_normalization_1",
        # Batch normalization layer: transforms the previous layer's output to have mean close to 0 and standard deviation close to 1; helps with vanishing gradients and unstable training
        "type": "batch_normalization"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_dnn_cifar10.py | aa_dnn_cifar10.py |
from __future__ import print_function
import numpy as np
np.random.seed(1337)
# Production environment
# from aadeeplearning.aadeeplearning_old import AADeepLearning
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
# Test environment
# from aadeeplearning import aadeeplearning as aa
# 10 classes
nb_classes = 10
# The MNIST dataset in Keras is already split into 60,000 training and 10,000 test samples; call it as follows
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_test = x_test[:64]
y_test = y_test[:64]
# print(x_test[0])
# Plot an MNIST digit
# import matplotlib.pyplot as plt
# fig = plt.figure()
# plt.imshow(x_test[0],cmap = 'binary')  # display in black and white
# plt.show()
# With the TensorFlow backend (tf mode),
# 100 RGB three-channel 16*32 color images are represented as (100, 16, 32, 3):
# the first dimension is the sample dimension (number of samples),
# the second and third dimensions are height and width,
# and the last dimension is the channel dimension (number of color channels)
# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
# input_shape = (img_rows, img_cols, 1)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Convert the class vectors (integers from 0 to nb_classes) into binary class matrices,
# i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
# Print shape information
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Learning rate decay: usually set to 0.99
    "learning_rate_decay": 0.9999,
    # Optimization strategy: sgd/momentum/rmsprop
    "optimizer": "momentum",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.95,
    # Decay coefficient for the rmsprop optimizer
    "rmsprop_decay": 0.95,
    # Regularization coefficient
    "reg_coefficient": 0,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Path to the pre-trained model
    "pre_train_model": "./iter5.gordonmodel"
}
net = [
{
        # Layer name
        "name": "rnn_1",
        # Layer type
        "type": "rnn",
        # Number of neurons
        "neurons_number": 60,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "xavier"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax",
        # Layer type
"type": "softmax"
}
]
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:",accuracy)
"""
# Report the trained model's performance on the test set
print('Test score:', score[0])
print('Test accuracy:', score[1])
# Test score: 0.032927570413
# Test accuracy: 0.9892
""" | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_rnn_mnist.py | aa_rnn_mnist.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
# The MNIST dataset is already split into 60,000 training and 10,000 test samples; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
    # Name used for saved model snapshots
    "save_model": "AA",
    # Save a snapshot every how many iterations? Saved as name + iteration count + ".model"
    "save_iteration": 1000,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_save.py | aa_save.py |
# import numpy as np
# from AADeepLearning.datasets import mnist
# import matplotlib.pyplot as plt
# # The MNIST dataset is already split into 60,000 training and 10,000 test samples; call it as follows
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
# # Plot an MNIST digit
# fig = plt.figure()
# plt.imshow(x_test[0],cmap = 'binary')  # display in black and white
# plt.show()
#
# print('x_train shape:', x_train.shape)
# print('y_train shape:', y_train.shape)
# print('x_test shape:', x_test.shape)
# print('y_test shape:', y_test.shape)
# import numpy as np
# import matplotlib.pyplot as plt
# from AADeepLearning.datasets import cifar10
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
# print('x_train shape:', x_train.shape)
# print('y_train shape:', y_train.shape)
# print('x_test shape:', x_test.shape)
# print('y_test shape:', y_test.shape)
#
# x_train = np.transpose(x_train, (0,2,3,1))
# plt.figure(figsize=(1,1))
# plt.imshow(x_train[0])
# plt.show()
# import numpy as np
# import matplotlib.pyplot as plt
# from AADeepLearning.datasets import cifar100
# (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
# print('x_train shape:', x_train.shape)
# print('y_train shape:', y_train.shape)
# print('x_test shape:', x_test.shape)
# print('y_test shape:', y_test.shape)
#
# x_train = np.transpose(x_train, (0,2,3,1))
# plt.figure(figsize=(1,1))
# plt.imshow(x_train[0])
# plt.show()
# from AADeepLearning.datasets import imdb
# (x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
# num_words=None,
# skip_top=0,
# maxlen=None,
# seed=113,
# start_char=1,
# oov_char=2,
# index_from=3)
# print(x_train[0])
from AADeepLearning.datasets import reuters
(x_train, y_train), (x_test, y_test) = reuters.load_data(path="reuters.npz",
num_words=None,
skip_top=0,
maxlen=None,
test_split=0.2,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
print(x_train[0])
# from AADeepLearning.datasets import fashion_mnist
# import matplotlib.pyplot as plt
# (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# fig = plt.figure()
# plt.imshow(x_train[10],cmap = 'binary')  # display in black and white
# plt.show()
#
# print('x_train shape:', x_train.shape)
# print('y_train shape:', y_train.shape)
# print('x_test shape:', x_test.shape)
# print('y_test shape:', y_test.shape)
# (x_train, y_train), (x_test, y_test) = boston_housing.load_data()
# print(x_train[8])
# print('x_train shape:', x_train.shape)
# print('y_train shape:', y_train.shape)
# print('x_test shape:', x_test.shape)
# print('y_test shape:', y_test.shape) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_dataset.py | aa_dataset.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
# The MNIST dataset is already split into 60,000 training and 10,000 test samples; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "dropout_1",
        # Layer type: dropout layer
        "type": "dropout",
        # Dropout rate (fraction of activations set to zero); 0.5 means 50% of neurons are randomly deactivated; must be in [0, 1]
        "drop_rate": 0.5
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_dnn_mnist_drop_out.py | aa_dnn_mnist_drop_out.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
# The MNIST dataset is already split into 60,000 training and 10,000 test samples; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
    # Name used for saved model snapshots
    "save_model": "AA",
    # Save a snapshot every how many iterations? Saved as name + iteration count + ".model"
    "save_iteration": 1000,
    # Path to a pre-trained model; if not empty, the framework loads it for prediction or continued training
    "load_model": "AA-1000.model"
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_continue_train.py | aa_continue_train.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
# The MNIST dataset is already split into 60,000 training and 10,000 test samples; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples per training batch
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "batch_normalization_1",
        # Batch normalization layer: transforms the previous layer's output to have mean close to 0 and standard deviation close to 1; helps with vanishing gradients and unstable training
        "type": "batch_normalization"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_dnn_mnist_batch_normalization.py | aa_dnn_mnist_batch_normalization.py |
from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
import numpy as np
np.random.seed(0)
# The MNIST dataset is already split into 60,000 training and 10,000 test samples; it is downloaded automatically if it does not exist locally
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is height, the fourth is width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, convert the class vectors (integers 0-9) into binary class matrices, i.e. one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 100,
    # Number of samples per training batch
    "batch_size": 16,
    # Print progress every this many iterations
    "display": 10,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name
        "name": "convolutional_1",
        # Layer type: convolutional layer
        "type": "convolutional",
        # Number of convolution kernels
        "kernel_number": 1,
        # Kernel height
        "kernel_height": 2,
        # Kernel width
        "kernel_width": 2,
        # Padding: 1 pads one ring of zeros around the image, 2 pads two rings, and so on
        "padding": 1,
        # Stride: 1 slides by 1 in both the horizontal and vertical directions, 2 slides by 2, and so on
        "stride": 1,
        # Weight initialization: gaussian/xavier/msra
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type: activation layer
        "type": "relu"
    },
    {
        # Layer name
        "name": "pooling_1",
        # Layer type: pooling layer
        "type": "pooling",
        # Mode: max (max pooling) / average (average pooling)
        "mode": "max",
        # Pooling kernel height
        "kernel_height": 2,
        # Pooling kernel width
        "kernel_width": 2,
        # Stride: 1 slides by 1 in both the horizontal and vertical directions, 2 slides by 2, and so on
        "stride": 1
    },
    {
        # Layer name (unrestricted)
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for fully connected layers; used as the input layer or between convolutional and fully connected layers. (60000, 1, 28, 28) --> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_2",
        # Layer type (activation layer): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected layer
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer; outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and the accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | AAdeepLearning | /AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_cnn_mnist.py | aa_cnn_mnist.py |
The MIT License (MIT)
Copyright (c) 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| AAmiles | /AAmiles-0.1.2.tar.gz/AAmiles-0.1.2/LICENSE.md | LICENSE.md |
# AAmiles
*Check changes in the miles required to obtain a ticket from the AA web page.*
## **Packages required:**
- requests
- bs4
- numpy
## **Installation:**
```bash
pip install AAmiles
```
## **Usage:**
```bash
python -m AAmiles
```
## **To run it at startup on Windows 8 and 10:**
### **a) Create a batch file in some folder with:**
```batch
@echo off
python -m AAmiles
```
### **b) Then:**
1 - Create a shortcut to the batch file.
2 - Once the shortcut is created, right-click the shortcut file and select Cut.
3 - Press Start, type Run, and press Enter.
4 - In the Run window, type shell:startup to open the Startup folder.
5 - Once the Startup folder is opened, click the Home tab at the top of the folder and select Paste to paste the shortcut file into the Startup folder.
| AAmiles | /AAmiles-0.1.2.tar.gz/AAmiles-0.1.2/README.md | README.md |
# ABBA-QuPath Registration Exporter
[](https://github.com/nickdelgrosso/ABBA-QuPath-RegistrationAnalysis/actions/workflows/python-app.yml)
[](https://coveralls.io/github/nickdelgrosso/ABBA-QuPath-RegistrationAnalysis?branch=master)
[](https://sonarcloud.io/dashboard?id=nickdelgrosso_ABBA-QuPath-RegistrationAnalysis)
A Python app and Groovy script that helps export transformed cell detections
from ABBA-registered data stored in QuPath projects.

## Installation
### First-Time Installation
```
pip install ABBA-QuPath-RegistrationExporter
```
### Upgrading to Latest Version
```
pip install --upgrade ABBA-QuPath-RegistrationExporter
```
## Running
```
regexport
```
## Running Tests
```
python -m pytest
```
| ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/README.md | README.md |
from PySide2.QtWidgets import QMainWindow
from regexport.actions.download_biop_extensions import SaveBiopExtensionsAction, SaveBiopExtensionsActionModel
from regexport.actions.load_atlas import LoadAtlasActionModel, LoadAtlasAction
from regexport.actions.load_cells import LoadCellsActionModel, LoadCellsAction
from regexport.actions.save_cells import SaveCellsActionModel, SaveCellsAction
from regexport.actions.save_script import SaveGroovyScriptActionModel, SaveGroovyScriptAction
from regexport.model import AppState
from regexport.utils.exceptions import show_dialog_box_on_uncaught_exception
from regexport.views.channel_filter import ChannelFilterView, ChannelFilterModel
from regexport.views.histogram import HistogramModel, HistogramView
from regexport.views.histogram2 import PlotView, PlotModel
from regexport.views.main_window import MainWindow
from regexport.views.plot_3d import PlotterModel, PlotterView
from regexport.views.region_tree import BrainRegionTreeModel, BrainRegionTree
from regexport.views.checkbox import CheckboxModel, CheckboxView
from regexport.views.sidebar import Layout
from regexport.views.text_selector import TextSelectorModel, DropdownTextSelectorView
class App:
def __init__(self, debug=False):
self.debug = debug
self.model = AppState()
self.plot_window = PlotterModel()
self.plot_window.register(model=self.model)
self.brain_region_tree = BrainRegionTreeModel()
self.brain_region_tree.register(model=self.model)
self.load_atlas_button = LoadAtlasActionModel()
self.load_atlas_button.register(model=self.model)
self.load_cells_button = LoadCellsActionModel()
self.load_cells_button.register(model=self.model)
self.export_data_button = SaveCellsActionModel()
self.export_data_button.register(model=self.model)
self.colormap_selector_model = TextSelectorModel()
self.colormap_selector_model.register(model=self.model, options_attr='colormap_options',
selected_attr='selected_colormap')
self.colordata_selector_dropdown = TextSelectorModel()
self.colordata_selector_dropdown.register(model=self.model, options_attr='column_to_plot_options',
selected_attr='column_to_plot')
self.download_biop_extensions_button = SaveBiopExtensionsActionModel()
self.save_groovy_script_button = SaveGroovyScriptActionModel()
self.channel_filter_model = ChannelFilterModel()
self.channel_filter_model.register(model=self.model)
self.hist_plots = PlotModel()
self.hist_plots.register(model=self.model)
self.show_plots_checkbox = CheckboxModel(label='Auto-Update Histograms')
self.show_plots_checkbox.register(model=self.model, model_property='show_plots')
def create_gui(self) -> QMainWindow:
if not self.debug:
show_dialog_box_on_uncaught_exception()
return MainWindow(
main_widgets=(
BrainRegionTree(model=self.brain_region_tree),
PlotterView(model=self.plot_window),
Layout(
widgets=(
DropdownTextSelectorView(model=self.colordata_selector_dropdown),
DropdownTextSelectorView(model=self.colormap_selector_model),
PlotView(model=self.hist_plots),
CheckboxView(model=self.show_plots_checkbox),
ChannelFilterView(model=self.channel_filter_model),
),
horizontal=False,
),
),
menu_actions=(
SaveBiopExtensionsAction(model=self.download_biop_extensions_button),
SaveGroovyScriptAction(model=self.save_groovy_script_button),
LoadAtlasAction(model=self.load_atlas_button),
LoadCellsAction(model=self.load_cells_button),
SaveCellsAction(model=self.export_data_button),
)
)
def set_max(self, channel_name, max_spots):
... | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/app.py | app.py |
from enum import Enum, auto
import numpy as np
import pandas as pd
from bg_atlasapi import BrainGlobeAtlas
from matplotlib import pyplot as plt
from traitlets import HasTraits, Instance, observe, Tuple, List, Unicode, Dict, Int, Bool
from regexport.utils.filters import is_parent
class AnalysisType(Enum):
RegionLabel = auto()
SubCellCount = auto()
class AppState(HasTraits):
atlas = Instance(BrainGlobeAtlas, allow_none=True)
cells = Instance(pd.DataFrame, allow_none=True)
selected_region_ids = Tuple(default_value=()) # should be tuple of ints
selected_cell_ids = Instance(np.ndarray, default_value=np.array([], dtype=int)) # array of ints
column_to_plot_options = List(Unicode(), default_value=["BrainRegion"])
column_to_plot = Unicode(default_value="BrainRegion")
colormap_options = List(Unicode(), default_value=[cmap for cmap in plt.colormaps() if not cmap.endswith('_r')])#['tab20c', 'viridis'])
selected_colormap = Unicode(default_value='tab20c')
selected_cells = Instance(pd.DataFrame, allow_none=True)
max_numspots_filters = Dict(key_trait=Unicode(), value_trait=Int(), default_value={})
show_plots = Bool(default_value=True)
@observe('cells')
def _update_max_numspots_filter_to_max_of_each_numspots_column(self, change):
if self.cells is None:
self.max_numspots_filters = {}
else:
self.max_numspots_filters = {col: int(self.cells[col].max()) for col in self.cells.columns if 'Num Spots' in col}
@observe('cells')
def _update_column_to_plot_options(self, change):
if self.cells is None:
return
self.cells: pd.DataFrame
columns = [name for name in self.cells.columns if 'Num Spots' in name]
self.column_to_plot = "BrainRegion"
self.column_to_plot_options = ["BrainRegion"] + columns
@observe('column_to_plot')
def _validate_column_to_plot(self, change):
assert self.column_to_plot in self.column_to_plot_options
@observe('selected_region_ids', 'cells', 'max_numspots_filters')
def _update_selected_cell_ids(self, change):
if self.cells is None:
return
if len(self.selected_region_ids) == 0:
self.selected_cell_ids = self.cells.index.values
selected_cells = self.cells
else:
is_parented = self.cells.groupby('BGIdx', as_index=False).BGIdx.transform(
lambda ids: is_parent(ids.values[0], selected_ids=self.selected_region_ids, tree=self.atlas.hierarchy) if ids.values[0] != 0 else False
)
only_parented = is_parented[is_parented.BGIdx].index.values
self.selected_cell_ids = only_parented
selected_cells = self.cells.iloc[only_parented]
query = ' & '.join(f"(`{channel}` <= {value})" for channel, value in self.max_numspots_filters.items())
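        # The resulting query string looks like, e.g., "(`Num Spots: ChannelA` <= 12) & (`Num Spots: ChannelB` <= 4)";
        # the channel names here are illustrative and come from the loaded cell table's "Num Spots" columns.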
print('querying')
selected_cells2 = selected_cells.query(query)
print('queried')
self.selected_cells = selected_cells2 | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/model.py | model.py |
from pathlib import Path
from zipfile import ZipFile
from PySide2.QtCore import QThreadPool
from PySide2.QtWidgets import QFileDialog, QAction
from regexport.utils.parallel import Task
class SaveBiopExtensionsActionModel:
text = "-1. Download BIOP Extensions"
def submit(self, directory: Path):
import requests
if not directory.exists():
directory.mkdir(parents=True, exist_ok=True)
urls = [
('extensions', "https://github.com/BIOP/qupath-biop-extensions/releases/download/v2.0.8/biop-tools-2.0.8.jar"),
('extensions', "https://github.com/BIOP/qupath-biop-extensions/releases/download/v2.0.8/WSI-Dependencies.zip"),
('.', "https://github.com/SuperElastix/elastix/releases/download/5.0.1/elastix-5.0.1-win64.zip"),
('.', "https://gist.githubusercontent.com/NicoKiaru/b91f9f3f0069b765a49b5d4629a8b1c7/raw/571954a443d1e1f0597022f6c19f042aefbc0f5a/TestRegister.groovy"),
('.', "https://github.com/SuperElastix/elastix/releases/download/5.0.1/elastix-5.0.1-mac.zip")
]
for subdir, url in urls:
response = requests.get(url, allow_redirects=True)
fname = directory / subdir / Path(url).name
fname.parent.mkdir(parents=True, exist_ok=True)
with open(fname, 'wb') as f:
f.write(response.content)
if fname.suffix == '.zip':
with ZipFile(fname, 'r') as zip_ref:
zip_ref.extractall(fname.parent)
fname.unlink(missing_ok=True)
class SaveBiopExtensionsAction(QAction):
def __init__(self, model: SaveBiopExtensionsActionModel, *args, **kwargs):
self.model = model
super().__init__(*args, **kwargs)
self.setText(model.text)
self.triggered.connect(self.click)
def click(self):
directory = QFileDialog.getExistingDirectory(
caption="Make a QuPath Common Files folder for your BIOP Extensions",
)
if not directory:
return
worker = Task(self.model.submit, Path(directory))
pool = QThreadPool.globalInstance()
pool.start(worker) | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/actions/download_biop_extensions.py | download_biop_extensions.py |
from pathlib import Path
import pandas as pd
from PySide2.QtWidgets import QFileDialog, QCheckBox, QDialog, QAction
from traitlets import HasTraits, Bool, directional_link
from regexport.model import AppState
class SaveCellsActionModel(HasTraits):
text = "3. Save Cells"
enabled = Bool(default_value=False)
def register(self, model: AppState):
self.model = model
directional_link((model, 'cells'), (self, 'enabled'), lambda cells: cells is not None)
def submit(self, filename: Path, export_visible_cells_only: bool = False):
print('File saving...')
df = self.model.selected_cells if export_visible_cells_only else self.model.cells
types = {
'Image': 'category',
'BrainRegion': 'category',
'Acronym': 'category',
'X': 'float32',
'Y': 'float32',
'Z': 'float32',
}
types.update({col: 'uint16' for col in self.model.cells.columns if "Num Spots" in col})
df = df.astype(types)
df: pd.DataFrame = df.drop(columns=['BGIdx'])
print(df.info())
print(filename)
if filename.suffix.lower() == ".csv":
df.to_csv(filename, index=False)
elif filename.suffix.lower() == ".feather":
df.reset_index(drop=True).to_feather(filename)
else:
raise TypeError(f"Error saving file {str(filename)}: {filename.suffix} extension not supported.")
print("File saved")
class ChkBxFileDialog(QFileDialog):
def __init__(self, checkbox_title="Selected Cells Only", filename_filter="*.txt"):
super().__init__(filter=filename_filter)
self.setSupportedSchemes(["file"])
self.setOption(QFileDialog.DontUseNativeDialog)
self.setAcceptMode(QFileDialog.AcceptSave)
self.setNameFilter("Feather file (*.feather);;CSV file (*.csv)")
self.selectNameFilter("Feather file (*.feather);;CSV file (*.csv)")
self.checkbox = QCheckBox(checkbox_title)
self.layout().addWidget(self.checkbox)
@property
def full_filename(self) -> Path:
filename = self.selectedUrls()[0].toLocalFile()
extension_filter = self.selectedNameFilter()
extension = extension_filter[extension_filter.index('*.') + 1:-1]
full_filename = Path(filename).with_suffix(extension)
return full_filename
@property
def selected_cells_only(self) -> bool:
return self.checkbox.isChecked()
class SaveCellsAction(QAction):
def __init__(self, model: SaveCellsActionModel, *args, **kwargs):
self.model = model
super().__init__(*args, **kwargs)
self.setText(model.text)
self.triggered.connect(self.click)
self.model.observe(self.set_enabled, 'enabled')
self.set_enabled(None)
def set_enabled(self, changed):
self.setEnabled(self.model.enabled)
def click(self):
dialog = ChkBxFileDialog()
if dialog.exec_() == QDialog.Accepted:
self.model.submit(
filename=dialog.full_filename,
export_visible_cells_only=dialog.selected_cells_only
) | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/actions/save_cells.py | save_cells.py |
from PySide2.QtWidgets import QTreeWidget, QAbstractItemView, QTreeWidgetItem
from traitlets import HasTraits, Instance, Tuple, directional_link
from treelib import Tree
from regexport.model import AppState
from regexport.utils.atlas import create_brain_region_tree
from .utils import HasWidget
class BrainRegionTreeModel(HasTraits):
tree = Instance(Tree, allow_none=False, default_value=Tree())
selected_region_ids = Tuple(default_value=()) # Tuple of ints
def register(self, model: AppState):
directional_link((self, 'selected_region_ids'), (model, 'selected_region_ids'))
directional_link((model, 'atlas'), (self, 'tree'), create_brain_region_tree)
def select(self, *brain_region_ids: int):
print(brain_region_ids)
self.selected_region_ids = brain_region_ids
class BrainRegionTree(HasWidget):
def __init__(self, model: BrainRegionTreeModel):
self.model = model
treeview = QTreeWidget()
treeview.setHeaderHidden(True)
treeview.setWordWrap(False)
treeview.setSelectionMode(QAbstractItemView.ExtendedSelection)
treeview.setSelectionBehavior(QAbstractItemView.SelectRows)
treeview.itemSelectionChanged.connect(self.onSelectionChanged)
HasWidget.__init__(self, widget=treeview)
self.treeview = treeview
self.model.observe(self.render, names=['tree'])
def render(self, change=None):
tree = self.model.tree
# no need to render empty tree
if len(tree) == 0:
return
ids = tree.expand_tree(mode=Tree.DEPTH)
next(ids) # skip displaying root
for id in ids:
node = tree.get_node(id)
node.item = QTreeWidgetItem()
node.item.setText(0, node.data)
node.item.setText(1, str(node.identifier))
parent = tree.parent(node.identifier)
if parent.is_root():
self.treeview.addTopLevelItem(node.item)
else:
parent.item.addChild(node.item)
# Finish up
self.treeview.expandToDepth(1)
def onSelectionChanged(self):
self.model.select(*[int(item.text(1)) for item in self.treeview.selectedItems()]) | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/views/region_tree.py | region_tree.py |
from dataclasses import dataclass
from typing import List, Optional
import numpy as np
import vedo
from traitlets import HasTraits, Instance, Bool
from vedo import Plotter, pyplot
from vtkmodules.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from regexport.model import AppState
from regexport.views.utils import HasWidget
@dataclass
class HistogramData:
zero_count: int
bin_edges: np.ndarray
bar_heights: np.ndarray
x_labels: List[str]
title: str = ""
zero_color: str = 'red'
bar_color: str = 'olivedrab'
def __post_init__(self):
assert self.bar_heights.ndim == 1
assert len(self.bar_heights) == len(self.bin_edges) - 1
assert len(self.bar_heights) == len(self.x_labels)
class HistogramModel(HasTraits):
histogram = Instance(HistogramData, allow_none=True)
cumulative = Bool(default_value=False)
def register(self, model: AppState):
self.model = model
model.observe(self.update, ['selected_cells', 'column_to_plot'])
def update(self, change):
model = self.model
if model.selected_cells is None:
self.histogram = None
return
data_column = model.selected_cells[model.column_to_plot]
if data_column.dtype.name == 'category':
self.histogram = None
else:
data = data_column.values
zero_count = int(np.sum(data == 0))
heights, bin_edges = np.histogram(data[data > 0], bins='auto', density=False)
if self.cumulative:
zero_count /= heights.sum() + zero_count
bar_heights = heights.cumsum() / heights.sum() + zero_count
else:
bar_heights = heights
self.histogram = HistogramData(
zero_count=zero_count,
bin_edges=bin_edges,
bar_heights=bar_heights,
x_labels=bin_edges[:-1].astype(int).astype(str).tolist(),
)
class HistogramView(HasWidget):
def __init__(self, model: HistogramModel):
widget = QVTKRenderWindowInteractor()
HasWidget.__init__(self, widget=widget)
self.plotter = Plotter(qtWidget=widget)
self.model = model
self.model.observe(self.render)
@staticmethod
def render_histogram_data(data: HistogramData) -> vedo.pyplot.Plot:
return vedo.pyplot.plot(
[
np.concatenate([[data.zero_count], data.bar_heights]),
np.concatenate([['0'], data.x_labels]),
[data.zero_color] + [data.bar_color] * len(data.bar_heights),
np.concatenate([[0], data.bin_edges]),
],
mode='bars'
)
def render(self, change=None):
self.plotter.clear()
hist: Optional[HistogramData] = self.model.histogram
if hist is not None:
hist_actor = self.render_histogram_data(data=hist)
self.plotter.show(hist_actor, mode=12) | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/views/histogram.py | histogram.py |
from typing import Dict, List
from PySide2.QtWidgets import QWidget, QVBoxLayout, QWidgetItem, QLayout
from traitlets import HasTraits, List as TList, Instance, directional_link
from regexport.model import AppState
from regexport.views.labelled_slider import LabelledSliderView, LabelledSliderModel
from regexport.views.utils import HasWidget
class ChannelFilterModel(HasTraits):
sliders = TList(Instance(LabelledSliderModel))
def __getitem__(self, label) -> LabelledSliderModel:
for slider in self.sliders:
if slider.label == label:
return slider
raise KeyError(f"Slider with label {label} not found.")
def register(self, model: AppState):
self.model = model
directional_link(
(model, 'max_numspots_filters'),
(self, 'sliders'),
lambda max_filters: self.create_new_sliders(max_filters),
)
def set_max(self, channel: str, value: int):
self[channel].value = value
def create_new_sliders(self, max_numspots_filters: Dict[str, int]) -> List[LabelledSliderModel]:
if set(slider.label for slider in self.sliders) != set(max_numspots_filters):
sliders = []
for chan, v in max_numspots_filters.items():
slider = LabelledSliderModel(label=chan, max=v, value=v)
slider.observe(self._update_model)
sliders.append(slider)
return sliders
return self.sliders
def _update_model(self, change):
self.model.max_numspots_filters = {slider.label: slider.value for slider in self.sliders}
class ChannelFilterView(HasWidget):
def __init__(self, model: ChannelFilterModel):
self.model = model
widget = QWidget()
HasWidget.__init__(self, widget=widget)
self.layout = QVBoxLayout()
widget.setLayout(self.layout)
self.model.observe(self.render)
def render(self, change=None):
if change is None or len(change.old) != len(change.new):
layout: QLayout = self.layout
# Delete any existing sliders
num_sliders = layout.count()
if num_sliders > 0:
for idx in reversed(range(num_sliders)):
item: QWidgetItem = layout.itemAt(idx)
layout.removeItem(item)
item.widget().deleteLater()
layout.update()
assert layout.count() == 0 # should be no sliders in the layout at this point.
# Make new sliders
for slider_model in self.model.sliders:
print(f"making slider from {slider_model}")
slider = LabelledSliderView(model=slider_model)
layout.addWidget(slider.widget) | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/views/channel_filter.py | channel_filter.py |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PySide2.QtWidgets import QVBoxLayout, QWidget
from traitlets import HasTraits, Instance, Bool, directional_link
from regexport.model import AppState
from regexport.views.utils import HasWidget
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT
from matplotlib.figure import Figure
class PlotModel(HasTraits):
selected_data = Instance(np.ndarray, allow_none=True)
data = Instance(np.ndarray, allow_none=True)
show = Bool(default_value=True)
def register(self, model: AppState):
self.model = model
model.observe(self.update, ['cells', 'selected_cells', 'column_to_plot', 'show_plots'])
directional_link((model, 'show_plots'), (self, 'show'))
def update(self, change):
model = self.model
if model.selected_cells is None or model.selected_cells[model.column_to_plot].dtype.name == 'category':
self.selected_data = None
else:
self.data = model.cells[model.column_to_plot].values
self.selected_data = model.selected_cells[model.column_to_plot].values
class PlotView(HasWidget):
# Code from https://www.pythonguis.com/tutorials/plotting-matplotlib/
def __init__(self, model: PlotModel, width=5, height=4, dpi=100):
# Make a figure, turn it into a canvas widget
widget = QWidget()
layout = QVBoxLayout()
widget.setLayout(layout)
HasWidget.__init__(self, widget=widget)
self.fig, self.axes = plt.subplots(ncols=2, figsize=(width, height), dpi=dpi)
self.canvas = FigureCanvasQTAgg(figure=self.fig)
layout.addWidget(self.canvas)
self.toolbar = NavigationToolbar2QT(self.canvas, widget)
layout.addWidget(self.toolbar)
self.model = model
self.model.observe(self.render)
def render(self, change):
if self.model.show:
for ax in self.axes:
ax.cla()
if change.new is None:
return
else:
selected_data = self.model.selected_data
if selected_data is not None:
data = selected_data
_, edges = np.histogram(data[data > 0], bins='auto')
all_edges = np.concatenate([[0, 1], edges])
self.axes[0].hist(
data,
bins=all_edges,
cumulative=False,
# density=True,
)
data = self.model.data
ax: plt.Axes = self.axes[1]
ax.hist(
data,
bins=50,
cumulative=True,
density=True,
)
if selected_data is not None:
ax.vlines(selected_data.max(), 0, 1, colors='black', linestyles='dotted')
# self.axes[1].set_ylim(0, 1)
self.canvas.draw() | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/views/histogram2.py | histogram2.py |
from pathlib import Path
from typing import Optional
import numpy as np
from traitlets import HasTraits, Instance, directional_link
from vedo import Plotter, Mesh, Points
from vtkmodules.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from regexport.model import AppState
from regexport.utils.point_cloud import PointCloud
from regexport.utils.profiling import warn_if_slow
from regexport.views.utils import HasWidget
class PlotterModel(HasTraits):
atlas_mesh = Instance(Path, allow_none=True)
points = Instance(PointCloud, default_value=PointCloud())
def register(self, model: AppState):
self.model = model
directional_link(
(model, 'atlas'),
(self, 'atlas_mesh'),
lambda atlas: Path(str(atlas.structures[997]['mesh_filename'])) if atlas is not None else None
)
model.observe(self.link_cells_to_points, names=[
'selected_cells', 'selected_colormap', 'column_to_plot',
])
def link_cells_to_points(self, change):
model = self.model
if model.selected_cells is None:
self.points = PointCloud()
return
color_col = model.selected_cells[model.column_to_plot]
points = PointCloud.from_cmap(
positions=model.selected_cells[['X', 'Y', 'Z']].values * 1000,
color_levels=color_col.cat.codes.values if color_col.dtype.name == 'category' else color_col.values,
cmap=self.model.selected_colormap
)
self.points = points
class PlotterView(HasWidget):
def __init__(self, model: PlotterModel):
self.model = model
self.item_points = {}
self._atlas_mesh = None
widget = QVTKRenderWindowInteractor()
HasWidget.__init__(self, widget=widget)
self.plotter = Plotter(qtWidget=widget)
self.model.observe(self.observe_atlas_mesh, ['atlas_mesh'])
self.model.observe(self.render, ['points'])
@property
def atlas_mesh(self) -> Optional[Mesh]:
return self._atlas_mesh
@atlas_mesh.setter
def atlas_mesh(self, value: Optional[Mesh]):
self._atlas_mesh = value
self.render(None)
@staticmethod
def load_mesh(filename: Path) -> Mesh:
return Mesh(
str(filename),
alpha=0.1,
computeNormals=True,
c=(1., 1., 1.)
)
def observe_atlas_mesh(self, change):
print('saw atlas change')
if self.model.atlas_mesh is None:
self._atlas_mesh = Mesh()
else:
print('loading')
# worker = Task(self.load_mesh, self.model.atlas_mesh)
# worker.signals.finished.connect(partial(setattr, self, "atlas_mesh"))
#
# pool = QThreadPool.globalInstance()
# pool.start(worker)
self.atlas_mesh = self.load_mesh(self.model.atlas_mesh)
@warn_if_slow()
def render(self, change=None):
actors = [self._atlas_mesh]
# box = self._atlas_mesh.box().wireframe().alpha(0.2).color((255, 0, 0))
# actors.append(box)
points = self.model.points
if len(points.coords) > 0:
coords = points.coords
colors = (np.hstack((points.colors, points.alphas)) * 255).astype(int) # alphas needed for fast rendering.
actors.append(Points(coords, r=3, c=colors))
self.plotter.clear(at=0)
self.plotter.show(actors, at=0)
# self.plotter.addInset(self._atlas_mesh, pos=(.9, .9), size=0.1, c='w', draggable=True)
# note: look at from vedo.applications import SlicerPlotter for inspiration | ABBA-QuPath-RegistrationExporter | /ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/views/plot_3d.py | plot_3d.py |
import collections
import math
from scipy import stats
def get_z_critical_value(alpha, two_tailed=True):
"""
Returns the z critical value for a particular alpha = 1 - confidence level. By default returns
a two-tailed z-value, meaning the actual tail probability is alpha / 2.
"""
if two_tailed:
alpha /= 2
return stats.distributions.norm.ppf(1 - alpha)
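# For example, a 95% confidence level corresponds to alpha = 0.05 and a two-tailed
# critical value of about 1.96; the one-tailed value is about 1.645:
#   get_z_critical_value(0.05)                    # ~1.96
#   get_z_critical_value(0.05, two_tailed=False)  # ~1.645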
# a value with confidence interval bounds (not necessarily centered around the point estimate)
ValueWithInterval = collections.namedtuple(
'ValueWithInterval',
('value', 'lower_bound', 'upper_bound'),
)
class ValueWithError(object):
"""
A value with standard error, from which a confidence interval can be derived.
"""
def __init__(self, value, error):
self.value = value
self.error = error
def confidence_interval_width(self, z_critical_value):
"""
z_critical_value should be the value at which the right-tail probability for a standard
normal distribution equals half the desired alpha = 1 - confidence level:
P(Z > z_value) = 1 - alpha / 2
where Z is an N(0, 1) random variable. Use get_z_critical_value(), or see
http://en.wikipedia.org/wiki/Standard_normal_table.
"""
return z_critical_value * self.error
def value_with_interval(self, z_critical_value, estimated_value=None):
width = self.confidence_interval_width(z_critical_value)
return ValueWithInterval(
value=estimated_value if estimated_value is not None else self.value,
lower_bound=self.value - width,
upper_bound=self.value + width,
)
class BinomialDistribution(object):
def __init__(self, num_trials, probability):
self.num_trials = num_trials
self.probability = probability
self.expectation = num_trials * probability
self.standard_deviation = math.sqrt(self.expectation * (1 - probability))
self._binomial = stats.binom(num_trials, probability)
def mass(self, count):
return self._binomial.pmf(count)
def cdf(self, count):
return self._binomial.cdf(count)
def survival(self, count):
return 1 - self.cdf(count)
def inverse_cdf(self, probability):
return self._binomial.ppf(probability)
def inverse_survival(self, probability):
return self._binomial.isf(probability)
class Proportion(object):
def __init__(self, num_successes, num_trials):
"""
Represents a binomial proportion with num_successes successful samples out of num_trials
total.
"""
self.num_successes = num_successes
self.num_trials = num_trials
def p_estimate(self, z_critical_value=0):
"""
Generate an adjusted estimate and error using the "Agresti-Coull Interval", see
http://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Agresti-Coull_Interval.
The estimated value is an adjusted best estimate for the actual probability. For example, if
0 successes were observed out of 10 samples, it's unlikely the actual probability is zero,
so the adjusted estimate will be slightly above zero.
A z_critical_value of zero yields the ordinary Wald interval.
"""
adjusted_num_trials = float(self.num_trials + z_critical_value**2)
interval_center = (self.num_successes + z_critical_value**2 / 2) / adjusted_num_trials
standard_error = math.sqrt(interval_center * (1 - interval_center) / adjusted_num_trials)
return ValueWithError(interval_center, standard_error)
def mixed_estimate(self, z_critical_value):
"""
Returns an ValueWithInterval with a MLE value and upper/lower bounds from the Agresti-Coull
interval.
"""
return (
self.p_estimate(z_critical_value=z_critical_value)
.value_with_interval(z_critical_value, estimated_value=self.p_estimate().value)
)
def confidence_interval_on_proportion(num_successes, num_trials, confidence_level=0.95):
'''Convenience function with more straightforward interface.'''
return Proportion(num_successes, num_trials).mixed_estimate(
get_z_critical_value(1 - confidence_level)
)
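# Example usage: a 95% interval for 20 successes out of 1000 trials.
#   result = confidence_interval_on_proportion(20, 1000)
#   result.value        # MLE point estimate: 0.02
#   result.lower_bound  # Agresti-Coull lower bound, roughly 0.013
#   result.upper_bound  # Agresti-Coull upper bound, roughly 0.031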
class ProportionComparison(object):
def __init__(self, baseline, variation):
self.baseline = baseline
self.variation = variation
def difference_estimate(self, z_critical_value):
"""
Generate an estimate of the difference in success rates between the variation and the
baseline.
"""
baseline_p = self.baseline.p_estimate(z_critical_value=z_critical_value)
variation_p = self.variation.p_estimate(z_critical_value=z_critical_value)
difference = variation_p.value - baseline_p.value
standard_error = math.sqrt(baseline_p.error ** 2 + variation_p.error ** 2)
return ValueWithError(difference, standard_error)
def difference_ratio(self, z_critical_value):
"""
        Return the difference in success rates as a proportion of the baseline success rate.
"""
baseline_value = self.baseline.p_estimate(z_critical_value=z_critical_value).value
difference = self.difference_estimate(z_critical_value=z_critical_value)
ratio = difference.value / baseline_value
error = difference.error / baseline_value
return ValueWithError(ratio, error)
def z_test(self, z_multiplier=1):
"""
Perform a large-sample z-test of null hypothesis H0: p_baseline == p_variation against
alternative hypothesis H1: p_baseline < p_variation. Return the (one-tailed) p-value.
z_multiplier: test z-value will be multiplied by this factor before computing a p-value.
See http://en.wikipedia.org/wiki/Statistical_hypothesis_testing#Common_test_statistics,
"Two-proportion z-test, pooled for d0 = 0".
"""
pooled_stats = Proportion(
self.baseline.num_successes + self.variation.num_successes,
self.baseline.num_trials + self.variation.num_trials,
)
pooled_p_value = pooled_stats.p_estimate().value
pooled_variance_of_difference = (
pooled_p_value * (1 - pooled_p_value)
* (1.0 / self.baseline.num_trials + 1.0 / self.variation.num_trials)
)
pooled_standard_error_of_difference = math.sqrt(pooled_variance_of_difference)
test_z_value = self.difference_estimate(0).value / pooled_standard_error_of_difference
adjusted_p_value = stats.distributions.norm.sf(test_z_value * z_multiplier)
return adjusted_p_value
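    # For reference (added comment): the statistic above is the standard pooled
    # two-proportion z-test,
    #     z = (p_v - p_b) / sqrt(p_pool * (1 - p_pool) * (1/n_b + 1/n_v))
    # where p_b and p_v are the raw observed proportions and p_pool pools the
    # successes over both groups.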
def _binomial_coverage_interval(self, distribution, coverage_alpha):
"""
For the given binomial distribution, compute an interval that covers at least (1 -
coverage_alpha) of the total probability mass, centered at the expectation (unless we're at
the boundary). Uses the normal approximation.
"""
if distribution.num_trials < 1000:
# don't even bother trying to optimize for small-ish sample sizes
return (0, distribution.num_trials)
else:
return (
int(math.floor(distribution.inverse_cdf(coverage_alpha / 2))),
int(math.ceil(distribution.inverse_survival(coverage_alpha / 2))),
)
def _probability_union(self, probability, num_tests):
"""
Given the probability of an event, compute the probability that it happens at least once in
num_tests independent tests. This is used to adjust a p-value for multiple comparisons.
When used to adjust alpha instead, this is called a Sidak correction (the logic is the same,
the formula is inverted):
http://en.wikipedia.org/wiki/Bonferroni_correction#.C5.A0id.C3.A1k_correction
"""
return 1 - (1 - probability)**num_tests
def iterated_test(self, num_tests, coverage_alpha, improvement_only=False):
"""
Compute a p-value testing null hypothesis H0: p_baseline == p_variation against alternative
hypothesis H1: p_baseline != p_variation by summing p-values conditioned on individual
baseline success counts. This provides a more accurate correction for multiple testing but
scales like O(sqrt(self.baseline.num_trials)), so can eventually get slow for very large
values.
Lower coverage_alpha increases accuracy at the cost of longer runtime. Roughly, the result
will be accurate within no more than coverage_alpha (but this ignores error due to the
normal approximation so isn't guaranteed).
If improvement_only=True, computes p-value for alternative hypothesis
H1: p_baseline < p_variation instead.
"""
observed_delta = self.variation.p_estimate().value - self.baseline.p_estimate().value
if observed_delta == 0 and not improvement_only:
# a trivial case that the code below does not handle well
return 1
pooled_proportion = (
(self.baseline.num_successes + self.variation.num_successes)
/ float(self.baseline.num_trials + self.variation.num_trials)
)
variation_distribution = BinomialDistribution(self.variation.num_trials, pooled_proportion)
baseline_distribution = BinomialDistribution(self.baseline.num_trials, pooled_proportion)
baseline_limits = self._binomial_coverage_interval(baseline_distribution, coverage_alpha)
p_value = 0
        for baseline_successes in range(baseline_limits[0], baseline_limits[1] + 1):
baseline_proportion = 1.0 * baseline_successes / self.baseline.num_trials
if improvement_only:
lower_trial_count = -1
upper_trial_count = math.ceil(
(baseline_proportion + observed_delta) * self.variation.num_trials
)
else:
observed_absolute_delta = abs(observed_delta)
lower_trial_count = math.floor(
(baseline_proportion - observed_absolute_delta) * self.variation.num_trials
)
upper_trial_count = math.ceil(
(baseline_proportion + observed_absolute_delta) * self.variation.num_trials
)
# p-value of variation success counts "at least as extreme" for this particular
# baseline success count
p_value_at_baseline = (
variation_distribution.cdf(lower_trial_count)
+ variation_distribution.survival(upper_trial_count - 1)
)
# this is exact because we're conditioning on the baseline count, so the multiple
# tests are independent.
adjusted_p_value = self._probability_union(p_value_at_baseline, num_tests)
baseline_probability = baseline_distribution.mass(baseline_successes)
p_value += baseline_probability * adjusted_p_value
# the remaining baseline values we didn't cover contribute less than coverage_alpha to the
# sum, so adding that amount gives us a conservative upper bound.
return p_value + coverage_alpha
Results = collections.namedtuple(
'Results',
(
'num_successes',
'num_trials',
'proportion', # ValueWithInterval
'improvement', # ValueWithInterval
'relative_improvement', # ValueWithInterval
'two_tailed_p_value', # two-tailed p-value for trial != baseline
'improvement_one_tailed_p_value', # one-tailed p-value for trial > baseline
),
)
class Experiment(object):
P_VALUE_PRECISION = 1e-5
def __init__(self, num_trials, baseline_num_successes, baseline_num_trials,
confidence_level=0.95):
"""
num_trials: number of trials to be compared to the baseline
confidence_level: used for all confidence intervals generated
"""
self.num_comparisons = max(1, num_trials)
self._baseline = Proportion(baseline_num_successes, baseline_num_trials)
alpha = (1 - confidence_level) / num_trials # Bonferroni correction
self._z_critical_value = get_z_critical_value(alpha)
def get_baseline_proportion(self):
return self._baseline.mixed_estimate(self._z_critical_value)
def get_results(self, num_successes, num_trials):
trial = Proportion(num_successes, num_trials)
comparison = ProportionComparison(self._baseline, trial)
return Results(
num_successes=num_successes,
num_trials=num_trials,
proportion=trial.mixed_estimate(self._z_critical_value),
improvement=comparison.difference_estimate(self._z_critical_value)
.value_with_interval(
self._z_critical_value,
estimated_value=comparison.difference_estimate(0).value,
),
relative_improvement=comparison.difference_ratio(self._z_critical_value)
.value_with_interval(
self._z_critical_value,
estimated_value=comparison.difference_ratio(0).value,
),
two_tailed_p_value=comparison.iterated_test(
self.num_comparisons,
self.P_VALUE_PRECISION,
),
improvement_one_tailed_p_value=comparison.iterated_test(
self.num_comparisons,
self.P_VALUE_PRECISION,
improvement_only=True,
),
) | ABBA | /ABBA-0.1.0.tar.gz/ABBA-0.1.0/abba/stats.py | stats.py |
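# Illustrative end-to-end sketch (added; names and numbers are made up):
# compare one variation against a baseline of 100 successes in 1000 trials.
#
#     experiment = Experiment(
#         num_trials=1,              # number of variations (Bonferroni correction)
#         baseline_num_successes=100,
#         baseline_num_trials=1000,
#         confidence_level=0.95,
#     )
#     results = experiment.get_results(num_successes=130, num_trials=1000)
#     results.two_tailed_p_value     # p-value for variation != baseline
#     results.improvement.value      # estimated absolute lift over the baseline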
=====
ABBYY
=====
This package contains a wrapper for the ABBYY Cloud OCR API <http://ocrsdk.com/documentation/apireference/> and some helper functions.
EXAMPLE
=======
>>> from ABBYY import CloudOCR
>>> ocr_engine = CloudOCR(application_id='YOUR_ABBYY_APPLICATION_ID', password='YOUR_ABBYY_APPLICATION_PASSWORD')
>>> pdf = open('budget1988.pdf', 'rb')
>>> file = {pdf.name: pdf}
>>> result = ocr_engine.process_and_download(file, exportFormat='xml,pdfTextAndImages', language='French')
>>> result
{'xml': <_io.BytesIO object at 0x2e2e290>, 'pdfSearchable': <_io.BytesIO object at 0x2e2e2f0>}
INSTALLATION
============
$ pip install ABBYY
| ABBYY | /ABBYY-0.3.tar.gz/ABBYY-0.3/README.rst | README.rst |
# ABC - Another Build Cystem
## About ABC
ABC is a minimalistic, easy-to-use build system that aims to be simple yet fast and powerful. A few lines of ABC code can do the work of 20 or more lines in other build systems.
## Requirements
ABC requires Python 3.6 or higher to run. Installing ABC does NOT require any dependencies unless you build from source; in that case, check requirements.txt to see which packages you need, or run ``python -m pip install -r requirements.txt``.
## License
ABC is distributed under the MIT license, a full copy of which is in LICENSE.txt | ABC-build-tool | /ABC-build%20tool-0.0.1.tar.gz/ABC-build tool-0.0.1/README.md | README.md |
**Simulator Features**
**Scenes.** Infinite random playroom configuration, depends on the import asset classes.
**Objects.** 300+ custom annotated household objects and kids playing toys across 50 different object classes.
**Agent Types.** Support teacher and baby learning agent.
**Actions.** 20+ actions that facilitate research in a wide range of interaction and navigation based embodied AI tasks.
**Images.** Render RGBD, Depth and Instance segmentation. We also provide a selection of different camera angles for rendering.
**Metadata.** After each step in the environment, there is a large amount of sensory and visual data will be available and saved.
**Installation**
**With pip (Windows)**
```
pip install ABCDESim==0.0.1
```
**Once you've installed the package, download the simulator via:**
```
from room import download
```
**Test via GUI controller:**
```
from room import GUI
```
```
GUI.exec_full("./filepath/demo.py")
```
**Run via command lines:**
```
from room import Run
```
```
Run.init("path/to/Stack.exe")  # Initialize simulator (replace with your Stack.exe path)
```
```
Run.addcharacter(str(character)) # "baby" or "teacher"
```
```
Run.setcam(camnum)  # 0 to 4
```
```
Run.action(character, action, object)  # All arguments are strings
```
```
Run.close() #Close simulator
```
| ABCDESim | /ABCDESim-1.0.0.tar.gz/ABCDESim-1.0.0/README.txt | README.txt |
import requests
from PyQt5 import QtCore, QtWidgets, QtGui
import time
import json
import sys
import subprocess
import os
class UnityCom:
def __init__(self, url='127.0.0.1', port='8080', x_display=None, no_graphics=False,
timeout_wait=50):
self._address = 'http://' + url + ':' + port
self.port = port
self.graphics = no_graphics
self.x_display = x_display
self.timeout_wait = timeout_wait
def post_command(self, request_dict, repeat=False):
try:
if repeat:
resp = self.requests_retry_session().post(self._address, json=request_dict)
else:
resp = requests.post(
self._address, json=request_dict, timeout=self.timeout_wait)
if resp.status_code != requests.codes.ok:
print(resp.json())
# raise UnityEngineException(resp.status_code, resp.json())
return resp.json()
except requests.exceptions.RequestException as e:
print(str(e))
return
# raise UnityCommunicationException(str(e))
def switch_camera(self, cameras=[0]):
request = {'id': str(time.time()),
'action': 'switch_camera', 'intParams': cameras}
print(request)
response = self.post_command(request)
return response['success'] if response else None
def randomize_scene(self):
request = {'id': str(time.time()), 'action': 'randomize_scene'}
print(request)
response = self.post_command(request)
return response['success'] if response else None
def add_character(self, character_resource='Chars/Male1', position=None, initial_room=""):
"""
        Add a character to the scene.
        :param str character_resource: which game object to use for the character
        :param list position: the position where you want to place the character
        :param str initial_room: the room where you want to put the character if position
        is not specified. If neither is given, the character is placed in a random location.
        :return: success (bool)
"""
mode = 'random'
pos = [0, 0, 0]
if position is not None:
mode = 'fix_position'
pos = position
elif not len(initial_room) == 0:
assert initial_room in ["kitchen",
"bedroom", "livingroom", "bathroom"]
mode = 'fix_room'
request = {'id': str(time.time()), 'action': 'add_character',
'stringParams': [json.dumps({
'character_resource': character_resource,
'mode': mode,
'character_position': {'x': pos[0], 'y': pos[1], 'z': pos[2]},
'initial_room': initial_room
})]}
print(request)
response = self.post_command(request)
return response['success'] if response else None
def render_script(self, script, randomize_execution=False, random_seed=-1, processing_time_limit=10,
skip_execution=False, find_solution=False, output_folder='Output/', file_name_prefix="script",
frame_rate=5, image_synthesis=['normal'], save_pose_data=False,
image_width=640, image_height=480, recording=False, record=False,
save_scene_states=False, camera_mode=['AUTO'], time_scale=1.0, skip_animation=False):
"""
Executes a script in the simulator. The script can be single or multi agent,
and can be used to generate a video, or just to change the state of the environment
:param list script: a list of script lines, of the form `['<char{id}> [{Action}] <{object_name}> ({object_id})']`
:param bool randomize_execution: randomly choose elements
:param int random_seed: random seed to use when randomizing execution, -1 means that the seed is not set
:param bool find_solution: find solution (True) or use graph ids to determine object instances (False)
:param int processing_time_limit: time limit for finding a solution in seconds
        :param bool skip_execution: skip rendering, only check if a solution exists
:param str output_folder: folder to output renderings
:param str file_name_prefix: prefix of created files
:param int frame_rate: frame rate at which to generate the video
        :param list image_synthesis: what information to save. Can include multiple modes at the same time: "normal", "seg_inst", "seg_class", "depth", "flow", "albedo", "illumination", "surf_normals". Leave empty if you don't want to generate anything
:param bool save_pose_data: save pose data, a skeleton for every agent and frame
        :param int image_width: image width for the generated frames
        :param int image_height: image height for the generated frames
        :param bool recording: whether to record data with cameras
:param bool save_scene_states: save scene states (this will be unused soon)
:param list camera_mode: list with cameras used to render data. Can be a str(i) with i being a scene camera index or one of the cameras from `character_cameras`
:param int time_scale: accelerate time at which actions happen
:param bool skip_animation: whether agent should teleport/do actions without animation (True), or perform the animations (False)
:return: pair success (bool), message: (str)
"""
params = {'randomize_execution': randomize_execution, 'random_seed': random_seed,
'processing_time_limit': processing_time_limit, 'skip_execution': skip_execution,
'output_folder': output_folder, 'file_name_prefix': file_name_prefix,
'frame_rate': frame_rate, 'image_synthesis': image_synthesis,
'find_solution': find_solution,
'save_pose_data': save_pose_data, 'save_scene_states': save_scene_states,
'camera_mode': camera_mode, 'recording': recording, 'record': record,
'image_width': image_width, 'image_height': image_height,
'time_scale': time_scale, 'skip_animation': skip_animation}
request = {'id': str(time.time()), 'action': 'render_script',
'stringParams': [json.dumps(params)] + script}
print(request)
response = self.post_command({'id': str(time.time()), 'action': 'render_script',
'stringParams': [json.dumps(params)] + script})
try:
message = json.loads(response['message'])
except ValueError:
message = response['message']
return response['success'], message if response else None, None
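    # Illustrative call (added; not part of the original class): make character
    # 0 walk to the object named "ball" with id 1, letting the simulator find a
    # solution:
    #
    #     comm = UnityCom()
    #     comm.render_script(['<char0> [walk] <ball> (1)'], find_solution=True)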
def reset(self, scene_index=None):
"""
        Reset the scene. Deletes characters and scene changes, and loads the scene at scene_index.
:param int scene_index: integer between 0 and 6, corresponding to the apartment we want to load
:return: succes (bool)
"""
print(scene_index)
response = self.post_command({'id': str(time.time()), 'action': 'reset',
'intParams': [] if scene_index is None else [scene_index]})
return response['success'] if response else None
class UnityEngineException(Exception):
"""
This exception is raised when an error in communication occurs:
- Unity has received invalid request
More information is in the message.
"""
def __init__(self, status_code, resp_dict):
resp_msg = resp_dict['message'] if 'message' in resp_dict else 'Message not available'
self.message = 'Unity returned response with status: {0} ({1}), message: {2}'.format(
status_code, requests.status_codes._codes[status_code][0], resp_msg)
class UnityCommunicationException(Exception):
def __init__(self, message):
self.message = message
class MyWidget:
teacher_index = -1
baby_index = -1
current_index = 0
exe_name = ""
camera_list = []
def __init__(self):
super().__init__()
self.comm = UnityCom()
# self.startButton = QtWidgets.QPushButton("Start Simulator")
# self.closeButton = QtWidgets.QPushButton("Close Simulator")
# self.resetButton = QtWidgets.QPushButton("Reset")
# self.randomButton = QtWidgets.QPushButton("Randomize_scene")
# self.addTeacherButton = QtWidgets.QPushButton("Add Teacher")
# self.addBabyButton = QtWidgets.QPushButton("Add Baby")
# self.confirmButton = QtWidgets.QPushButton("Confirm")
# self.b6 = QtWidgets.QCheckBox("Record")
#
# self.b1 = QtWidgets.QCheckBox("Camera 1")
# self.b2 = QtWidgets.QCheckBox("Camera 2")
# self.b3 = QtWidgets.QCheckBox("Camera 3")
# self.b4 = QtWidgets.QCheckBox("Camera 4")
# self.b5 = QtWidgets.QCheckBox("Baby Camera")
#
# self.b1.stateChanged.connect(lambda: self.btnstate(self.b1))
# self.b2.stateChanged.connect(lambda: self.btnstate(self.b2))
# self.b3.stateChanged.connect(lambda: self.btnstate(self.b3))
# self.b4.stateChanged.connect(lambda: self.btnstate(self.b4))
# self.b5.stateChanged.connect(lambda: self.btnstate(self.b5))
#
# self.characterbox = QtWidgets.QComboBox()
# self.characterbox.addItems(
# ["Teacher", "Baby"])
# self.combobox = QtWidgets.QComboBox()
#
# self.combobox.addItems(
# ["ball", "folder", "teddybear", "toy", "numberbox", "cube", "Train", "Car", "StandingLamp", "Crib", "Bangku", "Piano"])
#
# self.actionbox = QtWidgets.QComboBox()
# self.actionbox.addItems(
# ["walk", "run", "crawl", "lookat", "touch", "grab", "rotate", "putback", "check", "walkforward", "walkbackward", "turnleft", "turnright", "lookaround"])
#
# self.resetButton.clicked.connect(self.reset)
# self.randomButton.clicked.connect(self.random)
# self.addTeacherButton.clicked.connect(self.addTeacher)
# self.addBabyButton.clicked.connect(self.addBaby)
# self.confirmButton.clicked.connect(self.run)
# self.startButton.clicked.connect(self.start_simulator)
# self.closeButton.clicked.connect(self.close_simulator)
#
# self.setWindowTitle('MyWindow')
# self._main = QtWidgets.QWidget()
# self.setCentralWidget(self._main)
#
# layout = QtWidgets.QGridLayout(self._main)
# layout.addWidget(self.startButton)
# layout.addWidget(self.closeButton)
# layout.addWidget(self.resetButton)
# layout.addWidget(self.randomButton)
# layout.addWidget(self.addTeacherButton)
# layout.addWidget(self.addBabyButton)
#
# layout.addWidget(self.b1)
# layout.addWidget(self.b2)
# layout.addWidget(self.b3)
# layout.addWidget(self.b4)
# layout.addWidget(self.b5)
#
# self.b5.hide()
#
# layout.addWidget(self.characterbox)
# layout.addWidget(self.actionbox)
# layout.addWidget(self.combobox)
# layout.addWidget(self.confirmButton)
# layout.addWidget(self.b6)
def btnstate(self,cameranum):
# numberOfCheckboxesChecked = 4
# print(b.text())
# index = 0
# if (b.text() == "Baby Camera"):
# index = 4
# else:
# index = (int)(b.text().split(" ")[1])
# index -= 1
# if len(self.camera_list) >= numberOfCheckboxesChecked and b.isChecked():
# b.setChecked(False)
# return
# else:
# if b.isChecked():
# self.camera_list.append(index)
# else:
# if index in self.camera_list:
# self.camera_list.remove(index)
# print(self.camera_list)
self.comm.switch_camera(cameras=[cameranum])
def addTeacher(self):
self.teacher_index = self.current_index
self.current_index += 1
self.comm.add_character('Chars/Teacher')
def addBaby(self):
self.comm = UnityCom()
self.baby_index = self.current_index
self.current_index += 1
self.comm.add_character('Chars/Baby')
# self.b5.show()
def random(self):
self.comm.randomize_scene()
def reset(self):
self.current_index = 0
self.comm.reset()
def run(self,charac,action,dest):
if (charac == "teacher"):
index = 1
else:
index = 0
# print(self.characterbox.currentIndex())
# if (self.characterbox.currentIndex() == 0):
# index = self.teacher_index
# else:
# index = self.baby_index
# action = "walk"
# dest = "ball"
script = ['<char{}> [{}] <{}> (1)']
script[0] = script[0].format(index, action, dest)
self.comm.render_script(
script, find_solution=True)
# def start_simulator(self,path):
# os.startfile(path)
def close_simulator(self):
os.system('taskkill /IM "' + "stack.exe" + '" /F')
def process_exists(self, process_name):
call = 'TASKLIST', '/FI', 'imagename eq %s' % process_name
# use buildin check_output right away
output = subprocess.check_output(call).decode()
# check in last line for process name
last_line = output.strip().split('\r\n')[-1]
# because Fail message could be translated
return last_line.lower().startswith(process_name.lower())
e = MyWidget()
def init(path):
os.startfile(path)
time.sleep(5)
def addcharacter(Character):
if Character == "baby":
e.addBaby()
else:
e.addTeacher()
def setcam(camnum):
e.btnstate(camnum)
def action(character,action,object):
e.run(character,action,object)
def close():
e.close_simulator()
# init()
# addcharacter("baby")
# addcharacter("teacher")
# setcam(0)
# action("baby","walk","ball")
# e.close_simulator() | ABCDESim | /ABCDESim-1.0.0.tar.gz/ABCDESim-1.0.0/room/Run.py | Run.py |
# ABCParse

[](https://pypi.python.org/pypi/ABCParse/)
[](https://badge.fury.io/py/ABCParse)
[](https://github.com/psf/black)
A better base class that handles parsing local arguments.
```bash
pip install ABCParse
```
```python
from ABCParse import ABCParse
class SomeClass(ABCParse):
def __init__(self, arg1, arg2):
self.__parse__(kwargs=locals())
something = SomeClass(arg1 = 4, arg2 = "name")
```
| ABCParse | /ABCParse-0.0.4.tar.gz/ABCParse-0.0.4/README.md | README.md |
import copy
import operator
ABN_MAX_CHARS = 14
ABN_DIGITS = 11
ACN_MAX_CHARS = 12
ACN_DIGITS = 9
WEIGHTING = [10, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
MODULUS = 89
def format(abn):
"""Format an ABN using standard spacing.
Args:
abn: An 11 digit ABN string.
Returns:
ABN in the standard format 'XX XXX XXX XXX'.
"""
return "{}{} {}{}{} {}{}{} {}{}{}".format(*abn)
def validate(abn):
"""Validate an 11 digit ABN.
This doesn't verify that the ABN actually exists, only that the format and
checksum match, per the method described in Australian Taxation Office
publication "NAT 2956-7.2000".
Args:
abn: The ABN to validate as integer or string. May contain whitespace.
Returns:
Formatted ABN as a string if valid, otherwise False.
"""
abn = str(abn)
if len(abn) > ABN_MAX_CHARS:
return False
abn = [int(c) for c in abn if c.isdigit()]
# NAT 2956-7.2000 states that "the first digit will be non-zero in all
# cases". While it is possible to manually generate an ABN that has a zero
# first digit, the ATO's algorithm will never do this, so we treat a leading
# zero as invalid.
if len(abn) != ABN_DIGITS or abn[0] == 0:
return False
# To verify the ABN according to NAT 2956-7.2000, we subtract 1 from the
# leading digit and take the dot product modulo 89. This will equal zero for
# a valid ABN.
temp_abn = copy.copy(abn)
temp_abn[0] -= 1
remainder = sum(map(operator.mul, temp_abn, WEIGHTING)) % MODULUS
if remainder != 0:
return False
return format(abn)
def acn_to_abn(acn):
"""Convert a 9 digit ACN or ARBN to an 11 digit ABN.
An Australian Company Number (ACN) or Australian Registered Body Number
(ARBN) can be converted to an ABN by prefixing it with two check digits.
Args:
acn: The ACN/ARBN as an integer or string. May contain whitespace.
Returns:
Formatted ABN or raises a ValueError exception.
"""
    acn = str(acn)
    if len(acn) > ACN_MAX_CHARS:
raise ValueError('Invalid ACN, too long.')
acn = [int(c) for c in acn if c.isdigit()]
if len(acn) != ACN_DIGITS:
raise ValueError('Invalid ACN, incorrect number of digits.')
# Similar approach to validating an ABN above.
remainder = MODULUS - sum(map(operator.mul, acn, WEIGHTING[2:])) % MODULUS
prefix = list(map(int, f'{remainder:02d}'))
prefix[0] += 1
return format(prefix + acn) | ABN | /ABN-1.0.0-py3-none-any.whl/abn/__init__.py | __init__.py |
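# Worked examples (added for illustration; results follow directly from the
# checksum arithmetic above):
#
#     validate('51 824 753 556')   # -> '51 824 753 556' (checksum passes)
#     validate('51 824 753 557')   # -> False
#     acn_to_abn('000 000 019')    # -> '89 000 000 019'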
import threading
import queue
import numpy as np
import pandas as pd
from atom.api import (Atom, Typed, Dict, List, Bool, Int, Float, Tuple,
Property, Value, set_default)
from enaml.application import timed_call
from matplotlib.figure import Figure
from matplotlib.axes import Axes
from matplotlib import transforms as T
from abr.abrpanel import WaveformPlot
from abr.datatype import ABRSeries, WaveformPoint, Point
from abr.parsers.dataset import Dataset
def plot_model(axes, model):
n = len(model.waveforms)
offset_step = 1/(n+1)
plots = []
text_trans = T.blended_transform_factory(axes.figure.transFigure,
axes.transAxes)
limits = [(w.y.min(), w.y.max()) for w in model.waveforms]
base_scale = np.mean(np.abs(np.array(limits)))
bscale_in_box = T.Bbox([[0, -base_scale], [1, base_scale]])
bscale_out_box = T.Bbox([[0, -1], [1, 1]])
bscale_in = T.BboxTransformFrom(bscale_in_box)
bscale_out = T.BboxTransformTo(bscale_out_box)
tscale_in_box = T.Bbox([[0, -1], [1, 1]])
tscale_out_box = T.Bbox([[0, 0], [1, offset_step]])
tscale_in = T.BboxTransformFrom(tscale_in_box)
tscale_out = T.BboxTransformTo(tscale_out_box)
minmax_in_box = T.Bbox([[0, 0], [1, 1]])
minmax_out_box = T.Bbox([[0, 0], [1, 1]])
minmax_in = T.BboxTransformFrom(minmax_in_box)
minmax_out = T.BboxTransformTo(minmax_out_box)
boxes = {
'tscale': tscale_in_box,
'tnorm': [],
'norm_limits': limits/base_scale,
'minmax': minmax_out_box,
}
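    # Note (added for clarity): these boxes parametrize the transform chain
    # built below. 'tscale' is the global y-zoom (driven by
    # WaveformPresenter.scale), 'tnorm' plus 'norm_limits' handle per-waveform
    # normalization (WaveformPresenter.normalized), and 'minmax' sets the
    # visible top/bottom window (WaveformPresenter.top / .bottom).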
for i, waveform in enumerate(model.waveforms):
y_min, y_max = waveform.y.min(), waveform.y.max()
tnorm_in_box = T.Bbox([[0, -1], [1, 1]])
tnorm_out_box = T.Bbox([[0, -1], [1, 1]])
tnorm_in = T.BboxTransformFrom(tnorm_in_box)
tnorm_out = T.BboxTransformTo(tnorm_out_box)
boxes['tnorm'].append(tnorm_in_box)
offset = offset_step * i + offset_step * 0.5
translate = T.Affine2D().translate(0, offset)
y_trans = bscale_in + bscale_out + \
tnorm_in + tnorm_out + \
tscale_in + tscale_out + \
translate + \
minmax_in + minmax_out + \
axes.transAxes
trans = T.blended_transform_factory(axes.transData, y_trans)
plot = WaveformPlot(waveform, axes, trans)
plots.append(plot)
text_trans = T.blended_transform_factory(axes.transAxes, y_trans)
axes.text(-0.05, 0, f'{waveform.level}', transform=text_trans)
axes.set_yticks([])
axes.grid()
for spine in ('top', 'left', 'right'):
axes.spines[spine].set_visible(False)
return plots, boxes
class WaveformPresenter(Atom):
figure = Typed(Figure, {})
analyzed_filenames = List()
axes = Typed(Axes)
dataset = Typed(Dataset)
model = Typed(ABRSeries)
current = Property()
toggle = Property()
scale = Property()
normalized = Property()
top = Property()
bottom = Property()
boxes = Dict()
_current = Int()
_toggle = Value()
plots = List()
threshold_marked = Bool(False)
peaks_marked = Bool(False)
valleys_marked = Bool(False)
parser = Value()
latencies = Dict()
batch_mode = Bool(False)
def _default_axes(self):
axes = self.figure.add_axes([0.1, 0.1, 0.8, 0.8])
return axes
def __init__(self, parser, latencies):
self.parser = parser
self.latencies = latencies
def load(self, dataset):
self.dataset = dataset
self.analyzed_filenames = dataset.find_analyzed_files()
self._current = 0
self.axes.clear()
self.axes.set_xlabel('Time (msec)')
self.model = self.parser.load(dataset)
self.plots, self.boxes = plot_model(self.axes, self.model)
self.normalized = False
self.threshold_marked = False
self.peaks_marked = False
self.valleys_marked = False
# Set current before toggle. Ordering is important.
self.current = len(self.model.waveforms)-1
self.toggle = None
self.update()
def save(self):
if np.isnan(self.model.threshold):
raise ValueError('Threshold not set')
if self.latencies:
if not self.peaks_marked or not self.valleys_marked:
raise ValueError('Waves not identified')
self.parser.save(self.model)
self.analyzed_filenames = self.dataset.find_analyzed_files()
def update(self):
for p in self.plots:
p.update()
if self.axes.figure.canvas is not None:
self.axes.figure.canvas.draw()
def _get_current(self):
return self._current
def _set_current(self, value):
if not (0 <= value < len(self.model.waveforms)):
return
if value == self.current:
return
self.plots[self.current].current = False
self.plots[value].current = True
self._current = value
self.update()
def _get_scale(self):
return self.boxes['tscale'].ymax
def _set_scale(self, value):
if value < 0:
return
box = np.array([[0, -value], [1, value]])
self.boxes['tscale'].set_points(box)
self.update()
def _get_normalized(self):
box = self.boxes['tnorm'][0]
return not ((box.ymin == -1) and (box.ymax == 1))
def _set_normalized(self, value):
if value:
zipped = zip(self.boxes['tnorm'], self.boxes['norm_limits'])
for box, (lb, ub) in zipped:
points = np.array([[0, lb], [1, ub]])
box.set_points(points)
else:
for box in self.boxes['tnorm']:
points = np.array([[0, -1], [1, 1]])
box.set_points(points)
self.axes.set_title('normalized' if value else 'raw')
self.update()
def _get_top(self):
return self.boxes['minmax'].ymax
def _set_top(self, value):
points = np.array([[0, self.bottom], [1, value]])
self.boxes['minmax'].set_points(points)
self.update()
def _get_bottom(self):
return self.boxes['minmax'].ymin
def _set_bottom(self, value):
points = np.array([[0, value], [1, self.top]])
self.boxes['minmax'].set_points(points)
self.update()
def set_suprathreshold(self):
self.set_threshold(-np.inf)
def set_subthreshold(self):
self.set_threshold(np.inf)
if self.latencies:
if not self.peaks_marked:
self.guess()
if not self.valleys_marked:
self.guess()
def set_threshold(self, threshold=None):
if threshold is None:
threshold = self.get_current_waveform().level
self.model.threshold = threshold
self.threshold_marked = True
if self.latencies and not self.peaks_marked:
self.guess()
self.update()
def _get_toggle(self):
return self._toggle
def _set_toggle(self, value):
if value == self._toggle:
return
for plot in self.plots:
point = plot.point_plots.get(self.toggle)
if point is not None:
point.current = False
self._toggle = value
for plot in self.plots:
point = plot.point_plots.get(value)
if point is not None:
point.current = True
self.update()
def guess(self):
if not self.latencies:
return
if not self.peaks_marked:
self.model.guess_p(self.latencies)
ptype = Point.PEAK
self.peaks_marked = True
elif not self.valleys_marked:
self.model.guess_n()
ptype = Point.VALLEY
self.valleys_marked = True
else:
return
self.update()
self.current = len(self.model.waveforms)-1
self.toggle = 1, ptype
self.update()
def update_point(self):
level = self.model.waveforms[self.current].level
self.model.update_guess(level, self.toggle)
self.update()
def move_selected_point(self, step):
point = self.get_current_point()
point.move(step)
self.update()
def set_selected_point(self, time):
try:
point = self.get_current_point()
index = point.time_to_index(time)
point.move(('set', index))
self.update()
except:
pass
def toggle_selected_point_unscorable(self):
try:
point = self.get_current_point()
point.unscorable = not point.unscorable
self.update()
self.modified = True
except:
pass
def mark_unscorable(self, mode):
try:
for waveform in self.model.waveforms:
if mode == 'all':
waveform.points[self.toggle].unscorable = True
elif mode == 'descending':
if waveform.level <= self.get_current_waveform().level:
waveform.points[self.toggle].unscorable = True
self.update()
self.modified = True
except:
pass
def get_current_waveform(self):
return self.model.waveforms[self.current]
def get_current_point(self):
return self.get_current_waveform().points[self.toggle]
def clear_points(self):
self.model.clear_points()
self.peaks_marked = False
self.valleys_marked = False
self.update()
def clear_peaks(self):
self.model.clear_peaks()
self.peaks_marked = False
self.update()
def clear_valleys(self):
self.model.clear_valleys()
self.valleys_marked = False
self.update()
def load_analysis(self, filename):
self.clear_points()
self.parser.load_analysis(self.model, filename)
self.peaks_marked = True
self.valleys_marked = True
self.update()
def remove_analysis(self, filename):
filename.unlink()
items = self.analyzed_filenames[:]
items.remove(filename)
self.analyzed_filenames = items
def scan_worker(parser, paths, queue, stop):
for path in paths:
for ds in parser.find_unprocessed(path):
queue.put(('append', ds))
if stop.is_set():
break
if stop.is_set():
break
queue.put(('complete',))
class SerialWaveformPresenter(WaveformPresenter):
unprocessed = List()
n_unprocessed = Int(0)
current_model = Int(-1)
batch_mode = set_default(True)
scan_paths = Value()
scan_queue = Value()
scan_stop_event = Value()
scan_complete = Bool(False)
scan_thread = Value()
def __init__(self, parser, latencies, paths):
super().__init__(parser, latencies)
self.scan_paths = paths
self.scan_queue = queue.Queue()
self.scan_stop_event = threading.Event()
args = (self.parser, self.scan_paths, self.scan_queue, self.scan_stop_event)
self.scan_thread = threading.Thread(target=scan_worker, args=args)
self.scan_thread.start()
self.unprocessed = []
timed_call(100, self.scan_poll)
def scan_poll(self):
while True:
try:
mesg = self.scan_queue.get(block=False)
if mesg[0] == 'append':
self.unprocessed.append(mesg[1])
elif mesg[0] == 'complete':
self.scan_complete = True
except queue.Empty:
if not self.scan_complete:
timed_call(100, self.scan_poll)
break
self.n_unprocessed = len(self.unprocessed)
if self.current_model < 0:
self.load_next()
def scan_stop(self):
self.scan_stop_event.set()
def load_model(self):
fs = self.unprocessed[self.current_model]
self.load(fs)
def load_prior(self):
if self.current_model < 1:
return
self.current_model -= 1
self.load_model()
def load_next(self):
if self.current_model >= (len(self.unprocessed) - 1):
return
self.current_model += 1
self.load_model()
def save(self):
super().save()
self.load_next() | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/presenter.py | presenter.py |
import logging
logging.basicConfig(level=logging.INFO)
import argparse
from collections import Counter
from pathlib import Path
from matplotlib import pylab as pl
import numpy as np
import pandas as pd
from scipy import stats
from atom.api import Bool, Typed, Str
import enaml
from enaml.application import deferred_call
from enaml.core.api import d_, Declarative
from enaml.qt.qt_application import QtApplication
with enaml.imports():
from abr.launch_window import LaunchWindow
from abr.main_window import (CompareWindow, DNDWindow, load_files,
SerialWindow)
from abr.presenter import SerialWaveformPresenter, WaveformPresenter
from abr.parsers import Parser
P_LATENCIES = {
1: stats.norm(1.5, 0.5),
2: stats.norm(2.5, 1),
3: stats.norm(3.0, 1),
4: stats.norm(4.0, 1),
5: stats.norm(5.0, 2),
}
def add_default_arguments(parser, waves=True):
parser.add_argument('--nofilter', action='store_false', dest='filter',
default=True, help='Do not filter waveform')
parser.add_argument('--lowpass',
help='Lowpass cutoff (Hz), default 3000 Hz',
default=3000, type=float)
parser.add_argument('--highpass',
help='Highpass cutoff (Hz), default 300 Hz',
default=300, type=float)
parser.add_argument('--order',
help='Filter order, default 1st order', default=1,
type=int)
parser.add_argument('--parser', default='EPL', help='Parser to use')
parser.add_argument('--user', help='Name of person analyzing data')
if waves:
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--threshold-only', action='store_true')
group.add_argument('--all-waves', action='store_true')
group.add_argument('--waves', type=int, nargs='+')
def parse_args(parser, waves=True):
options = parser.parse_args()
exclude = ('filter', 'lowpass', 'highpass', 'order', 'parser', 'user',
'waves', 'all_waves', 'threshold_only')
new_options = {k: v for k, v in vars(options).items() if k not in exclude}
filter_settings = None
if options.filter:
filter_settings = {
'lowpass': options.lowpass,
'highpass': options.highpass,
'order': options.order,
}
new_options['parser'] = Parser(options.parser, filter_settings,
options.user)
if not waves:
return new_options
if options.all_waves:
waves = [1, 2, 3, 4, 5]
elif options.threshold_only:
waves = []
else:
waves = options.waves[:]
new_options['latencies'] = {w: P_LATENCIES[w] for w in waves}
return new_options
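# Example invocations (illustrative; assumes console scripts named after the
# argparse programs, and the file/directory/user names are placeholders):
#
#     abr_gui --parser EPL --all-waves --user JD data/mouse1_abr.txt
#     abr_batch --parser EPL --waves 1 2 --highpass 300 --lowpass 3000 data_dir/
#     abr_compare --parser EPL analyzed_dir/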
def main_launcher():
app = QtApplication()
window = LaunchWindow()
window.show()
app.start()
app.stop()
def main_gui():
parser = argparse.ArgumentParser('abr_gui')
add_default_arguments(parser)
parser.add_argument('--demo', action='store_true', dest='demo',
default=False, help='Load demo data')
parser.add_argument('filenames', nargs='*')
options = parse_args(parser)
app = QtApplication()
view = DNDWindow(parser=options['parser'], latencies=options['latencies'])
deferred_call(load_files, options['parser'], options['latencies'],
options['filenames'], view.find('dock_area'))
view.show()
app.start()
app.stop()
def main_batch():
parser = argparse.ArgumentParser("abr_batch")
add_default_arguments(parser)
parser.add_argument('dirnames', nargs='*')
parser.add_argument('--skip-errors', action='store_true')
options = parse_args(parser)
parser = options['parser']
app = QtApplication()
presenter = SerialWaveformPresenter(parser=parser,
latencies=options['latencies'],
paths=options['dirnames'])
view = SerialWindow(presenter=presenter)
view.show()
app.start()
app.stop()
class Compare(Declarative):
data = Typed(pd.DataFrame)
x_column = d_(Str())
y_column = d_(Str())
as_difference = d_(Bool(True))
jitter = d_(Bool(True))
axes = Typed(pl.Axes)
figure = Typed(pl.Figure)
selected = Typed(list)
def _default_figure(self):
return pl.Figure()
def _default_axes(self):
return self.figure.add_subplot(111)
def _observe_data(self, event):
self._update_plot()
def _observe_x_column(self, event):
self._update_plot()
def _observe_y_column(self, event):
self._update_plot()
def _observe_as_difference(self, event):
self._update_plot()
def _observe_jitter(self, event):
self._update_plot()
def _default_x_column(self):
return self.data.columns[0]
def _default_y_column(self):
i = 1 if (len(self.data.columns) > 1) else 0
return self.data.columns[i]
def _update_plot(self):
x = self.data[self.x_column].copy()
y = self.data[self.y_column].copy()
if self.as_difference:
y -= x
if self.jitter:
x += np.random.uniform(-1, 1, len(x))
y += np.random.uniform(-1, 1, len(x))
self.axes.clear()
self.axes.plot(x, y, 'ko', picker=4, mec='w', mew=1)
if self.figure.canvas is not None:
self.figure.canvas.draw()
def pick_handler(self, event):
rows = self.data.iloc[event.ind]
files = list(rows.index.get_level_values('raw_file'))
frequencies = list(rows.index.get_level_values('frequency'))
self.selected = list(zip(files, frequencies))
def main_compare():
parser = argparse.ArgumentParser("abr_compare")
add_default_arguments(parser, waves=False)
parser.add_argument('directory')
options = parse_args(parser, waves=False)
data = options['parser'].load_analyses(options['directory'])
data = data.reset_index(['analyzed_file'], drop=True).unstack('user')
data = data.sort_index()
figure, axes = pl.subplots(1, 1)
compare = Compare(data=data)
app = QtApplication()
view = CompareWindow(parser=options['parser'], compare=compare)
view.show()
app.start()
app.stop() | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/app.py | app.py |
import operator as op
import numpy as np
import pandas as pd
from scipy import signal, stats
def find_peaks(waveform, distance=0.5e-3, prominence=50, wlen=None,
invert=False, detrend=True):
y = -waveform.y if invert else waveform.y
if detrend:
y = signal.detrend(y)
x = waveform.x
fs = waveform.fs
prominence = np.percentile(y, prominence)
i_distance = round(fs*distance)
if wlen is not None:
wlen = round(fs*wlen)
kwargs = {'distance': i_distance, 'prominence': prominence, 'wlen': wlen}
indices, metrics = signal.find_peaks(y, **kwargs)
metrics.pop('left_bases')
metrics.pop('right_bases')
metrics['x'] = waveform.x[indices]
metrics['y'] = waveform.y[indices]
metrics['index'] = indices
metrics = pd.DataFrame(metrics)
return metrics
def guess_peaks(metrics, latency):
p_score_norm = metrics['prominences'] / metrics['prominences'].sum()
guess = {}
for i in sorted(latency.keys()):
l = latency[i]
l_score = metrics['x'].apply(l.pdf)
l_score_norm = l_score / l_score.sum()
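        # Rank candidates by a weighted sum: the (normalized) latency prior
        # counts five times as much as normalized peak prominence. The factor
        # of 5 is the original author's choice, kept as-is.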
score = 5 * l_score_norm + p_score_norm
m = score.idxmax()
if np.isfinite(m):
guess[i] = metrics.loc[m]
metrics = metrics.loc[m+1:]
else:
guess[i] = {'x': l.mean(), 'y': 0}
return pd.DataFrame(guess).T
def generate_latencies_bound(guess, max_time=8.5, sd=0.25):
latency = {}
waves = sorted(guess.index.values)
for lb, ub in zip(waves[:-1], waves[1:]):
t_lb = guess.loc[lb, 'x']
t_ub = guess.loc[ub, 'x']
b = (t_ub-t_lb)/sd
latency[lb] = stats.truncnorm(0, b, t_lb, sd)
g = guess.iloc[-1]
t = g['x']
wave = g.name
b = (max_time-t)/sd
latency[wave] = stats.truncnorm(0, b, t, sd)
return latency
def generate_latencies_skewnorm(guess, skew=3):
latencies = {}
for w, row in guess.iterrows():
latencies[w] = stats.skewnorm(skew, row['x'], 0.1)
return latencies
def guess_iter(waveforms, latencies, invert=False):
waveforms = sorted(waveforms, key=op.attrgetter('level'), reverse=True)
guesses = {}
for w in waveforms:
metrics = find_peaks(w, invert=invert)
guesses[w.level] = guess_peaks(metrics, latencies)
latencies = generate_latencies_skewnorm(guesses[w.level])
return guesses
def guess(waveforms, latencies, invert=False):
guesses = {}
for w in waveforms:
metrics = find_peaks(w, invert=invert)
guesses[w.level] = guess_peaks(metrics, latencies[w.level])
return guesses
def peak_iterator(waveform, index, invert=False):
'''
    Coroutine that steps through candidate guesses for a peak (or valley when
    `invert` is True).
    Parameters
    ----------
    waveform : ABRWaveform
        Waveform to search.
    index : int
        Starting sample index of the guess.
    invert : bool
        If True, search the inverted waveform (i.e., find valleys).
    After priming with ``next()``, send ``(step_mode, step_size)`` tuples
    ('zero_crossing', 'time', or 'set') to move the guess; each send yields the
    updated index.
'''
metrics = find_peaks(waveform, distance=0.25e-3, prominence=25,
invert=invert)
while True:
step_mode, step_size = yield index
if step_mode == 'zero_crossing':
try:
delta = metrics['index'] - index
if step_size == 1:
i = delta[delta > 0].idxmin()
index = metrics.loc[i, 'index']
elif step_size == -1:
i = delta[delta < 0].idxmax()
index = metrics.loc[i, 'index']
except:
pass
elif step_mode == 'time':
# Ensure step size is at least one period in length
step_size = max(abs(step_size), 1/waveform.fs) * np.sign(step_size)
index += round(step_size * waveform.fs)
elif step_mode == 'set':
index = step_size
index = int(round(np.clip(index, 0, len(waveform.x)-1))) | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/peakdetect.py | peakdetect.py |
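# Illustrative driving of the coroutine above (added; `waveform` stands for any
# ABRWaveform instance):
#
#     it = peak_iterator(waveform, index=100)
#     next(it)                       # prime the coroutine
#     it.send(('time', 0.5e-3))      # nudge the guess 0.5 ms later in time
#     it.send(('zero_crossing', 1))  # snap to the next detected peak
#     idx = it.send(('set', 250))    # jump to an explicit sample index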
from pathlib import Path
import matplotlib as mp
mp.rcParams['backend'] = 'qt5agg'
mp.rcParams['axes.labelsize'] = 14
mp.rcParams['axes.labelweight'] = 'bold'
mp.rcParams['axes.spines.left'] = False
mp.rcParams['axes.spines.bottom'] = True
mp.rcParams['axes.spines.top'] = False
mp.rcParams['axes.spines.right'] = False
mp.rcParams['ytick.left'] = False
mp.rcParams['figure.subplot.left'] = 0.1
mp.rcParams['figure.subplot.right'] = 0.8
mp.rcParams['figure.subplot.bottom'] = 0.1
mp.rcParams['figure.subplot.top'] = 0.8
from matplotlib.pylab import setp
class StylePlot:
HIDDEN = {'alpha': 0}
def update(self):
self.update_plot()
setp(self.plot, **self.get_style())
def get_style(self):
raise NotImplementedError
def update_plot(self):
pass
class PointPlot(StylePlot):
PEAK = {
'linestyle': ' ',
'marker': 'o',
'zorder': 20,
'alpha': 1,
'markersize': 8,
'markeredgewidth': 1,
'markeredgecolor': (0, 0, 0)
}
PEAK_FADED = {
'linestyle': ' ',
'marker': 'o',
'zorder': 20,
'alpha': 0.5,
'markersize': 8,
'markeredgewidth': 1,
'markeredgecolor': (0, 0, 0)
}
VALLEY = {
'linestyle': ' ',
'marker': '^',
'zorder': 20,
'alpha': 1,
'markersize': 9,
'markeredgewidth': 1,
'markeredgecolor': (0, 0, 0)
}
TOGGLE = {
'linestyle': ' ',
'marker': 's',
'zorder': 100,
'alpha': 1,
'markersize': 8,
'markeredgewidth': 1,
'markerfacecolor': (1, 1, 1),
'markeredgecolor': (0, 0, 0)
}
COLORS = [(1, 0, 0), (1, 1, 0), (0, 1, 0), (0, 1, 1), (0, 0, 1)]
def __init__(self, parent, figure, point):
self.figure = figure
self.parent = parent
self.point = point
self.plot, = self.figure.plot(0, 0, transform=parent.transform,
clip_on=False, picker=10)
self.current = False
self.update()
def get_style(self):
# Hide subthreshold points
if self.parent.waveform.is_subthreshold():
return self.HIDDEN
# Return toggled value
if self.current and self.parent.current:
return self.TOGGLE
# Fallback to this
style = self.PEAK.copy() if self.point.is_peak() else self.VALLEY.copy()
        index = self.point.wave_number - 1
        c = self.COLORS[index]
style['c'] = c
style['markerfacecolor'] = c
if self.point.unscorable:
style['alpha'] = 0.5
style['markersize'] = 4
return style
def update_plot(self):
self.plot.set_data(self.point.x, self.point.y)
def remove(self):
self.plot.remove()
class WaveformPlot(StylePlot):
CUR_PLOT = {
'c': (0, 0, 0),
'linewidth': 4,
'linestyle': '-',
'zorder': 20,
}
PLOT = {
'c': (0, 0, 0),
'linewidth': 2,
'linestyle': '-',
'zorder': 10,
}
CUR_SUBTH_PLOT = {
'c': (0.75, 0.75, 0.75),
'linewidth': 4,
'linestyle': '-',
'zorder': 10,
}
SUBTH_PLOT = {
'c': (0.75, 0.75, 0.75),
'linewidth': 2,
'linestyle': '-',
'zorder': 10,
}
def __init__(self, waveform, axis, transform):
self.axis = axis
self.waveform = waveform
self.current = False
self.point_plots = {}
self.transform = transform
# Create the plot
self.plot, = self.axis.plot(self.waveform.x, self.waveform.y, 'k-',
transform=transform, clip_on=False,
picker=10)
self.update()
STYLE = {
(True, True): CUR_PLOT,
(True, False): CUR_SUBTH_PLOT,
(False, True): PLOT,
(False, False): SUBTH_PLOT,
}
def get_style(self):
style = self.current, self.waveform.is_suprathreshold()
return self.STYLE[style]
def update(self):
# Check to see if new points were added (e.g. valleys)
for key, point in self.waveform.points.items():
if key not in self.point_plots:
self.point_plots[key] = PointPlot(self, self.axis, point)
for key, point_plot in list(self.point_plots.items()):
point = self.waveform.points.get(key)
if point is None:
point_plot.remove()
del self.point_plots[key]
elif point != point_plot.point:
point_plot.point = self.waveform.points[key]
for p in self.point_plots.values():
p.update()
super().update() | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/abrpanel.py | abrpanel.py |
from enum import Enum
import functools
import operator
import numpy as np
import pandas as pd
from scipy import signal
from atom.api import Atom, Bool, Int, Typed, Value
from .peakdetect import (generate_latencies_bound, generate_latencies_skewnorm,
guess, guess_iter, peak_iterator)
@functools.total_ordering
class Point(Enum):
PEAK = 'peak'
VALLEY = 'valley'
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
        return NotImplemented
class ABRWaveform:
def __init__(self, fs, signal, level):
self.fs = fs
self.signal = signal
self.level = level
self.points = {}
self.series = None
@property
def x(self):
return self.signal.index.values
@property
def y(self):
return signal.detrend(self.signal.values)
def is_subthreshold(self):
if self.series.threshold is None or np.isnan(self.series.threshold):
return False
return self.level < self.series.threshold
def is_suprathreshold(self):
if self.series.threshold is None or np.isnan(self.series.threshold):
return True
return self.level >= self.series.threshold
def stat(self, lb, ub, func):
return func(self.signal.loc[lb:ub])
def mean(self, lb, ub):
return self.stat(lb, ub, np.mean)
def std(self, lb, ub):
return self.stat(lb, ub, np.std)
def set_point(self, wave, ptype, index=None, latency=None, unscorable=False):
# First, figure out index given requested latency
if index is None and latency is None:
raise ValueError('Must provide index or latency')
elif index is not None and latency is not None:
raise ValueError('Must provide either index or latency')
elif latency is not None:
index = np.searchsorted(self.x, latency)
# Now, create point if it does not exist
if (wave, ptype) not in self.points:
point = WaveformPoint(self, 0, wave, ptype)
self.points[wave, ptype] = point
# Update the values on the point
self.points[wave, ptype].index = int(index)
self.points[wave, ptype].unscorable = unscorable
def clear_points(self):
self.points = {}
def clear_peaks(self):
for wave, ptype in list(self.points):
if ptype == Point.PEAK:
del self.points[wave, ptype]
def clear_valleys(self):
for wave, ptype in list(self.points):
if ptype == Point.VALLEY:
del self.points[wave, ptype]
def _set_points(self, guesses, ptype):
for wave, wave_guess in guesses.iterrows():
index = wave_guess.get('index', np.nan)
if not np.isfinite(index):
index = np.searchsorted(self.x , wave_guess['x'])
index = np.clip(index, 0, len(self.x)-1)
else:
index = int(index)
self.set_point(wave, ptype, index)
class WaveformPoint(Atom):
'''
Parameters
----------
TODO
'''
parent = Typed(ABRWaveform)
index = Int()
wave_number = Int()
point_type = Typed(Point)
iterator = Value()
unscorable = Bool(False)
def __init__(self, parent, index, wave_number, point_type):
# Order of setting attributes is important here
self.parent = parent
self.point_type = point_type
self.wave_number = wave_number
invert = self.is_valley()
iterator = peak_iterator(parent, index, invert=invert)
next(iterator)
self.iterator = iterator
self.index = index
def _observe_index(self, event):
if event['type'] == 'update':
self.iterator.send(('set', event['value']))
@property
def x(self):
return self.parent.x[self.index]
@property
def y(self):
return self.parent.y[self.index]
def is_peak(self):
return self.point_type == Point.PEAK
def is_valley(self):
return self.point_type == Point.VALLEY
@property
def latency(self):
latency = self.x
if self.parent.is_subthreshold():
return -np.abs(latency)
elif self.unscorable:
return -np.abs(latency)
return latency
@property
def amplitude(self):
if self.unscorable:
return np.nan
return self.parent.signal.iloc[self.index]
def move(self, step):
self.index = self.iterator.send(step)
def time_to_index(self, time):
return np.searchsorted(self.parent.x, time)
class ABRSeries(object):
def __init__(self, waveforms, freq=None, threshold=np.nan):
waveforms.sort(key=operator.attrgetter('level'))
self.waveforms = waveforms
self.freq = freq
self.threshold = threshold
for waveform in self.waveforms:
waveform.series = self
def get_level(self, level):
for waveform in self.waveforms:
if waveform.level == level:
return waveform
raise AttributeError(f'{level} dB SPL not in series')
def guess_p(self, latencies):
level_guesses = guess_iter(self.waveforms, latencies)
self._set_points(level_guesses, Point.PEAK)
def guess_n(self):
n_latencies = {}
for w in self.waveforms:
g = {p.wave_number: p.x for p in w.points.values() if p.is_peak()}
g = pd.DataFrame({'x': g})
n_latencies[w.level] = generate_latencies_bound(g)
level_guesses = guess(self.waveforms, n_latencies, invert=True)
self._set_points(level_guesses, Point.VALLEY)
def update_guess(self, level, point):
waveform = self.get_level(level)
p = waveform.points[point]
g = {p.wave_number: p.x}
g = pd.DataFrame({'x': g})
latencies = generate_latencies_skewnorm(g)
i = self.waveforms.index(waveform)
waveforms = self.waveforms[:i]
level_guesses = guess_iter(waveforms, latencies, invert=p.is_valley())
self._set_points(level_guesses, p.point_type)
def clear_points(self):
for waveform in self.waveforms:
waveform.clear_points()
def clear_peaks(self):
for waveform in self.waveforms:
waveform.clear_peaks()
def clear_valleys(self):
for waveform in self.waveforms:
waveform.clear_valleys()
def _set_points(self, level_guesses, ptype):
for level, level_guess in level_guesses.items():
waveform = self.get_level(level)
waveform._set_points(level_guess, ptype)
def load_analysis(self, threshold, points):
if threshold is None:
threshold = np.nan
self.threshold = threshold
for j, waveform in enumerate(self.waveforms[::-1]):
analysis = points.iloc[j]
for i in range(1, 6):
try:
p_latency = np.abs(analysis[f'P{i} Latency'])
n_latency = np.abs(analysis[f'N{i} Latency'])
p_amplitude = analysis[f'P{i} Amplitude']
n_amplitude = analysis[f'N{i} Amplitude']
p_unscorable = bool(np.isnan(p_amplitude))
n_unscorable = bool(np.isnan(n_amplitude))
waveform.set_point(i, Point.PEAK, latency=p_latency, unscorable=p_unscorable)
waveform.set_point(i, Point.VALLEY, latency=n_latency, unscorable=n_unscorable)
except KeyError:
pass | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/datatype.py | datatype.py |
import io
from pathlib import Path
import re
import numpy as np
import pandas as pd
from scipy import signal
from abr.datatype import ABRWaveform, ABRSeries
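# The loader below expects an EPL CFTS-style text export. Based on the regular
# expressions and the 'DATA' split used in load(), a file looks roughly like
# this (field values are illustrative; real headers contain many more lines):
#
#     :RUN-...
#     :LEVELS:80;70;60;50;
#     ...FREQ: 8000.00...
#     ...SAMPLE (usec): 40...
#     DATA
#     <whitespace-separated waveform samples, one column per level>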
def load(filename, filter_settings=None, frequencies=None):
filename = Path(filename)
with filename.open(encoding='ISO-8859-1') as f:
line = f.readline()
if not line.startswith(':RUN-'):
raise IOError('Unsupported file format')
p_level = re.compile(':LEVELS:([0-9;]+)')
    p_fs = re.compile(r'SAMPLE \(.sec\): ([0-9]+)')
    p_freq = re.compile(r'FREQ: ([0-9\.]+)')
abr_window = 8500 # usec
try:
with filename.open(encoding='ISO-8859-1') as f:
header, data = f.read().split('DATA')
# Extract data from header
levelstring = p_level.search(header).group(1).strip(';').split(';')
levels = np.array(levelstring).astype(np.float32)
sampling_period = float(p_fs.search(header).group(1))
frequency = float(p_freq.search(header).group(1))
# Convert text representation of data to Numpy array
fs = 1e6/sampling_period
cutoff = int(abr_window / sampling_period)
data = np.array(data.split()).astype(np.float32)
data.shape = -1, len(levels)
data = data.T[:, :cutoff]
t = np.arange(data.shape[-1]) / fs * 1e3
t = pd.Index(t, name='time')
if filter_settings is not None:
Wn = filter_settings['highpass'], filter_settings['lowpass']
N = filter_settings['order']
b, a = signal.iirfilter(N, Wn, fs=fs)
data = signal.filtfilt(b, a, data, axis=-1)
waveforms = []
for s, level in zip(data, levels):
# Checks for a ABR I-O bug that sometimes saves zeroed waveforms
if not (s == 0).all():
w = pd.Series(s, index=t)
waveform = ABRWaveform(fs, w, level)
waveforms.append(waveform)
series = ABRSeries(waveforms, frequency)
series.filename = filename
return [series]
except (AttributeError, ValueError):
        msg = 'Could not parse %s. Most likely not a valid ABR file.' % filename
raise IOError(msg) | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/EPL.py | EPL.py |
from __future__ import division
import pandas as pd
import numpy as np
from abr.datatype import ABRWaveform, ABRSeries
################################################################################
# Utility functions
################################################################################
def _parse_line(line):
'''
Parse list of comma-separated values from line
Parameters
----------
line : string
Line containing the values that need to be parsed
Returns
-------
tokens : list
List of values found in line. If values are numeric, they will be
converted to floats. Otherwise they will be returned as strings.
'''
tokens = line.strip().split(',')[1:]
try:
return [float(t) for t in tokens if t]
except ValueError:
return [t for t in tokens if t]
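# Example (added for illustration):
#
#     _parse_line('Intensity,80,70,60\n')   # -> [80.0, 70.0, 60.0]
#     _parse_line('Channel,A,B\n')          # -> ['A', 'B']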
def load_metadata(filename):
'''
Load the metadata stored in the ABR file
Parameters:
-----------
filename : string
Filename to load
Returns
-------
info : pandas.DataFrame
Dataframe containing information on each waveform
'''
info = {}
with open(filename, 'r') as fh:
for i, line in enumerate(fh):
if i == 20:
break
name = line.split(',', 1)[0].strip(':').lower()
info[name] = _parse_line(line)
info = pd.DataFrame(info)
# Number the trials. We will use this number later to look up which column
# contains the ABR waveform for corresponding parameter.
info['waveform'] = np.arange(len(info))
info.set_index('waveform', inplace=True)
# Convert the intensity to the actual level in dB SPL
info['level'] = np.round(info.intensity/10)*10
# Store the scaling factor for the waveform so we can recover this when
# loading. By default the scaling factor is 674. For 110 dB SPL, the
# scaling factor is 337. The statistician uses 6.74 and 3.37, but he
# includes a division of 100 elsewhere in his code to correct.
info['waveform_sf'] = 6.74e2
# The rows where level is 110 dB SPL have a different scaling factor.
info.loc[info.level == 110, 'waveform_sf'] = 3.37e2
# Start time of stimulus in usec (since sampling period is reported in usec,
# we should try to be consistent with all time units).
info['stimulus_start'] = 12.5e3
return info
def load_waveforms(filename, info):
'''
Load the waveforms stored in the ABR file
Only the waveforms specified in info will be loaded. For example, if you
have filtered the info DataFrame to only contain waveforms from channel 1,
only those waveforms will be loaded.
Parameters:
-----------
filename : string
Filename to load
info : pandas.DataFrame
Waveform metadata (see `load_metadata`)
Returns
-------
info : pandas.DataFrame
Dataframe containing waveforms
'''
# Read the waveform table into a dataframe
df = pd.io.parsers.read_csv(filename, skiprows=20)
# Keep only the columns containing the signal of interest. There are six
# columns for each trial. We only want the column containing the raw
# average (i.e., not converted to uV).
df = df[[c for c in df.columns if c.startswith('Average:')]]
# Renumber them so we can look them up by number. The numbers should
# correspond to the trial number we generated in `load_metadata`.
df.columns = np.arange(len(df.columns))
# Loop through the entries in the info DataFrame. This dataframe contains
# metadata needed for processing the waveform (e.g., it tells us which
# waveforms to keep, the scaling factor to use, etc.).
signals = []
for w_index, w_info in info.iterrows():
# Compute time of each point. Currently in usec because smp. period is
# in usec.
t = np.arange(len(df), dtype=np.float32)*w_info['smp. period']
# Subtract stimulus start so that t=0 is when stimulus begins. Convert
# to msec.
t = (t-w_info['stimulus_start'])*1e-3
time = pd.Index(t, name='time')
# Divide by the scaling factor and convert from nV to uV
s = df[w_index]/w_info['waveform_sf']*1e-3
s.index = time
signals.append(s)
# Merge together the waveforms into a single DataFrame
waveforms = pd.concat(signals, keys=info.index, names=['waveform'])
waveforms = waveforms.unstack(level='waveform')
return waveforms
################################################################################
# API
################################################################################
# Minimum wave 1 latencies
latencies = {
1000: 3.1,
3000: 2.1,
4000: 2.3,
6000: 1.8,
}
def load(fname, filter=None, abr_window=8.5e-3):
with open(fname) as fh:
line = fh.readline()
if not line.startswith('Identifier:'):
raise IOError('Unsupported file format')
info = load_metadata(fname)
info = info[info.channel == 1]
fs = 1/(info.iloc[0]['smp. period']*1e-6)
series = []
for frequency, f_info in info.groupby('stim. freq.'):
signal = load_waveforms(fname, f_info)
signal = signal[signal.index >= 0]
waveforms = []
min_latency = latencies.get(frequency)
for i, row in f_info.iterrows():
s = signal[i].values[np.newaxis]
waveform = ABRWaveform(fs, s, row['level'], min_latency=min_latency,
filter=filter)
waveforms.append(waveform)
s = ABRSeries(waveforms, frequency/1e3)
s.filename = fname
series.append(s)
return series | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/NCRAR.py | NCRAR.py |
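A minimal usage sketch for this parser, assuming it is importable as `abr.parsers.NCRAR`; the file name is hypothetical (any IHS text export whose first line starts with `Identifier:` should work).

```python
from abr.parsers import NCRAR

# Hypothetical IHS text export; load() raises IOError for other formats.
series_list = NCRAR.load('example-abr-export.csv')
print(len(series_list), 'frequency series loaded')
```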
from functools import cached_property, lru_cache
import glob
import json
import os.path
from pathlib import Path
import numpy as np
import pandas as pd
from scipy import signal
from abr.datatype import ABRWaveform, ABRSeries
from .dataset import DataCollection, Dataset
def get_filename(pathname, suffix='ABR average waveforms.csv'):
if pathname.is_file():
if pathname.name.endswith(suffix):
return pathname
else:
raise IOError('Invalid ABR file')
filename = pathname / suffix
if filename.exists():
return filename
filename = pathname / f'{pathname.stem} {suffix}'
if filename.exists():
return filename
raise IOError(f'Could not find average waveforms file for {pathname}')
@lru_cache(maxsize=64)
def read_file(filename):
with filename.open() as fh:
# This supports a variable-length header where we may not have included
# some levels (e.g., epoch_n and epoch_reject_ratio).
header = {}
while True:
line = fh.readline()
if line.startswith('time'):
break
name, *keys = line.split(',')
header[name] = np.array(keys).astype('f')
data = pd.read_csv(fh, index_col=0, header=None)
header = pd.MultiIndex.from_arrays(list(header.values()),
names=list(header.keys()))
data.index.name = 'time'
data.index *= 1e3
data.columns = header
return data.T
class PSIDataCollection(DataCollection):
def __init__(self, filename):
filename = Path(filename)
self.filename = get_filename(filename)
@cached_property
def fs(self):
settings_file = get_filename(self.filename.parent, 'ABR processing settings.json')
if settings_file.exists():
fs = json.loads(settings_file.read_text())['actual_fs']
else:
            fs = np.mean(np.diff(self.data.columns.values) ** -1)
return fs
@cached_property
def data(self):
data = read_file(self.filename)
keep = ['frequency', 'level']
drop = [c for c in data.index.names if c not in keep]
return data.reset_index(drop, drop=True)
@cached_property
def frequencies(self):
return self.data.index.unique('frequency').values
@property
def name(self):
return self.filename.parent.stem
def iter_frequencies(self):
for frequency in self.frequencies:
yield PSIDataset(self, frequency)
class PSIDataset(Dataset):
def __init__(self, parent, frequency):
self.parent = parent
self.frequency = frequency
@property
def filename(self):
return self.parent.filename
@property
def fs(self):
return self.parent.fs
def get_series(self, filter_settings=None):
data = self.parent.data.loc[self.frequency]
if filter_settings is not None:
Wn = filter_settings['highpass'], filter_settings['lowpass']
N = filter_settings['order']
b, a = signal.iirfilter(N, Wn, fs=self.fs)
data_filt = signal.filtfilt(b, a, data.values, axis=-1)
data = pd.DataFrame(data_filt, columns=data.columns, index=data.index)
waveforms = []
for level, w in data.iterrows():
level = float(level)
waveforms.append(ABRWaveform(self.fs, w, level))
series = ABRSeries(waveforms, self.frequency)
series.filename = self.parent.filename
series.id = self.parent.filename.parent.name
return series
def iter_all(path):
results = []
path = Path(path)
if path.stem.endswith('abr_io'):
yield from PSIDataCollection(path).iter_frequencies()
else:
for subpath in path.glob('**/*abr_io'):
yield from PSIDataCollection(subpath).iter_frequencies() | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/PSI.py | PSI.py |
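A short usage sketch, assuming the module is importable as `abr.parsers.PSI`; the folder name and filter settings are illustrative assumptions only.

```python
from abr.parsers import PSI

# Hypothetical psiexperiment output folder ending in 'abr_io'
for dataset in PSI.iter_all('data/mouse01 abr_io'):
    # Each PSIDataset covers one stimulus frequency; filtering is optional.
    series = dataset.get_series({'highpass': 300, 'lowpass': 3000, 'order': 1})
    print(dataset.frequency, series.id)
```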
import importlib
import re
from glob import glob
import os
from pathlib import Path
import time
import pandas as pd
import numpy as np
import abr
from ..datatype import Point
P_ANALYZER = re.compile(r'.*kHz(?:-(\w+))?-analyzed.txt')
def get_analyzer(filename):
return P_ANALYZER.match(filename.name).group(1)
def waveform_string(waveform):
data = [f'{waveform.level:.2f}']
data.append(f'{waveform.mean(0, 1)}')
data.append(f'{waveform.std(0, 1)}')
for _, point in sorted(waveform.points.items()):
data.append(f'{point.latency:.8f}')
data.append(f'{point.amplitude:.8f}')
return '\t'.join(data)
def filter_string(waveform):
if getattr(waveform, '_zpk', None) is None:
return 'No filtering'
t = 'Pass %d -- z: %r, p: %r, k: %r'
filt = [t % (i, z, p, k) for i, (z, p, k) in enumerate(waveform._zpk)]
return '\n' + '\n'.join(filt)
def load_analysis(fname):
    th_match = re.compile(r'Threshold \(dB SPL\): ([\w.-]+)')
    freq_match = re.compile(r'Frequency \(kHz\): ([\d.]+)')
with open(fname) as fh:
text = fh.readline()
th = th_match.search(text).group(1)
th = None if th == 'None' else float(th)
text = fh.readline()
freq = float(freq_match.search(text).group(1))
for line in fh:
if line.startswith('NOTE'):
break
data = pd.io.parsers.read_csv(fh, sep='\t', index_col='Level')
return (freq, th, data)
def parse_peaks(peaks, threshold):
# Convert the peaks dataframe to a format that can be used by _set_points.
    p_pattern = re.compile(r'P(\d) Latency')
    n_pattern = re.compile(r'N(\d) Latency')
p_latencies = {}
n_latencies = {}
for c in peaks:
match = p_pattern.match(c)
if match:
wave = int(match.group(1))
p_latencies[wave] = pd.DataFrame({'x': peaks[c]})
match = n_pattern.match(c)
if match:
wave = int(match.group(1))
n_latencies[wave] = pd.DataFrame({'x': peaks[c]})
p_latencies = pd.concat(p_latencies.values(), keys=p_latencies.keys(),
names=['wave'])
p_latencies = {g: df.reset_index('Level', drop=True) \
for g, df in p_latencies.groupby('Level')}
n_latencies = pd.concat(n_latencies.values(), keys=n_latencies.keys(),
names=['wave'])
n_latencies = {g: df.reset_index('Level', drop=True) \
for g, df in n_latencies.groupby('Level')}
for level, df in p_latencies.items():
if level < threshold:
df[:] = -df[:]
for level, df in n_latencies.items():
if level < threshold:
df[:] = -df[:]
return p_latencies, n_latencies
class Parser(object):
filename_template = '{filename}-{frequency}kHz-{user}analyzed.txt'
def __init__(self, file_format, filter_settings, user=None):
'''
Parameters
----------
file_format : string
File format that will be loaded.
filter_settings : {None, dict}
If None, no filtering is applied. If dict, must contain ftype,
lowpass, highpass and order as keys.
user : {None, string}
Person analyzing the data.
'''
self._file_format = file_format
self._filter_settings = filter_settings
self._user = user
self._module_name = f'abr.parsers.{file_format}'
self._module = importlib.import_module(self._module_name)
def load(self, fs):
return fs.get_series(self._filter_settings)
def load_analysis(self, series, filename):
freq, th, peaks = load_analysis(filename)
series.load_analysis(th, peaks)
def find_analyzed_files(self, filename, frequency):
frequency = round(frequency * 1e-3, 8)
glob_pattern = self.filename_template.format(
filename=filename.with_suffix(''),
frequency=frequency,
user='*')
path = Path(glob_pattern)
return list(path.parent.glob(path.name))
def get_save_filename(self, filename, frequency):
# Round frequency to nearest 8 places to minimize floating-point
# errors.
user_name = self._user + '-' if self._user else ''
frequency = round(frequency * 1e-3, 8)
save_filename = self.filename_template.format(
filename=filename.with_suffix(''),
frequency=frequency,
user=user_name)
return Path(save_filename)
def save(self, model):
# Assume that all waveforms were filtered identically
filter_history = filter_string(model.waveforms[-1])
# Generate list of columns
columns = ['Level', '1msec Avg', '1msec StDev']
point_keys = sorted(model.waveforms[0].points)
for point_number, point_type in point_keys:
point_type_code = 'P' if point_type == Point.PEAK else 'N'
for measure in ('Latency', 'Amplitude'):
columns.append(f'{point_type_code}{point_number} {measure}')
columns = '\t'.join(columns)
spreadsheet = '\n'.join(waveform_string(w) \
for w in reversed(model.waveforms))
content = CONTENT.format(threshold=model.threshold,
frequency=model.freq*1e-3,
filter_history=filter_history,
columns=columns,
spreadsheet=spreadsheet,
version=abr.__version__)
filename = self.get_save_filename(model.filename, model.freq)
with open(filename, 'w') as fh:
fh.writelines(content)
def iter_all(self, path):
yield from self._module.iter_all(path)
def find_processed(self, path):
for ds in self.iter_all(path):
if self.get_save_filename(ds.filename, ds.frequency).exists():
yield ds
def find_unprocessed(self, path):
for ds in self.iter_all(path):
if not self.get_save_filename(ds.filename, ds.frequency).exists():
yield ds
def find_analyses(self, dirname, frequencies=None):
analyzed = {}
        # Iterate datasets via iter_all (the class does not define find_all)
        # and optionally restrict to the requested frequencies.
        for ds in self.iter_all(dirname):
            if frequencies is not None and ds.frequency not in frequencies:
                continue
            analyzed[ds.filename, ds.frequency] = \
                self.find_analyzed_files(ds.filename, ds.frequency)
return analyzed
def load_analyses(self, dirname, frequencies=None):
analyzed = self.find_analyses(dirname, frequencies)
keys = []
thresholds = []
for (raw_file, frequency), analyzed_files in analyzed.items():
for analyzed_file in analyzed_files:
user = get_analyzer(analyzed_file)
keys.append((raw_file, frequency, analyzed_file, user))
_, threshold, _ = load_analysis(analyzed_file)
thresholds.append(threshold)
cols = ['raw_file', 'frequency', 'analyzed_file', 'user']
index = pd.MultiIndex.from_tuples(keys, names=cols)
return pd.Series(thresholds, index=index)
CONTENT = '''
Threshold (dB SPL): {threshold:.2f}
Frequency (kHz): {frequency:.2f}
Filter history (zpk format): {filter_history}
file_format_version: 0.0.2
code_version: {version}
NOTE: Negative latencies indicate no peak. NaN for amplitudes indicate peak was unscorable.
{columns}
{spreadsheet}
'''.strip()
PARSER_MAP = {
'PSI': 'psiexperiment',
'NCRAR': 'IHS text export',
'EPL': 'EPL CFTS',
} | ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/__init__.py | __init__.py |
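A sketch of how the Parser class ties the format modules together; the data directory, filter settings, and user initials are hypothetical.

```python
from abr.parsers import Parser

filter_settings = {'highpass': 300, 'lowpass': 3000, 'order': 1}
parser = Parser('PSI', filter_settings, user='JD')

for dataset in parser.find_unprocessed('data/'):
    series = parser.load(dataset)   # applies filter_settings via get_series
    print('would analyze', parser.get_save_filename(dataset.filename,
                                                    dataset.frequency))
```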
# Absolute Investimentos main package
Main package with basic functions for internal use at Absolute Investimentos.
## Installing the package on Windows
- Replace the files on the network share, in the `P:/sistemas/Python` folder
- Update the version in setup.py to the publication date
- If needed, add new dependencies to requirements.txt
- Send an email replying to "Documentação Pacote ABS (python)" announcing that
a new version is available
## Installing the package on Linux
To install the package, it is recommended to have conda configured on the machine.
You also need to install the compatible [Microsoft ODBC](https://bit.ly/3Bsn0Pz) drivers.
Once both are installed, open a terminal, navigate to this project's root
folder, `PackageABS`, and run:
```bash
> sudo apt-get install unixodbc-dev
> conda install pyodbc
> cd Pacotes/
> pip install -r requirements.txt
> pip install .
> python -c "import ABS; print(ABS.__name__)"
```
| ABS-95i943594 | /ABS_95i943594-2023.5.4.tar.gz/ABS_95i943594-2023.5.4/README.md | README.md |
from absql.files import accepted_file_types
from absql.files.loader import generate_loader
from absql.render import render_text, render_context, render_file
class Runner:
def __init__(
self,
extra_constructors=None,
replace_only=False,
file_context_from=None,
partial_kwargs=None,
**extra_context,
):
self.extra_context = dict(extra_context)
self.loader = generate_loader(extra_constructors or [])
self.replace_only = replace_only
self.file_context_from = file_context_from
self.partial_kwargs = partial_kwargs or ["engine"]
@staticmethod
def render_text(
text, replace_only=False, pretty_encode=False, partial_kwargs=None, **vars
):
return render_text(
text=text,
replace_only=replace_only,
pretty_encode=pretty_encode,
partial_kwargs=partial_kwargs,
**vars,
)
@staticmethod
def render_context(extra_context=None, file_contents=None, partial_kwargs=None):
return render_context(
extra_context=extra_context,
file_contents=file_contents,
partial_kwargs=partial_kwargs,
)
@staticmethod
def render_file(
file_path,
loader=None,
replace_only=False,
extra_constructors=None,
file_context_from=None,
pretty_encode=False,
partial_kwargs=None,
**extra_context,
):
return render_file(
file_path=file_path,
loader=loader,
replace_only=replace_only,
extra_constructors=extra_constructors,
file_context_from=file_context_from,
pretty_encode=pretty_encode,
partial_kwargs=partial_kwargs,
**extra_context,
)
def render(self, text, pretty_encode=False, replace_only=None, **extra_context):
"""
        Given text or a file path, render SQL with a combination of
        the vars in the file and any extras passed to extra_context,
        either when the runner was instantiated or in this call.
"""
current_context = self.extra_context.copy()
current_context.update(extra_context)
if text.endswith(accepted_file_types):
rendered = render_file(
file_path=text,
loader=self.loader,
replace_only=replace_only or self.replace_only,
file_context_from=self.file_context_from,
pretty_encode=pretty_encode,
partial_kwargs=self.partial_kwargs,
**current_context,
)
else:
rendered = render_text(
text=text,
replace_only=replace_only or self.replace_only,
pretty_encode=pretty_encode,
partial_kwargs=self.partial_kwargs,
**render_context(current_context, partial_kwargs=self.partial_kwargs),
)
return rendered
def set_context(self, **context):
self.extra_context = self.extra_context.copy()
self.extra_context.update(context) | ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/__init__.py | __init__.py |
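A minimal sketch of the Runner API shown above; the variable names and SQL text are made up.

```python
from absql import Runner

# Context passed at construction is available in every render call
runner = Runner(table='events')
sql = runner.render("SELECT * FROM {{ table }} WHERE name = '{{ name }}'",
                    name='signup')
print(sql)  # SELECT * FROM events WHERE name = 'signup'
```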
import yaml
from absql.functions import default_constructors
def scalar_to_value(scalar, constructor_dict):
"""
Converts a YAML ScalarNode to its underlying Python value
"""
type = scalar.tag.split(":")[-1]
val = scalar.value
if isinstance(scalar, yaml.MappingNode):
val = node_converter(scalar, constructor_dict)
func = constructor_dict.get(type)
return func(**val)
if isinstance(scalar, yaml.SequenceNode):
val = node_converter(scalar, constructor_dict)
func = constructor_dict.get(type)
return func(*val)
if type.startswith("!"):
func = constructor_dict.get(type)
return func(val)
# Handle null type - https://yaml.org/type/null.html
if type == "null":
return None
return eval('{type}("""{val}""")'.format(type=type, val=val))
def node_converter(x, constructor_dict):
"""
Converts YAML nodes of varying types into Python values,
lists, and dictionaries
"""
if isinstance(x, yaml.ScalarNode):
# "I am an atomic value"
return yaml.load(x.value, yaml.SafeLoader)
if isinstance(x, yaml.SequenceNode):
# "I am a list"
return [scalar_to_value(v, constructor_dict) for v in x.value]
if isinstance(x, yaml.MappingNode):
# "I am a dict"
return {
scalar_to_value(v[0], constructor_dict): scalar_to_value(
v[1], constructor_dict
)
for v in x.value
}
def wrap_yaml(func, constructor_dict):
"""Turn a function into one that can be run on a YAML input"""
def ret(loader, x):
value = node_converter(x, constructor_dict)
if value is not None:
if isinstance(value, list):
return func(*value)
if isinstance(value, dict):
return func(**value)
return func(value)
else:
return func()
return ret
def generate_loader(extra_constructors=None):
"""Generates a SafeLoader with both default and custom constructors"""
class Loader(yaml.SafeLoader):
# ensures a new Loader is returned
# every time the function is called
pass
extra_constructors = extra_constructors or []
unchecked_constructors = default_constructors.copy()
if isinstance(extra_constructors, list) and len(extra_constructors) > 0:
extra_constructors = {
("!" + func.__name__): func for func in extra_constructors
}
if len(extra_constructors) > 0:
unchecked_constructors.update(extra_constructors)
# Ensure all tags start with "!"
checked_constructors = {}
for tag, func in unchecked_constructors.items():
if not tag.startswith("!"):
tag = "!" + tag
checked_constructors[tag] = func
for tag, func in checked_constructors.items():
Loader.add_constructor(tag, wrap_yaml(func, checked_constructors))
return Loader | ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/files/loader.py | loader.py |
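A small sketch of registering a custom YAML constructor through generate_loader; the `shout` function and the YAML document are invented for illustration.

```python
import yaml
from absql.files.loader import generate_loader

def shout(text):
    return text.upper()

# generate_loader registers shout under the !shout tag
Loader = generate_loader([shout])
doc = yaml.load('greeting: !shout hello', Loader=Loader)
print(doc)  # {'greeting': 'HELLO'}
```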
from absql.functions.env import env_var
from sqlalchemy import create_engine, text
from sqlalchemy.engine.base import Engine
def table_exists(table_location, engine_env="AB__URI", engine=None):
table_parts = split_parts(table_location)
engine = (
handle_engine(env_var(engine_env)) if engine is None else handle_engine(engine)
)
if hasattr(engine, "has_table"):
return engine.has_table(
table_name=table_parts["target"], schema=table_parts["namespace"]
)
else:
return engine.reflection.Inspector.has_table(
table_name=table_parts["target"], schema=table_parts["namespace"]
)
def query_db(query, engine_env="AB__URI", engine=None):
engine = (
handle_engine(env_var(engine_env)) if engine is None else handle_engine(engine)
)
with engine.connect() as connection:
return connection.execute(text(query)).fetchall()
def handle_engine(engine):
if isinstance(engine, Engine):
return engine
else:
return create_engine(engine)
def get_max_value(field_location, engine_env="AB__URI", engine=None):
field_parts = split_parts(field_location)
query = "SELECT MAX({field}) AS value FROM {table}".format(
field=field_parts["target"], table=field_parts["namespace"]
)
try:
return query_db(query, engine_env, engine)[0].value
except Exception:
return None
def get_min_value(field_location, engine_env="AB__URI", engine=None):
field_parts = split_parts(field_location)
query = "SELECT MIN({field}) AS value FROM {table}".format(
field=field_parts["target"], table=field_parts["namespace"]
)
try:
return query_db(query, engine_env, engine)[0].value
except Exception:
return None
def split_parts(location):
parts = {}
location_parts = location.split(".")
target = location_parts[-1]
namespace = (
None
if len(location_parts) == 1
else ".".join(location_parts[: len(location_parts) - 1])
)
parts["target"] = target
parts["namespace"] = namespace
return parts | ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/functions/db.py | db.py |
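A sketch of the database helpers against a throwaway in-memory SQLite engine; all table and column names are hypothetical. An engine can also be supplied as a URI string or picked up from the AB__URI environment variable.

```python
from sqlalchemy import create_engine, text
from absql.functions.db import get_max_value, query_db

engine = create_engine('sqlite://')   # hypothetical in-memory database
with engine.begin() as conn:
    conn.execute(text('CREATE TABLE events (id INTEGER)'))
    conn.execute(text('INSERT INTO events VALUES (1), (2)'))

print(get_max_value('events.id', engine=engine))                          # 2
print(query_db('SELECT COUNT(*) AS n FROM events', engine=engine)[0].n)   # 2
```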
from inspect import cleandoc
from jinja2 import Environment, DebugUndefined
from absql.text import (
clean_spacing,
create_replacements,
flatten_inputs,
pretty_encode_sql,
)
from absql.files import parse
from absql.files.loader import generate_loader
from absql.functions import default_functions
from absql.utils import nested_apply, partialize_function
def render_text(
text, replace_only=False, pretty_encode=False, partial_kwargs=None, **vars
):
"""
Given some text, render the template with the vars.
If a templated variable is unknown, leave it alone.
"""
if replace_only:
text = clean_spacing(text)
flat_vars = flatten_inputs(**vars)
replacements = create_replacements(**flat_vars)
for k, v in replacements.items():
text = text.replace(k, str(v))
text = cleandoc(text)
else:
env = Environment(undefined=DebugUndefined)
for k, v in vars.items():
if v.__class__.__name__ == "function":
vars[k] = partialize_function(v, partial_kwargs=partial_kwargs, **vars)
env.filters[k] = vars[k]
template = env.from_string(text)
text = cleandoc(template.render(**vars))
if pretty_encode:
return pretty_encode_sql(text)
else:
return text
def render_context(extra_context=None, file_contents=None, partial_kwargs=None):
"""
Render context dictionaries passed through a function call or
file frontmatter (file_contents), with file_contents taking
    precedence over all other provided context.
"""
rendered_context = default_functions.copy()
if extra_context:
rendered_context.update(**extra_context)
if file_contents:
rendered_context.update(**file_contents)
rendered_context = nested_apply(
rendered_context,
lambda x: render_text(x, partial_kwargs=partial_kwargs, **rendered_context),
)
return rendered_context
def render_file(
file_path,
loader=None,
replace_only=False,
extra_constructors=None,
file_context_from=None,
pretty_encode=False,
partial_kwargs=None,
**extra_context,
):
"""
Given a file path, render SQL with a combination of
the vars in the file and any extras passed to extra_context.
"""
if loader is None:
loader = generate_loader(extra_constructors or [])
file_contents = parse(file_path, loader=loader)
sql = file_contents["sql"]
file_contents.pop("sql")
if file_context_from:
file_contents.update(file_contents.get(file_context_from, {}))
file_contents.pop(file_context_from, {})
rendered_context = render_context(extra_context, file_contents, partial_kwargs)
rendered = render_text(
text=sql,
replace_only=replace_only,
pretty_encode=pretty_encode,
partial_kwargs=partial_kwargs,
**rendered_context,
)
return rendered | ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/render/__init__.py | __init__.py |
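A short sketch of render_text; unknown template variables are left untouched because the Jinja environment uses DebugUndefined.

```python
from absql.render import render_text

print(render_text("SELECT * FROM {{ table }}", table='events'))
# SELECT * FROM events

print(render_text("SELECT {{ known }} + {{ unknown }}", known=1))
# SELECT 1 + {{ unknown }}
```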
import base64
import collections
import hashlib
import threading
from concurrent.futures import Future
from typing import Optional
from sdk.audience_matcher import AudienceMatcher
from sdk.context_config import ContextConfig
from sdk.context_data_provider import ContextDataProvider
from sdk.context_event_handler import ContextEventHandler
from sdk.context_event_logger import ContextEventLogger, EventType
from sdk.internal.lock.atomic_bool import AtomicBool
from sdk.internal.lock.atomic_int import AtomicInt
from sdk.internal.lock.concurrency import Concurrency
from sdk.internal.lock.read_write_lock import ReadWriteLock
from sdk.internal.variant_assigner import VariantAssigner
from sdk.json.attribute import Attribute
from sdk.json.context_data import ContextData
from sdk.json.experiment import Experiment
from sdk.json.exposure import Exposure
from sdk.json.goal_achievement import GoalAchievement
from sdk.json.publish_event import PublishEvent
from sdk.json.unit import Unit
from sdk.time.clock import Clock
from sdk.variable_parser import VariableParser
class Assignment:
def __init__(self):
self.id: Optional[int] = 0
self.iteration: Optional[int] = 0
self.full_on_variant: Optional[int] = 0
self.name: Optional[str] = None
self.unit_type: Optional[str] = None
self.traffic_split: list[int] = []
self.variant: Optional[int] = 0
self.assigned: Optional[bool] = False
self.overridden: Optional[bool] = False
self.eligible: Optional[bool] = False
self.full_on: Optional[bool] = False
self.custom: Optional[bool] = False
self.audience_mismatch: Optional[bool] = False
self.variables: dict = {}
self.exposed = AtomicBool()
class ExperimentVariables:
data: Optional[Experiment]
variables: Optional[list[dict]]
def experiment_matches(experiment: Experiment, assignment: Assignment):
return experiment.id == assignment.id and \
experiment.unitType == assignment.unit_type and \
experiment.iteration == assignment.iteration and \
experiment.fullOnVariant == assignment.full_on_variant and \
collections.Counter(experiment.trafficSplit) == \
collections.Counter(assignment.traffic_split)
class Context:
def __init__(self,
clock: Clock, config: ContextConfig,
data_future: Future, data_provider: ContextDataProvider,
event_handler: ContextEventHandler,
event_logger: ContextEventLogger,
variable_parser: VariableParser,
audience_matcher: AudienceMatcher):
self.clock = clock
self.publish_delay = config.publish_delay
self.refresh_interval = config.refresh_interval
self.event_handler = event_handler
self.event_logger = event_logger
self.data_provider = data_provider
self.variable_parser = variable_parser
self.audience_matcher = audience_matcher
self.units = {}
self.index = {}
self.index_variables = {}
self.assignment_cache = {}
self.cassignments = {}
self.overrides = {}
self.exposures = []
self.achievements = []
self.data: Optional[ContextData] = None
self.failed = False
self.closed = AtomicBool()
self.closing = AtomicBool()
self.refreshing = AtomicBool()
self.pending_count = AtomicInt()
self.context_lock = ReadWriteLock()
self.data_lock = ReadWriteLock()
self.timeout_lock = ReadWriteLock()
self.event_lock = ReadWriteLock()
self.refresh_future: Optional[Future] = None
self.closing_future: Optional[Future] = None
self.refresh_timer: Optional[threading.Timer] = None
self.timeout: Optional[threading.Timer] = None
if config.units is not None:
self.set_units(config.units)
self.assigners = dict.fromkeys((range(len(self.units))))
self.hashed_units = dict.fromkeys((range(len(self.units))))
self.attributes: list[Attribute] = []
if config.attributes is not None:
self.set_attributes(config.attributes)
if config.overrides is not None:
self.overrides = dict(config.overrides)
else:
self.overrides = {}
if config.cassigmnents is not None:
self.cassignments = dict(config.cassigmnents)
else:
self.cassignments = {}
if data_future.done():
def when_finished(data: Future):
if data.done() and data.cancelled() is False and \
data.exception() is None:
self.set_data(data.result())
self.log_event(EventType.READY, data.result())
elif data.cancelled() is False and \
data.exception() is not None:
self.set_data_failed(data.exception())
self.log_error(data.exception())
data_future.add_done_callback(when_finished)
else:
self.ready_future = Future()
def when_finished(data: Future):
if data.done() and data.cancelled() is False and \
data.exception() is None:
self.set_data(data.result())
self.ready_future.set_result(None)
self.ready_future = None
self.log_event(EventType.READY, data.result())
if self.get_pending_count() > 0:
self.set_timeout()
elif data.cancelled() is False and \
data.exception() is not None:
self.set_data_failed(data.exception())
self.ready_future.set_result(None)
self.ready_future = None
self.log_error(data.exception())
data_future.add_done_callback(when_finished)
def set_units(self, units: dict):
for key, value in units.items():
self.set_unit(key, value)
def set_unit(self, unit_type: str, uid: str):
self.check_not_closed()
try:
self.context_lock.acquire_write()
if unit_type in self.units.keys() and self.units[unit_type] != uid:
raise ValueError("Unit already set.")
trimmed = uid.strip()
if len(trimmed) == 0:
raise ValueError("Unit UID must not be blank.")
self.units[unit_type] = trimmed
finally:
self.context_lock.release_write()
def set_attributes(self, attributes: dict):
for key, value in attributes.items():
self.set_attribute(key, value)
def set_attribute(self, name: str, value: object):
self.check_not_closed()
attribute = Attribute()
attribute.name = name
attribute.value = value
attribute.setAt = self.clock.millis()
Concurrency.add_rw(self.context_lock, self.attributes, attribute)
def check_not_closed(self):
if self.closed.value:
raise RuntimeError('ABSmartly Context is closed')
elif self.closing.value:
raise RuntimeError('ABSmartly Context is closing')
def set_data(self, data: ContextData):
index = {}
index_variables = {}
for experiment in data.experiments:
experiment_variables = ExperimentVariables()
experiment_variables.data = experiment
experiment_variables.variables = []
for variant in experiment.variants:
if variant.config is not None and len(variant.config) > 0:
variables = self.variable_parser.parse(
self,
experiment.name,
variant.name,
variant.config)
for key, value in variables.items():
index_variables[key] = experiment_variables
experiment_variables.variables.append(variables)
else:
experiment_variables.variables.append({})
index[experiment.name] = experiment_variables
try:
self.data_lock.acquire_write()
self.index = index
self.index_variables = index_variables
self.data = data
self.set_refresh_timer()
finally:
self.data_lock.release_write()
def set_refresh_timer(self):
        if self.refresh_interval > 0 and self.refresh_timer is None and \
                not self.is_closing() and not self.is_closed():
def ref():
self.refresh_async()
self.refresh_timer = threading.Timer(
self.refresh_interval,
ref)
self.refresh_timer.start()
self.refresh_timer = threading.Timer(
self.refresh_interval,
ref)
self.refresh_timer.start()
def set_timeout(self):
if self.is_ready():
if self.timeout is None:
try:
self.timeout_lock.acquire_write()
def flush():
self.flush()
self.timeout = threading.Timer(self.publish_delay, flush)
self.timeout.start()
finally:
self.timeout_lock.release_write()
def is_ready(self):
return self.data is not None
def is_failed(self):
return self.failed
def is_closed(self):
return self.closed.value
def is_closing(self):
return not self.closed.value and self.closing.value
def refresh_async(self):
self.check_not_closed()
if self.refreshing.compare_and_set(False, True):
self.refresh_future = Future()
def when_ready(data):
if data.done() and data.cancelled() is False and \
data.exception() is None:
self.set_data(data.result())
self.refreshing.set(False)
self.refresh_future.set_result(None)
self.log_event(EventType.REFRESH, data.result())
elif data.cancelled() is False and \
data.exception() is not None:
self.refreshing.set(False)
self.refresh_future.set_exception(data.exception())
self.log_error(data.exception())
self.data_provider\
.get_context_data()\
.add_done_callback(when_ready)
if self.refresh_future is not None:
return self.refresh_future
else:
result = Future()
result.set_result(None)
return result
def set_data_failed(self, exception):
try:
self.data_lock.acquire_write()
self.index = {}
self.index_variables = {}
self.data = ContextData()
self.failed = True
finally:
self.data_lock.release_write()
def log_error(self, exception):
if self.event_logger is not None:
self.event_logger.handle_event(EventType.ERROR, exception)
def log_event(self, event: EventType, data: object):
if self.event_logger is not None:
self.event_logger.handle_event(event, data)
def get_pending_count(self):
return self.pending_count.get()
def flush(self):
self.clear_timeout()
if self.failed is False:
if self.pending_count.get() > 0:
exposures = None
achievements = None
event_count = 0
try:
self.event_lock.acquire_write()
event_count = self.pending_count.get()
if event_count > 0:
if len(self.exposures) > 0:
exposures = list(self.exposures)
self.exposures.clear()
if len(self.achievements) > 0:
achievements = list(self.achievements)
self.achievements.clear()
self.pending_count.set(0)
finally:
self.event_lock.release_write()
if event_count > 0:
event = PublishEvent()
event.hashed = True
event.publishedAt = self.clock.millis()
event.units = []
for key, value in self.units.items():
unit = Unit()
unit.type = key
unit.uid = str(
self.get_unit_hash(key, value),
encoding='ascii')\
.encode('ascii', errors='ignore')\
.decode()
event.units.append(unit)
if len(self.attributes) > 0:
event.attributes = list(self.attributes)
else:
event.attributes = None
event.exposures = exposures
event.goals = achievements
result = Future()
def run(data):
if data.done() and \
data.cancelled() is False and \
data.exception() is None:
self.log_event(EventType.PUBLISH, event)
result.set_result(None)
elif data.cancelled() is False and \
data.exception() is not None:
self.log_error(data.exception())
result.set_exception(data.exception())
self.event_handler\
.publish(self, event)\
.add_done_callback(run)
return result
else:
try:
self.event_lock.acquire_write()
self.exposures.clear()
self.achievements.clear()
self.pending_count.set(0)
finally:
self.event_lock.release_write()
result = Future()
result.set_result(None)
return result
def close(self):
self.close_async().result()
def refresh(self):
self.refresh_async().result()
def publish(self):
self.publish_async().result()
def publish_async(self):
self.check_not_closed()
return self.flush()
def track(self, goal_name: str, properties: dict):
self.check_not_closed()
achievement = GoalAchievement()
achievement.achievedAt = self.clock.millis()
achievement.name = goal_name
if properties is None:
achievement.properties = None
else:
achievement.properties = dict(properties)
try:
self.event_lock.acquire_write()
self.pending_count.increment_and_get()
self.achievements.append(achievement)
finally:
self.event_lock.release_write()
self.log_event(EventType.GOAL, achievement)
self.set_timeout()
def wait_until_ready(self):
if self.data is None:
if self.ready_future is not None and not self.ready_future.done():
self.ready_future.result()
return self
def wait_until_ready_async(self):
if self.data is not None:
result = Future()
result.set_result(self)
return result
else:
def apply(fut: Future):
return self
self.ready_future.add_done_callback(apply)
return self.ready_future
def clear_timeout(self):
if self.timeout is not None:
try:
self.timeout_lock.acquire_write()
if self.timeout is not None:
self.timeout.cancel()
self.timeout = None
finally:
self.timeout_lock.release_write()
def clear_refresh_timer(self):
if self.refresh_timer is not None:
self.refresh_timer.cancel()
self.refresh_timer = None
def get_variable_value(self, key: str, default_value: object):
self.check_ready(True)
assignment = self.get_variable_assignment(key)
if assignment is not None:
if assignment.variables is not None:
if not assignment.exposed.value:
self.queue_exposure(assignment)
if key in assignment.variables:
return assignment.variables[key]
return default_value
def peek_variable_value(self, key: str, default_value: object):
self.check_ready(True)
assignment = self.get_variable_assignment(key)
if assignment is not None:
if assignment.variables is not None:
if key in assignment.variables:
return assignment.variables[key]
return default_value
def peek_treatment(self, experiment_name: str):
self.check_ready(True)
return self.get_assignment(experiment_name).variant
def get_unit_hash(self, unit_type: str, unit_uid: str):
def computer(key: str):
dig = hashlib.md5(unit_uid.encode('utf-8')).digest()
unithash = base64.urlsafe_b64encode(dig).rstrip(b'=')
return unithash
return Concurrency.compute_if_absent_rw(
self.context_lock,
self.hashed_units,
unit_type,
computer)
def get_treatment(self, experiment_name: str):
self.check_ready(True)
assignment = self.get_assignment(experiment_name)
if not assignment.exposed.value:
self.queue_exposure(assignment)
return assignment.variant
def get_variable_keys(self):
self.check_ready(True)
variable_keys = {}
try:
self.data_lock.acquire_read()
for key, value in self.index_variables.items():
expr_var: ExperimentVariables = value
variable_keys[key] = expr_var.data.name
finally:
            self.data_lock.release_read()
return variable_keys
def get_assignment(self, experiment_name: str):
try:
self.context_lock.acquire_read()
if experiment_name in self.assignment_cache:
assignment: Assignment = self.assignment_cache[experiment_name]
experiment: ExperimentVariables = \
self.get_experiment(experiment_name)
if experiment_name in self.overrides:
override = self.overrides[experiment_name]
if assignment.overridden and \
assignment.variant == override:
return assignment
elif experiment is None:
if assignment.assigned is False:
return assignment
elif experiment_name not in self.cassignments or \
self.cassignments[experiment_name] == \
assignment.variant:
if experiment_matches(experiment.data, assignment):
return assignment
finally:
self.context_lock.release_read()
try:
self.context_lock.acquire_write()
experiment: ExperimentVariables = \
self.get_experiment(experiment_name)
assignment = Assignment()
assignment.name = experiment_name
assignment.eligible = True
if experiment_name in self.overrides:
if experiment is not None:
assignment.id = experiment.data.id
assignment.unit_type = experiment.data.unitType
assignment.overridden = True
assignment.variant = self.overrides[experiment_name]
else:
if experiment is not None:
unit_type = experiment.data.unitType
if experiment.data.audience is not None and \
len(experiment.data.audience) > 0:
attrs = {}
for attr in self.attributes:
attrs[attr.name] = attr.value
match = self.audience_matcher.evaluate(
experiment.data.audience,
attrs)
if match is not None:
assignment.audience_mismatch = not match.result
if experiment.data.audienceStrict and \
assignment.audience_mismatch:
assignment.variant = 0
elif experiment.data.fullOnVariant == 0:
if experiment.data.unitType in self.units:
uid = self.units[experiment.data.unitType]
unit_hash = self.get_unit_hash(unit_type, uid)
assigner: VariantAssigner = \
self.get_variant_assigner(unit_type,
unit_hash)
eligible = \
assigner.assign(
experiment.data.trafficSplit,
experiment.data.trafficSeedHi,
experiment.data.trafficSeedLo) == 1
if eligible:
if experiment_name in self.cassignments:
custom = self.cassignments[experiment_name]
assignment.variant = custom
assignment.custom = True
else:
assignment.variant = \
assigner.assign(experiment.data.split,
experiment.data.seedHi,
experiment.data.seedLo)
else:
assignment.eligible = False
assignment.variant = 0
assignment.assigned = True
else:
assignment.assigned = True
assignment.variant = experiment.data.fullOnVariant
assignment.full_on = True
assignment.unit_type = unit_type
assignment.id = experiment.data.id
assignment.iteration = experiment.data.iteration
assignment.traffic_split = experiment.data.trafficSplit
assignment.full_on_variant = experiment.data.fullOnVariant
if experiment is not None and \
(assignment.variant < len(experiment.data.variants)):
assignment.variables = experiment.variables[assignment.variant]
self.assignment_cache[experiment_name] = assignment
return assignment
finally:
self.context_lock.release_write()
def check_ready(self, expect_not_closed: bool):
if not self.is_ready():
raise RuntimeError('ABSmartly Context is not yet ready')
elif expect_not_closed:
self.check_not_closed()
def get_experiment(self, experiment_name: str):
try:
self.data_lock.acquire_read()
return self.index.get(experiment_name, None)
finally:
self.data_lock.release_read()
def get_experiments(self):
self.check_ready(True)
try:
self.data_lock.acquire_read()
experiment_names = []
for experiment in self.data.experiments:
experiment_names.append(experiment.name)
return experiment_names
finally:
self.data_lock.release_read()
def get_data(self):
self.check_ready(True)
try:
self.data_lock.acquire_read()
return self.data
finally:
self.data_lock.release_read()
def set_override(self, experiment_name: str, variant: int):
self.check_not_closed()
return Concurrency.put_rw(self.context_lock,
self.overrides,
experiment_name, variant)
def get_override(self, experiment_name: str):
return Concurrency.get_rw(self.context_lock,
self.overrides,
experiment_name)
def set_overrides(self, overrides: dict):
for key, value in overrides.items():
self.set_override(key, value)
def set_custom_assignment(self, experiment_name: str, variant: int):
self.check_not_closed()
Concurrency.put_rw(self.context_lock,
self.cassignments,
experiment_name, variant)
def get_custom_assignment(self, experiment_name: str):
return Concurrency.get_rw(self.context_lock,
self.cassignments,
experiment_name)
def set_custom_assignments(self, custom_assignments: dict):
for key, value in custom_assignments.items():
self.set_custom_assignment(key, value)
def get_variant_assigner(self, unit_type: str, unit_hash: bytes):
def apply(key: str):
return VariantAssigner(bytearray(unit_hash))
return Concurrency.compute_if_absent_rw(self.context_lock,
self.assigners,
unit_type, apply)
def get_variable_experiment(self, key: str):
return Concurrency.get_rw(self.data_lock, self.index_variables, key)
def get_variable_assignment(self, key: str):
experiment: ExperimentVariables = self.get_variable_experiment(key)
if experiment is not None:
return self.get_assignment(experiment.data.name)
return None
def close_async(self):
if not self.closed.value:
if self.closing.compare_and_set(False, True):
self.clear_refresh_timer()
if self.pending_count.get() > 0:
self.closing_future = Future()
def accept(res: Future):
if res.done() and res.cancelled() is False \
and res.exception() is None:
self.closed.set(True)
self.closing.set(False)
self.closing_future.set_result(None)
self.log_event(EventType.CLOSE, None)
elif res.cancelled() is False \
and res.exception() is not None:
self.closed.set(True)
self.closing.set(False)
                        self.closing_future.set_exception(res.exception())
self.flush().add_done_callback(accept)
return self.closing_future
else:
self.closed.set(True)
self.closing.set(False)
self.log_event(EventType.CLOSE, None)
if self.closing_future is not None:
return self.closing_future
result = Future()
result.set_result(None)
return result
def queue_exposure(self, assignment: Assignment):
if assignment.exposed.compare_and_set(False, True):
exposure = Exposure()
exposure.id = assignment.id
exposure.name = assignment.name
exposure.unit = assignment.unit_type
exposure.variant = assignment.variant
exposure.exposedAt = self.clock.millis()
exposure.assigned = assignment.assigned
exposure.eligible = assignment.eligible
exposure.overridden = assignment.overridden
exposure.fullOn = assignment.full_on
exposure.custom = assignment.custom
exposure.audienceMismatch = assignment.audience_mismatch
try:
self.event_lock.acquire_write()
self.pending_count.increment_and_get()
self.exposures.append(exposure)
finally:
self.event_lock.release_write()
self.log_event(EventType.EXPOSURE, exposure)
self.set_timeout() | ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context.py | context.py |
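The Context constructor expects several collaborators (clock, config, data future, providers), so a fully self-contained example is out of scope here; the sketch below only illustrates the consumption side of the API on a context assumed to have been built by the SDK entry point. The experiment and goal names are hypothetical.

```python
from sdk.context import Context

def run_experiment(ctx: Context) -> None:
    ctx.wait_until_ready()                       # block until context data arrives
    variant = ctx.get_treatment('exp_test_ab')   # hypothetical experiment name
    if variant == 1:
        pass  # render treatment
    else:
        pass  # render control
    ctx.track('payment', {'amount': 42.0})       # hypothetical goal + properties
    ctx.close()                                  # flushes queued exposures/goals
```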
from sdk.client_config import ClientConfig
from sdk.http_client import HTTPClient
from sdk.json.publish_event import PublishEvent
class Client:
def __init__(self, config: ClientConfig, http_client: HTTPClient):
self.serializer = config.serializer
self.deserializer = config.deserializer
self.executor = config.executor
endpoint = config.endpoint
api_key = config.api_key
application = config.application
environment = config.environment
self.url = endpoint + "/context"
self.http_client = http_client
self.headers = {"X-API-Key": api_key,
"X-Application": application,
"X-Environment": environment,
"X-Application-Version": '0',
"X-Agent": "absmartly-python-sdk"}
self.query = {"application": application,
"environment": environment}
def get_context_data(self):
return self.executor.submit(self.send_get, self.url, self.query, {})
def send_get(self, url: str, query: dict, headers: dict):
response = self.http_client.get(url, query, headers)
if response.status_code // 100 == 2:
content = response.content
return self.deserializer.deserialize(content, 0, len(content))
return response.raise_for_status()
def publish(self, event: PublishEvent):
return self.executor.submit(
self.send_put,
self.url,
{},
self.headers,
event)
def send_put(self,
url: str,
query: dict,
headers: dict,
event: PublishEvent):
content = self.serializer.serialize(event)
response = self.http_client.put(url, query, headers, content)
if response.status_code // 100 == 2:
content = response.content
return self.deserializer.deserialize(content, 0, len(content))
return response.raise_for_status() | ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/client.py | client.py |
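A small sketch of the Client call pattern; ClientConfig and HTTPClient construction is not shown because their constructors live outside this file, so the client instance is assumed to be configured elsewhere.

```python
from sdk.client import Client

def fetch_context_data(client: Client):
    # get_context_data() schedules send_get on the configured executor
    future = client.get_context_data()
    return future.result()   # blocks until the HTTP request completes
```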