Columns: code (string, 501 chars to 5.19M), package (string, 2 to 81 chars), path (string, 9 to 304 chars), filename (string, 4 to 145 chars). Each record below gives a file's code followed by its package, archive path, and file name.
import sys
import argparse

import numpy as np

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader, read_types
from AaronTools.finders import *
from AaronTools.utils.utils import glob_files

vsepr_choices = [
    "linear_1",
    "linear_2",
    "bent_2_planar",
    "bent_2_tetrahedral",
    "trigonal_planar",
    "bent_3_tetrahedral",
    "t_shaped",
    "tetrahedral",
    "sawhorse",
    "square_planar",
    "trigonal_bipyramidal",
    "square_pyramidal",
    "octahedral",
]

find_parser = argparse.ArgumentParser(
    description="find atoms matching a description and return the list of indices",
    formatter_class=argparse.RawTextHelpFormatter,
)
find_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin],
    help="a coordinate file",
)
find_parser.add_argument(
    "-if", "--input-format", type=str, default=None, dest="input_format",
    choices=read_types,
    help="file format of input - xyz is assumed if input is stdin",
)
find_parser.add_argument(
    "-o", "--output", type=str, default=False, required=False, dest="outfile",
    help="output destination\nDefault: stdout",
)
find_parser.add_argument(
    "-d", "--delimiter", required=False, dest="delim", default="comma",
    choices=["comma", "semicolon", "tab", "space"],
    help="delimiter for output atom indices",
)
find_parser.add_argument(
    "-e", "--element", type=str, action="append", default=[], required=False,
    dest="elements", help="element symbol",
)
find_parser.add_argument(
    "-n", "--index", type=str, action="append", default=[], required=False,
    dest="ndx",
    help="1-indexed position of atoms in the input file\n"
    "may be hyphen-separated to denote a range\n"
    "ranges and individual indices may be comma-separated",
)
find_parser.add_argument(
    "-bf", "--bonds-from", type=str, nargs=2, action="append", default=[],
    required=False, metavar=("BONDS", "NDX"), dest="bonds_from",
    help="find atoms exactly BONDS (integer) bonds away from atom NDX",
)
find_parser.add_argument(
    "-wb", "--within-bonds", type=str, nargs=2, action="append", default=[],
    required=False, metavar=("BONDS", "NDX"), dest="within_bonds",
    help="find atoms within BONDS (integer) bonds from atom NDX",
)
find_parser.add_argument(
    "-bt", "--bonded-to", type=str, action="append", default=[], required=False,
    metavar="NDX", dest="bonded_to",
    help="find atoms bonded to atom NDX",
)
find_parser.add_argument(
    "-pd", "--point-distance", type=float, nargs=4, action="append", default=[],
    required=False, metavar=("X", "Y", "Z", "R"), dest="point_dist",
    help="find atoms within R Angstroms of (X, Y, Z)",
)
find_parser.add_argument(
    "-ad", "--atom-distance", type=str, nargs=2, action="append", default=[],
    required=False, metavar=("NDX", "R"), dest="atom_dist",
    help="find atoms within R Angstroms of atom NDX",
)
find_parser.add_argument(
    "-tm", "--transition-metal", action="store_true", default=False,
    required=False, dest="tmetal",
    help="find any elements in the d-block, up to the Actinides",
)
find_parser.add_argument(
    "-mg", "--main-group", action="store_true", default=False, required=False,
    dest="main_group",
    help="find any main group element (including H)",
)
find_parser.add_argument(
    "-v", "--vsepr", type=str, action="append", default=[], required=False,
    choices=vsepr_choices, metavar="SHAPE", dest="vsepr",
    help="find atoms with the specified VSEPR shape\n"
    "shape can be:\n%s" % "".join(
        s + ", " if (sum(len(x) for x in vsepr_choices[:i])) % 40 < 21 else s + ",\n"
        for i, s in enumerate(vsepr_choices)
    ).strip().strip(","),
)
find_parser.add_argument(
    "-nb", "--number-of-bonds", type=int, action="append", default=[],
    required=False, dest="num_neighbors",
    help="find atoms with the specified number of bonds",
)
find_parser.add_argument(
    "-ct", "--closer-to", nargs=2, metavar=("THIS_NDX", "THAN_NDX"),
    action="append", default=[], dest="closer_to",
    help="atoms that are fewer bonds from THIS_NDX than THAN_NDX",
)
find_parser.add_argument(
    "-c", "--chiral-center", action="store_true", default=False, dest="chiral",
    help="find chiral centers",
)
find_parser.add_argument(
    "-f", "--fragment", type=str, default=None, dest="fragments",
    help="fragments containing the specified atoms",
)

finder_combination = find_parser.add_argument_group(
    "match method (Default is atoms matching all)"
)
finder_combination.add_argument(
    "-or", "--match-any", action="store_true", default=False, dest="match_any",
    help="find atoms matching any of the given descriptions",
)
finder_combination.add_argument(
    "-i", "--invert", action="store_true", default=False, dest="invert",
    help="invert match results",
)

args = find_parser.parse_args()

# create a list of geometry-independent finders
finders = []
for ndx in args.ndx:
    finders.append(ndx)
for ele in args.elements:
    finders.append(ele)
for pd in args.point_dist:
    r = pd.pop(-1)
    # the original built this array but passed the bare list; pass the array
    p = np.array(pd)
    finders.append(WithinRadiusFromPoint(p, r))
if args.tmetal:
    finders.append(AnyTransitionMetal())
if args.main_group:
    finders.append(AnyNonTransitionMetal())
for vsepr in args.vsepr:
    finders.append(VSEPR(vsepr.replace("_", " ")))
for nb in args.num_neighbors:
    finders.append(NumberOfBonds(nb))
if args.chiral:
    finders.append(ChiralCenters())

if args.delim == "comma":
    delim = ","
elif args.delim == "space":
    delim = " "
elif args.delim == "tab":
    delim = "\t"
elif args.delim == "semicolon":
    delim = ";"

s = ""

for f in glob_files(args.infile, parser=find_parser):
    if isinstance(f, str):
        if args.input_format is not None:
            infile = FileReader((f, args.input_format, None))
        else:
            infile = FileReader(f)
    else:
        if args.input_format is not None:
            infile = FileReader(("from stdin", args.input_format, f))
        else:
            infile = FileReader(("from stdin", "xyz", f))

    geom = Geometry(infile)

    geom_finders = [x for x in finders]
    # some finders require an atom upon instantiation
    # add those once we have the geometry
    for bf in args.bonds_from:
        # args.bonds_from was parsed but never used in the original script;
        # this assumes AaronTools.finders provides BondsFrom(atom, n),
        # analogous to WithinBondsOf below
        n_bonds = int(bf[0])
        for atom in geom.find(bf[1]):
            geom_finders.append(BondsFrom(atom, n_bonds))
    for ad in args.atom_dist:
        r = float(ad.pop(-1))
        atom_ndx = ad.pop(0)
        for atom in geom.find(atom_ndx):
            geom_finders.append(WithinRadiusFromAtom(atom, r))
    for wb in args.within_bonds:
        n_bonds = int(wb.pop(0))
        atom_ndx = wb.pop(0)
        for atom in geom.find(atom_ndx):
            geom_finders.append(WithinBondsOf(atom, n_bonds))
    for bt in args.bonded_to:
        for atom in geom.find(bt):
            geom_finders.append(BondedTo(atom))
    for ct in args.closer_to:
        atom1, atom2 = geom.find(ct)
        geom_finders.append(CloserTo(atom1, atom2))
    if args.fragments:
        for atom in geom.find(args.fragments):
            frag_atoms = geom.get_all_connected(atom)
            geom_finders.append(frag_atoms)

    if len(args.infile) > 1:
        # the original appended the accumulated string here instead of the file name
        s += "%s\n" % str(f)

    try:
        if args.match_any:
            results = geom.find(geom_finders)
        else:
            results = geom.find(*geom_finders)
        if args.invert:
            results = geom.find(NotAny(results))
        s += delim.join(atom.name for atom in results)
        s += "\n"
    except LookupError as e:
        if args.invert:
            s += delim.join(atom.name for atom in geom.atoms)
            s += "\n"
        else:
            s += "%s\n" % str(e)

if not args.outfile:
    print(s.strip())
else:
    with open(args.outfile, "a") as f:
        f.write(s.strip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/findAtoms.py
findAtoms.py
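The finders the script wires together can also be used directly. Here is a minimal sketch of the same lookups through the AaronTools API, using only calls that appear in findAtoms.py; the file name ligand.xyz and the atom indices are hypothetical placeholders:

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader
from AaronTools.finders import BondedTo, WithinBondsOf, NotAny

# load a structure the same way the script does (hypothetical file name)
geom = Geometry(FileReader("ligand.xyz"))

# equivalent of `findAtoms.py ligand.xyz -bt 1`: atoms bonded to atom 1
anchor = geom.find("1")[0]
print([a.name for a in geom.find(BondedTo(anchor))])

# equivalent of `-wb 2 1 -i`: atoms more than two bonds from atom 1
near = geom.find(WithinBondsOf(anchor, 2))
print([a.name for a in geom.find(NotAny(near))])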
import os
import re

from AaronTools.atoms import Atom
from AaronTools.geometry import Geometry

from pdfminer.high_level import extract_pages


def main(args):
    geom_patt = re.compile(r"([A-Z][a-z]*)((?:\s+-?\d+\.?\d*){3})")
    float_patt = re.compile(r"-?\d+\.?\d*")

    all_names = []
    atoms = []
    name = None
    for i, page in enumerate(extract_pages(args.infile)):
        print("parsing page {: 4d} please wait...".format(i + 1), end="\r")
        for element in page:
            last_line = None
            if hasattr(element, "get_text"):
                for line in element:
                    text = line.get_text()
                    match = geom_patt.search(text)
                    if not match and last_line and atoms:
                        name_match = geom_patt.search(name)
                        if name_match:
                            # the "name" line itself looks like a coordinate
                            # line, so this block continues the previous
                            # structure - reload it and extend its atoms
                            geom = Geometry(all_names[-1] + ".xyz")
                            geom.atoms.extend(atoms)
                        else:
                            geom = Geometry(atoms)
                            geom.name = name
                            geom.comment = name
                        if args.directory != "CURRENTDIR":
                            geom.name = os.path.join(args.directory, geom.name)
                        orig_name = geom.name
                        # rename duplicates instead of clobbering earlier files
                        n = 2
                        while geom.name in all_names:
                            geom.name = "{}_{:03d}".format(orig_name, n)
                            n += 1
                        if args.sort:
                            geom.refresh_connected()
                            geom.refresh_ranks()
                            geom.atoms = geom.reorder()[0]
                        geom.write()
                        all_names.append(geom.name)
                        atoms = []
                        name = None
                        # print()
                        # print(geom.name, len(geom.atoms))
                        # print(geom)
                    if match:
                        if not name:
                            name = last_line
                        # renamed from "element" to avoid shadowing the page element
                        ele = match.group(1)
                        coords = float_patt.findall(match.group(2))
                        atoms.append(Atom(ele, [float(c) for c in coords]))
                    last_line = text.strip()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("infile", help="PDF file to scrape geometries from")
    parser.add_argument(
        "directory",
        nargs="?",
        default="CURRENTDIR",
        help="directory to save XYZ files in",
    )
    parser.add_argument(
        "--sort",
        action="store_true",
        help="perform canonical sorting before saving",
    )
    args = parser.parse_args()
    main(args)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/parsePDF.py
parsePDF.py
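The scraping hinges on two regular expressions: one that recognizes an element symbol followed by three floats, and one that pulls the floats back out. A quick self-contained check of those exact patterns on a made-up coordinate line:

import re

# the same patterns the script compiles
geom_patt = re.compile(r"([A-Z][a-z]*)((?:\s+-?\d+\.?\d*){3})")
float_patt = re.compile(r"-?\d+\.?\d*")

line = "C    -1.2345   0.0000   2.5000"
match = geom_patt.search(line)
element = match.group(1)                               # "C"
coords = [float(c) for c in float_patt.findall(match.group(2))]
print(element, coords)                                 # C [-1.2345, 0.0, 2.5]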
import argparse
import os
import re
import sys

from numpy import prod

from AaronTools.fileIO import FileReader, read_types
from AaronTools.finders import BondedTo
from AaronTools.geometry import Geometry
from AaronTools.substituent import Substituent
from AaronTools.utils.utils import get_filename, glob_files

makeconf_parser = argparse.ArgumentParser(
    description="generate rotamers for substituents using a hierarchical method",
    formatter_class=argparse.RawTextHelpFormatter,
)
makeconf_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin],
    help="a coordinate file",
)
makeconf_parser.add_argument(
    "-ls", "--list", action="store_const", const=True, default=False,
    required=False, dest="list_avail",
    help="list available substituents",
)
makeconf_parser.add_argument(
    "-i", "--info", action="store_const", const=True, default=False,
    required=False, dest="list_info",
    help="list information on substituents to be generated",
)
makeconf_parser.add_argument(
    "-if", "--input-format", type=str, nargs=1, default=None,
    choices=read_types, dest="input_format",
    help="file format of input - xyz is assumed if input is stdin",
)
makeconf_parser.add_argument(
    "-s", "--substituent", metavar="(n=substituent|substituent name)",
    type=str, action="append", default=[], required=False, dest="substituents",
    help="substituents to rotate\n"
    "n is the 1-indexed position of the starting position of the\n"
    "substituent you are rotating\n"
    "if only a substituent name is specified, all substituents with\n"
    "that name will be rotated\n"
    "Default: rotate any detected substituents",
)
makeconf_parser.add_argument(
    "-rc", "--remove-clash", action="store_true", default=False,
    required=False, dest="remove_clash",
    help="if atoms are too close together, wiggle the\n"
    "substituents to remove the clash",
)
makeconf_parser.add_argument(
    "-sc", "--skip-clash", action="store_true", required=False, default=False,
    dest="skip_clash",
    help="do not print structures with atoms that are\n"
    "too close together or for which substituent\n"
    "clashing could not be resolved with '--remove-clash'",
)
makeconf_parser.add_argument(
    "-o", "--output-destination", type=str, default=None, required=False,
    metavar="output destination", dest="outfile",
    help="output destination\n"
    "$i in the filename will be replaced with conformer number\n"
    "if a directory is given, default is \"conformer-$i.xyz\" in that directory\n"
    "$INFILE will be replaced with the name of the input file\n"
    "Default: stdout",
)

args = makeconf_parser.parse_args()

if args.list_avail:
    s = ""
    for i, name in enumerate(sorted(Substituent.list())):
        sub = Substituent(name)
        if sub.conf_num > 1:
            s += "%-20s" % name
            # if (i + 1) % 3 == 0:
            if (i + 1) % 1 == 0:
                s += "\n"
    print(s.strip())
    sys.exit(0)

detect_subs = False

s = ""
skipped = 0

for infile in glob_files(args.infile, parser=makeconf_parser):
    if isinstance(infile, str):
        if args.input_format is not None:
            f = FileReader((infile, args.input_format[0], infile))
        else:
            f = FileReader(infile)
    else:
        if args.input_format is not None:
            f = FileReader(("from stdin", args.input_format[0], infile))
        else:
            f = FileReader(("from stdin", "xyz", infile))

    geom = Geometry(f)

    target_list = []
    explicit_subnames = []
    subnames = []
    if not args.substituents:
        detect_subs = True
    else:
        for sub in args.substituents:
            if re.search(r"^\d+.*=\w+", sub):
                ndx_targets = sub.split("=")[0]
                target_list.append(geom.find(ndx_targets))
                explicit_subnames.append("=".join(sub.split("=")[1:]))
            else:
                detect_subs = True
                subnames.append(sub)

    substituents = []
    if detect_subs:
        geom.detect_substituents()
        if not subnames:
            substituents.extend(
                [sub for sub in geom.substituents if sub.name in Substituent.list()]
            )
        else:
            substituents.extend(
                [sub for sub in geom.substituents if sub.name in subnames]
            )

    for a, subname in zip(target_list, explicit_subnames):
        for atom in a:
            for bonded_atom in atom.connected:
                frag = geom.get_fragment(atom, bonded_atom)
                try:
                    sub = Substituent(frag, end=bonded_atom)
                    if sub.name == subname:
                        substituents.append(sub)
                except LookupError:
                    pass

    if args.list_info:
        total_conf = 1
        if len(args.infile) > 1:
            s += "%s\n" % infile
        s += "Substituent \tRotamers\n"
        for sub in substituents:
            if sub.conf_num > 1:
                s += "%2s=%-10s\t%s\n" % (sub.end.name, sub.name, sub.conf_num)
                total_conf *= sub.conf_num
        s += "Total Number of Conformers = %i\n" % total_conf
        if infile is not args.infile[-1]:
            s += "\n"
        continue

    # imagine conformers as a number
    # each place in that number is in base conf_num
    # we determine what to rotate by adding one (starting from 0) and
    # subtracting the previous number
    conformers = []
    rotations = []
    for sub in substituents:
        conformers.append(sub.conf_num)
        rotations.append(sub.conf_angle)

    mod_array = []
    for i in range(0, len(rotations)):
        mod_array.append(1)
        for j in range(0, i):
            mod_array[i] *= conformers[j]

    prev_conf = 0
    for conf in range(0, int(prod(conformers))):
        for i, sub in enumerate(substituents):
            rot = int(conf / mod_array[i]) % conformers[i]
            rot -= int(prev_conf / mod_array[i]) % conformers[i]
            angle = rotations[i] * rot
            if angle != 0:
                sub_atom = sub.find_exact(BondedTo(sub.end))[0]
                axis = sub_atom.bond(sub.end)
                center = sub.end.coords
                geom.rotate(axis, angle=angle, targets=sub.atoms, center=center)
        prev_conf = conf

        bad_subs = []
        print_geom = geom
        if args.remove_clash:
            print_geom = Geometry([a.copy() for a in geom])
            # print_geom.update_geometry(geom.coordinates.copy())
            sub_list = [
                Substituent(
                    print_geom.find([at.name for at in sub]),
                    detect=False,
                    end=print_geom.find_exact(sub.end.name)[0],
                )
                for sub in substituents
            ]
            bad_subs = print_geom.remove_clash(sub_list)
            # somehow the atoms get out of order
            print_geom.atoms = sorted(print_geom.atoms, key=lambda a: float(a.name))

        if args.skip_clash:
            if bad_subs:
                skipped += 1
                continue
            clashing = False
            for j, a1 in enumerate(print_geom.atoms):
                for a2 in print_geom.atoms[:j]:
                    if a2 not in a1.connected and a1.is_connected(a2):
                        clashing = True
                        break
                if clashing:
                    break
            if clashing:
                skipped += 1
                continue

        if args.outfile is None:
            s += print_geom.write(outfile=False)
            s += "\n"
        else:
            if os.path.isdir(os.path.expanduser(args.outfile)):
                outfile = os.path.join(
                    os.path.expanduser(args.outfile),
                    "conformer-%i.xyz" % (conf + 1),
                )
            else:
                outfile = args.outfile.replace("$i", str(conf + 1))
            if "$INFILE" in outfile:
                outfile = outfile.replace("$INFILE", get_filename(infile))
            print_geom.write(outfile=outfile, append="$i" not in args.outfile)

if args.outfile is None or args.list_info:
    print(s[:-1])

if skipped > 0:
    print("%i conformers skipped because of clashing substituent(s)" % skipped)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/makeConf.py
makeConf.py
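The rotamer enumeration treats each substituent as one digit of a mixed-radix number: mod_array[i] holds the place value of digit i, and the digit extracted for conformer conf tells how many rotation increments substituent i should have. A small standalone sketch with hypothetical rotamer counts:

# conformer indexing sketch: each substituent is a "digit" whose base is
# its number of rotamers; mod_array[i] is the place value of digit i
conformers = [3, 2, 3]          # hypothetical rotamer counts
mod_array = []
for i in range(len(conformers)):
    mod_array.append(1)
    for j in range(i):
        mod_array[i] *= conformers[j]
# mod_array == [1, 3, 6], so conformer 11 decodes to digits (2, 1, 1)
conf = 11
digits = [int(conf / m) % c for m, c in zip(mod_array, conformers)]
print(digits)                   # [2, 1, 1]; check: 2*1 + 1*3 + 1*6 == 11

The script never decodes a conformer from scratch; it only rotates by the difference between consecutive digit vectors, which is why it tracks prev_conf.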
import sys import argparse import re import numpy as np from AaronTools.fileIO import FileReader, read_types from AaronTools.finders import AnyTransitionMetal, AnyNonTransitionMetal from AaronTools.geometry import Geometry from AaronTools.utils.utils import glob_files info_parser = argparse.ArgumentParser( description="print information about pairs of atoms in Gaussian, ORCA, or Psi4 output files", formatter_class=argparse.RawTextHelpFormatter ) info_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) info_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", help="output destination \nDefault: stdout" ) info_parser.add_argument( "-t1", "--first-targets", type=str, default=[AnyTransitionMetal(), AnyNonTransitionMetal()], dest="targets_1", help="print info from target atoms", ) info_parser.add_argument( "-t2", "--second-targets", type=str, default=[AnyTransitionMetal(), AnyNonTransitionMetal()], dest="targets_2", help="print info from target atoms", ) info_parser.add_argument( "-if", "--input-format", type=str, default=None, dest="input_format", choices=read_types, help="file format of input - xyz is assumed if input is stdin" ) info_parser.add_argument( "-ls", "--list", action="store_true", default=False, required=False, dest="list", help="list info categories and exit", ) info_parser.add_argument( "-i", "--info", type=str, default=[], action="append", required=False, dest="info", help="information to print\n" + "Default is all info" ) info_parser.add_argument( "-csv", "--csv-format", nargs="?", default=False, choices=("comma", "semicolon", "tab", "space"), required=False, dest="csv", help="print info in CSV format with the specified separator\n" + "Default: do not print in CSV format", ) args = info_parser.parse_args() if args.csv is None: args.csv = "comma" if args.csv: if args.csv == "comma": sep = "," elif args.csv == "tab": sep = "\t" elif args.csv == "semicolon": sep = ";" else: sep = " " s = "" np.set_printoptions(precision=5) for f in glob_files(args.infile, parser=info_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format[0], None), just_geom=False) else: infile = FileReader(f, just_geom=False) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format[0], f), just_geom=False) else: if len(sys.argv) >= 1: infile = FileReader(("from stdin", "xyz", f), just_geom=False) if args.list: s += "info in %s:\n" % f for key in infile.other.keys(): val = infile.other[key] if isinstance(val, np.ndarray) and val.shape == (len(infile.atoms), len(infile.atoms)): s += "\t%s\n" % key else: geom = Geometry(infile) try: atoms1 = geom.find(args.targets_1) except LookupError: print("%s not found in %s" % (args.targets_1, f)) continue try: atoms2 = geom.find(args.targets_2) except LookupError: print("%s not found in %s" % (args.targets_2, f)) continue ndx_list1 = [geom.atoms.index(atom) for atom in atoms1] ndx_list2 = [geom.atoms.index(atom) for atom in atoms2] s += "%s:\n" % f missing_keys = [ key for key in args.info if not any( re.search(key, data_key, flags=re.IGNORECASE) for data_key in infile.other.keys() ) ] if missing_keys: s += "\nmissing some info: %s\n" % ", ".join(missing_keys) for key in infile.other.keys(): if args.info == [] or any(re.search(info, key, flags=re.IGNORECASE) for info in args.info): val = infile.other[key] if isinstance(val, np.ndarray) and val.shape == (len(infile.atoms), 
len(infile.atoms)): for ndx1 in ndx_list1: for ndx2 in ndx_list2: if args.csv: s += "\"%s\"%s%i%s%i%s%s\n" % ( key, sep, ndx1, sep, ndx2, str(val[ndx1, ndx2]), ) else: s += "\t%-30s %i %i =\t%s\n" % ( key, ndx1, ndx2, str(val[ndx1, ndx2]), ) if not args.outfile: print(s.strip()) else: with open(args.outfile, "a") as f: f.write(s.strip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/printAtomPairInfo.py
printAtomPairInfo.py
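The script decides which entries of infile.other to print purely by shape: only square numpy arrays with one row and column per atom qualify. A toy illustration of that filter (the key names and values are hypothetical):

import numpy as np

# keep only entries that are square matrices with one row/column per atom,
# e.g. a bond order matrix; scalars and per-atom vectors are skipped
n_atoms = 3                                  # hypothetical atom count
data = {
    "bond orders": np.eye(n_atoms),          # kept: shape (3, 3)
    "energy": -76.4,                         # skipped: not an ndarray
    "charges": np.zeros(n_atoms),            # skipped: wrong shape
}
for key, val in data.items():
    if isinstance(val, np.ndarray) and val.shape == (n_atoms, n_atoms):
        print(key)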
import sys
import argparse
import re

import numpy as np

from AaronTools.fileIO import FileReader, read_types
from AaronTools.finders import AnyTransitionMetal, AnyNonTransitionMetal
from AaronTools.geometry import Geometry
from AaronTools.utils.utils import glob_files

info_parser = argparse.ArgumentParser(
    description="print information about atoms in Gaussian, ORCA, or Psi4 output files",
    formatter_class=argparse.RawTextHelpFormatter,
)
info_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin],
    help="a coordinate file",
)
info_parser.add_argument(
    "-o", "--output", type=str, default=False, required=False, dest="outfile",
    help="output destination\nDefault: stdout",
)
info_parser.add_argument(
    "-t", "--targets", type=str,
    default=[AnyTransitionMetal(), AnyNonTransitionMetal()], dest="targets",
    help="print info for the target atoms",
)
info_parser.add_argument(
    "-if", "--input-format", type=str, default=None, dest="input_format",
    choices=read_types,
    help="file format of input - xyz is assumed if input is stdin",
)
info_parser.add_argument(
    "-ls", "--list", action="store_true", default=False, required=False,
    dest="list",
    help="list info categories and exit",
)
info_parser.add_argument(
    "-i", "--info", type=str, default=[], action="append", required=False,
    dest="info",
    help="information to print\nDefault is all info",
)
info_parser.add_argument(
    "-csv", "--csv-format", nargs="?", default=False,
    choices=("comma", "semicolon", "tab", "space"), required=False, dest="csv",
    help="print info in CSV format with the specified separator\n"
    "Default: do not print in CSV format",
)

args = info_parser.parse_args()

if args.csv is None:
    args.csv = "comma"
if args.csv:
    if args.csv == "comma":
        sep = ","
    elif args.csv == "tab":
        sep = "\t"
    elif args.csv == "semicolon":
        sep = ";"
    else:
        sep = " "

s = ""

np.set_printoptions(precision=5)

for f in glob_files(args.infile, parser=info_parser):
    if isinstance(f, str):
        if args.input_format is not None:
            # args.input_format is a plain string (no nargs=1) - don't index it
            infile = FileReader((f, args.input_format, None), just_geom=False)
        else:
            infile = FileReader(f, just_geom=False)
    else:
        if args.input_format is not None:
            infile = FileReader(("from stdin", args.input_format, f), just_geom=False)
        else:
            infile = FileReader(("from stdin", "xyz", f), just_geom=False)

    if args.list:
        s += "info in %s:\n" % f
        for key in infile.other.keys():
            val = infile.other[key]
            if not isinstance(val, str) and hasattr(val, "__iter__") and len(val) == len(infile.atoms):
                s += "\t%s\n" % key
    else:
        geom = Geometry(infile)
        try:
            atoms = geom.find(args.targets)
        except LookupError:
            print("%s not found in %s" % (args.targets, f))
            continue
        ndx_list = [geom.atoms.index(atom) for atom in atoms]
        s += "%s:\n" % f
        missing_keys = [
            key for key in args.info if not any(
                re.search(key, data_key, flags=re.IGNORECASE)
                for data_key in infile.other.keys()
            )
        ]
        if missing_keys:
            s += "\nmissing some info: %s\n" % ", ".join(missing_keys)
        for key in infile.other.keys():
            if args.info == [] or any(
                re.search(info, key, flags=re.IGNORECASE) for info in args.info
            ):
                val = infile.other[key]
                if not isinstance(val, str) and hasattr(val, "__iter__") and len(val) == len(infile.atoms):
                    if args.csv:
                        s += "\"%s\"%s%s\n" % (
                            key, sep,
                            sep.join([str(x) for i, x in enumerate(val) if i in ndx_list]),
                        )
                    else:
                        s += "\t%-30s =\t%s\n" % (
                            key,
                            ", ".join([str(x) for i, x in enumerate(val) if i in ndx_list]),
                        )

if not args.outfile:
    print(s.strip())
else:
    with open(args.outfile, "a") as f:
        f.write(s.strip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/printAtomInfo.py
printAtomInfo.py
import argparse
import sys

from AaronTools.fileIO import FileReader, read_types
from AaronTools.geometry import Geometry
from AaronTools.component import Component
from AaronTools.substituent import Substituent
from AaronTools.utils.utils import glob_files


def main(argv):
    sterimol_parser = argparse.ArgumentParser(
        description="calculate B1-B5 and L sterimol parameters for ligands - "
        "see Verloop, A. and Tipker, J. (1976), Use of linear free energy "
        "related and other parameters in the study of fungicidal selectivity. "
        "Pestic. Sci., 7: 379-390.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    sterimol_parser.add_argument(
        "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin],
        help="a coordinate file",
    )
    sterimol_parser.add_argument(
        "-if", "--input-format", type=str, default=None, choices=read_types,
        dest="input_format",
        help="file format of input\nxyz is assumed if input is stdin",
    )
    sterimol_parser.add_argument(
        "-k", "--key-atoms", type=str, required=True, dest="key",
        help="1-indexed position of the ligand's coordinating atoms",
    )
    sterimol_parser.add_argument(
        "-c", "--center-atom", type=str, required=True, dest="center",
        help="atom the ligand is coordinated to",
    )
    sterimol_parser.add_argument(
        "-r", "--radii", type=str, default="bondi", choices=["bondi", "umn"],
        dest="radii",
        help="VDW radii to use in calculation\n"
        "umn: main group vdw radii from J. Phys. Chem. A 2009, 113, 19, 5806–5812\n"
        "    (DOI: 10.1021/jp8111556)\n"
        "    transition metals are crystal radii from Batsanov, S.S. Van der Waals\n"
        "    Radii of Elements. Inorganic Materials 37, 871–885 (2001).\n"
        "    (DOI: 10.1023/A:1011625728803)\n"
        "bondi: radii from J. Phys. Chem. 1964, 68, 3, 441–451\n"
        "    (DOI: 10.1021/j100785a001)\n"
        "Default: bondi",
    )
    sterimol_parser.add_argument(
        "-bl", "--bisect-L", action="store_true", required=False, dest="bisect_L",
        help="L axis will bisect (or analogous for higher denticity\n"
        "ligands) the L-M-L angle\n"
        "Default: center to centroid of key atoms",
    )
    sterimol_parser.add_argument(
        "-al", "--at-L", default=[None], dest="L_value",
        type=lambda x: [float(v) for v in x.split(",")],
        help="get widths at specific L values (comma-separated)\n"
        "can be used for Sterimol2Vec parameters\n"
        "Default: use the entire ligand",
    )
    sterimol_parser.add_argument(
        "-v", "--vector", action="store_true", required=False, dest="vector",
        help="print Chimera/ChimeraX bild file for vectors instead of parameter values",
    )
    sterimol_parser.add_argument(
        "-o", "--output", type=str, default=False, required=False,
        metavar="output destination", dest="outfile",
        help="output destination\nDefault: stdout",
    )

    args = sterimol_parser.parse_args(args=argv)

    s = ""
    if not args.vector:
        s += "B1\tB2\tB3\tB4\tB5\tL\tfile\n"

    for infile in glob_files(args.infile, parser=sterimol_parser):
        if isinstance(infile, str):
            if args.input_format is not None:
                f = FileReader((infile, args.input_format, infile))
            else:
                f = FileReader(infile)
        else:
            if args.input_format is not None:
                f = FileReader(("from stdin", args.input_format, infile))
            else:
                f = FileReader(("from stdin", "xyz", infile))

        geom = Geometry(f, refresh_ranks=False)
        comp = Component(
            geom.get_fragment(args.key, stop=args.center),
            to_center=geom.find(args.center),
            key_atoms=args.key,
            detect_backbone=False,
        )

        for val in args.L_value:
            data = comp.sterimol(
                to_center=geom.find(args.center),
                return_vector=args.vector,
                radii=args.radii,
                bisect_L=args.bisect_L,
                at_L=val,
            )
            if args.vector:
                for key, color in zip(
                    ["B1", "B2", "B3", "B4", "B5", "L"],
                    ["black", "green", "purple", "orange", "red", "blue"],
                ):
                    start, end = data[key]
                    s += ".color %s\n" % color
                    s += ".note Sterimol %s\n" % key
                    s += ".arrow %6.3f %6.3f %6.3f   %6.3f %6.3f %6.3f\n" % (*start, *end)
            else:
                s += "%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%s\n" % (
                    data["B1"], data["B2"], data["B3"], data["B4"], data["B5"],
                    data["L"], infile,
                )

    if not args.outfile:
        print(s)
    else:
        with open(args.outfile, "w") as f:
            f.write(s)


if __name__ == "__main__":
    main(None)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/ligandSterimol.py
ligandSterimol.py
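The same calculation is available through the API the script wraps. A minimal sketch using only calls that appear in ligandSterimol.py; the file name, key-atom indices, and center index are hypothetical:

from AaronTools.fileIO import FileReader
from AaronTools.geometry import Geometry
from AaronTools.component import Component

# hypothetical input: a bidentate ligand bound through atoms 1 and 2
# to a metal at atom 35
geom = Geometry(FileReader("complex.xyz"), refresh_ranks=False)
comp = Component(
    geom.get_fragment("1,2", stop="35"),
    to_center=geom.find("35"),
    key_atoms="1,2",
    detect_backbone=False,
)
data = comp.sterimol(to_center=geom.find("35"), radii="bondi")
print(data["B1"], data["B5"], data["L"])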
import sys
import argparse

import numpy as np

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader, read_types
from AaronTools.utils.utils import get_filename, glob_files


def four_atoms_and_a_float(vals):
    """check to see if argument is four ints and a float"""
    err = argparse.ArgumentTypeError(
        "error with atom/dihedral specification: %s\n" % " ".join(vals)
        + "expected -s/-c int int int int float\n"
        + "example: -c 4 5 6 7 1.337"
    )
    out = []
    if len(vals) != 5:
        raise err
    for v in vals[:-1]:
        try:
            out.append(int(v))
        except ValueError:
            raise err
        if int(v) != float(v):
            raise err
    try:
        out.append(float(vals[-1]))
    except ValueError:
        raise err

    return out


dihedral_parser = argparse.ArgumentParser(
    description="measure or modify torsional angles",
    formatter_class=argparse.RawTextHelpFormatter,
)
dihedral_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin],
    help="a coordinate file",
)
dihedral_parser.add_argument(
    "-if", "--input-format", type=str, choices=read_types, default=None,
    dest="input_format",
    help="file format of input - xyz is assumed if input is stdin",
)
dihedral_parser.add_argument(
    "-m", "--measure", metavar=("atom1", "atom2", "atom3", "atom4"),
    action="append", type=str, nargs=4, default=[], required=False,
    dest="measure",
    help="measure and print the torsional angle (1-indexed)",
)
dihedral_parser.add_argument(
    "-c", "--change", metavar=("atom1", "atom2", "atom3", "atom4", "increment"),
    action="append", type=str, nargs=5, default=[], required=False,
    dest="change",
    help="change torsional angle by the amount specified",
)
dihedral_parser.add_argument(
    "-s", "--set", metavar=("atom1", "atom2", "atom3", "atom4", "angle"),
    action="append", type=str, nargs=5, default=[], required=False,
    dest="set_ang",
    help="set dihedral to the amount specified",
)
dihedral_parser.add_argument(
    "-r", "--radians", action="store_const", const=True, default=False,
    required=False, dest="radians",
    help="work with radians instead of degrees",
)
dihedral_parser.add_argument(
    "-o", "--output", type=str, default=False, required=False, dest="outfile",
    metavar="output destination",
    help="output destination\n"
    "$INFILE will be replaced with the name of the input file\n"
    "Default: stdout",
)

args = dihedral_parser.parse_args()

for f in glob_files(args.infile, dihedral_parser):
    if isinstance(f, str):
        if args.input_format is not None:
            infile = FileReader((f, args.input_format, None))
        else:
            infile = FileReader(f)
    else:
        if args.input_format is not None:
            infile = FileReader(("from stdin", args.input_format, f))
        else:
            infile = FileReader(("from stdin", "xyz", f))

    geom = Geometry(infile)

    # set dihedral to specified value
    for dihedral in args.set_ang:
        vals = four_atoms_and_a_float(dihedral)
        a1 = geom.find(str(vals[0]))[0]
        a2 = geom.find(str(vals[1]))[0]
        a3 = geom.find(str(vals[2]))[0]
        a4 = geom.find(str(vals[3]))[0]
        geom.change_dihedral(
            a1, a2, a3, a4, vals[4], radians=args.radians, adjust=False, as_group=True
        )

    # change dihedral by specified amount
    for dihedral in args.change:
        vals = four_atoms_and_a_float(dihedral)
        a1 = geom.find(str(vals[0]))[0]
        a2 = geom.find(str(vals[1]))[0]
        a3 = geom.find(str(vals[2]))[0]
        a4 = geom.find(str(vals[3]))[0]
        geom.change_dihedral(
            a1, a2, a3, a4, vals[4], radians=args.radians, adjust=True, as_group=True
        )

    # print specified dihedrals
    if len(args.infile) > 1:
        out = "%s\t" % f
    else:
        out = ""
    for dihedral in args.measure:
        a1 = geom.find(dihedral[0])[0]
        a2 = geom.find(dihedral[1])[0]
        a3 = geom.find(dihedral[2])[0]
        a4 = geom.find(dihedral[3])[0]
        val = geom.dihedral(a1, a2, a3, a4)
        if not args.radians:
            val *= 180 / np.pi
        out += "%f\n" % val
    out = out.rstrip()

    if len(args.set_ang) + len(args.change) > 0:
        if args.outfile:
            outfile = args.outfile
            if "$INFILE" in outfile:
                outfile = outfile.replace("$INFILE", get_filename(f))
            geom.write(append=True, outfile=outfile)
        else:
            print(geom.write(outfile=False))

    if len(args.measure) > 0:
        if not args.outfile:
            print(out)
        else:
            outfile = args.outfile
            if "$INFILE" in outfile:
                outfile = outfile.replace("$INFILE", get_filename(f))
            with open(outfile, "a") as f:
                f.write(out)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/dihedral.py
dihedral.py
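The measure/set operations map directly onto Geometry.dihedral and Geometry.change_dihedral, both used above. A sketch with a hypothetical input file:

import numpy as np

from AaronTools.fileIO import FileReader
from AaronTools.geometry import Geometry

# hypothetical input: set the 1-2-3-4 dihedral of butane.xyz to 60 degrees
geom = Geometry(FileReader("butane.xyz"))
a1, a2, a3, a4 = [geom.find(str(i))[0] for i in (1, 2, 3, 4)]
geom.change_dihedral(a1, a2, a3, a4, 60.0, radians=False, adjust=False, as_group=True)
# geom.dihedral returns radians, as the script's 180/pi conversion shows
print(geom.dihedral(a1, a2, a3, a4) * 180 / np.pi)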
import argparse
import sys

from AaronTools.comp_output import CompOutput
from AaronTools.fileIO import FileReader
from AaronTools.spectra import Frequency
from AaronTools.utils.utils import get_filename, glob_files

from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np

rcParams["savefig.dpi"] = 300


peak_types = ["pseudo-voigt", "gaussian", "lorentzian", "delta"]
plot_types = ["transmittance", "absorbance", "vcd", "raman"]
weight_types = ["electronic", "zero-point", "enthalpy", "free", "quasi-rrho", "quasi-harmonic"]


def peak_type(x):
    """resolve an unambiguous prefix of a peak type name"""
    out = [y for y in peak_types if y.startswith(x)]
    if out:
        return out[0]
    raise TypeError(
        "peak type must be one of: %s" % ", ".join(peak_types)
    )


def plot_type(x):
    """resolve an unambiguous prefix of a plot type name"""
    out = [y for y in plot_types if y.startswith(x)]
    if out:
        return out[0]
    raise TypeError(
        "plot type must be one of: %s" % ", ".join(plot_types)
    )


def weight_type(x):
    """resolve an unambiguous prefix of a weighting type name"""
    out = [y for y in weight_types if y.startswith(x)]
    if len(out) == 1:
        return out[0]
    raise TypeError(
        "weight type must be one of: %s" % ", ".join(weight_types)
    )


ir_parser = argparse.ArgumentParser(
    description="plot a Boltzmann-averaged IR spectrum",
    formatter_class=argparse.RawTextHelpFormatter,
)
ir_parser.add_argument(
    "infiles", metavar="files", type=str, nargs="+",
    help="frequency job output file(s)",
)
ir_parser.add_argument(
    "-o", "--output", type=str, default=None, dest="outfile",
    help="output destination\n"
    "if the file extension is .csv, a CSV file will be written\n"
    "Default: show plot",
)
ir_parser.add_argument(
    "-t", "--plot-type", type=plot_type, choices=plot_types,
    default="transmittance", dest="plot_type",
    help="type of plot\nDefault: transmittance",
)
# TODO: figure out more anharmonic options
# anharmonic_options = ir_parser.add_argument_group("anharmonic options")
ir_parser.add_argument(
    "-na", "--harmonic", action="store_false", default=True, dest="anharmonic",
    help="force to use harmonic frequencies when anharmonic data is in the file",
)

peak_options = ir_parser.add_argument_group("peak options")
peak_options.add_argument(
    "-p", "--peak-type", type=peak_type, choices=peak_types,
    default="pseudo-voigt", dest="peak_type",
    help="function for peaks\nDefault: pseudo-voigt",
)
peak_options.add_argument(
    "-m", "--voigt-mixing", type=float, default=0.5, dest="voigt_mixing",
    help="fraction of pseudo-Voigt that is Gaussian\nDefault: 0.5",
)
peak_options.add_argument(
    "-fwhm", "--full-width-half-max", type=float, default=15.0, dest="fwhm",
    help="full width at half max. of peaks\nDefault: 15 cm^-1",
)

ir_parser.add_argument(
    "-s", "--point-spacing", default=None, type=float, dest="point_spacing",
    help="spacing between each x value\n"
    "Default: a non-uniform spacing that is more dense near peaks",
)

scale_options = ir_parser.add_argument_group("scale frequencies")
scale_options.add_argument(
    "-l", "--linear-scale", type=float, default=0.0, dest="linear_scale",
    help="subtract linear_scale * frequency from each mode (i.e. this is 1 - λ)\n"
    "Default: 0 (no scaling)",
)
scale_options.add_argument(
    "-q", "--quadratic-scale", type=float, default=0.0, dest="quadratic_scale",
    help="subtract quadratic_scale * frequency^2 from each mode\n"
    "Default: 0 (no scaling)",
)

ir_parser.add_argument(
    "-nr", "--no-reverse", action="store_false", default=True, dest="reverse_x",
    help="do not reverse x-axis",
)

center_centric = ir_parser.add_argument_group("x-centered interruptions")
center_centric.add_argument(
    "-sc", "--section-centers", type=lambda x: [float(v) for v in x.split(",")],
    dest="centers", default=None,
    help="split plot into sections with a section centered on each of the specified values\n"
    "values should be separated by commas",
)
center_centric.add_argument(
    "-sw", "--section-widths", type=lambda x: [float(v) for v in x.split(",")],
    dest="widths", default=None,
    help="width of each section specified by -sc/--section-centers\n"
    "should be separated by commas, with one for each section",
)

minmax_centric = ir_parser.add_argument_group("x-range interruptions")
minmax_centric.add_argument(
    "-r", "--ranges",
    type=lambda x: [[float(v) for v in r.split("-")] for r in x.split(",")],
    dest="ranges", default=None,
    help="split plot into sections (e.g. 0-1900,2900-3300)",
)

ir_parser.add_argument(
    "-fw", "--figure-width", type=float, dest="fig_width",
    help="width of figure in inches",
)
ir_parser.add_argument(
    "-fh", "--figure-height", type=float, dest="fig_height",
    help="height of figure in inches",
)
ir_parser.add_argument(
    "-csv", "--experimental-csv", type=str, nargs="+", dest="exp_data",
    help="CSV file containing observed spectrum data, which will be plotted on top\n"
    "frequency job files should not come directly after this flag",
)

energy_options = ir_parser.add_argument_group("energy weighting")
energy_options.add_argument(
    "-w", "--weighting-energy", type=weight_type, dest="weighting",
    default="quasi-rrho", choices=weight_types,
    help="type of energy to use for Boltzmann weighting\n"
    "Default: quasi-rrho",
)
energy_options.add_argument(
    "-sp", "--single-point-files", type=str, nargs="+", default=None,
    required=False, dest="sp_files",
    help="single point energies to use for thermochem\n"
    "Default: energies from INFILES",
)
energy_options.add_argument(
    "-temp", "--temperature", type=float, dest="temperature", default=298.15,
    help="temperature (K) to use for weighting\nDefault: 298.15",
)
energy_options.add_argument(
    "-w0", "--frequency-cutoff", type=float, dest="w0", default=100,
    help="cutoff frequency for quasi free energy corrections (1/cm)\n"
    "Default: 100 cm^-1",
)

ir_parser.add_argument(
    "-rx", "--rotate-x-ticks", action="store_true", dest="rotate_x_ticks",
    default=False,
    help="rotate x-axis tick labels by 45 degrees",
)

args = ir_parser.parse_args()

if bool(args.centers) != bool(args.widths):
    sys.stderr.write(
        "both -sw/--section-widths and -sc/--section-centers must be specified"
    )
    sys.exit(2)

if args.ranges and bool(args.ranges) == bool(args.widths):
    sys.stderr.write(
        "cannot use -r/--ranges with -sw/--section-widths"
    )
    sys.exit(2)

centers = args.centers
widths = args.widths
if args.ranges:
    centers = []
    widths = []
    for (xmin, xmax) in args.ranges:
        centers.append((xmin + xmax) / 2)
        widths.append(abs(xmax - xmin))

exp_data = None
if args.exp_data:
    exp_data = []
    for f in args.exp_data:
        data = np.loadtxt(f, delimiter=",")
        for i in range(1, data.shape[1]):
            exp_data.append((data[:, 0], data[:, i], None))

compouts = []
for f in glob_files(args.infiles, parser=ir_parser):
    fr = FileReader(f, just_geom=False)
    co = CompOutput(fr)
    compouts.append(co)

sp_cos = compouts
if args.sp_files:
    sp_cos = []
    for f in glob_files(args.sp_files, parser=ir_parser):
        fr = FileReader(f, just_geom=False)
        co = CompOutput(fr)
        sp_cos.append(co)

if args.weighting == "electronic":
    weighting = CompOutput.ELECTRONIC_ENERGY
elif args.weighting == "zero-point":
    weighting = CompOutput.ZEROPOINT_ENERGY
elif args.weighting == "enthalpy":
    weighting = CompOutput.RRHO_ENTHALPY
elif args.weighting == "free":
    weighting = CompOutput.RRHO
elif args.weighting == "quasi-rrho":
    weighting = CompOutput.QUASI_RRHO
elif args.weighting == "quasi-harmonic":
    weighting = CompOutput.QUASI_HARMONIC

# sanity checks: warn if single-point/frequency structures disagree, or if
# two frequency files appear to be duplicates of one another
for i, (freq, sp) in enumerate(zip(compouts, sp_cos)):
    rmsd = freq.geometry.RMSD(sp.geometry, sort=True)
    if rmsd > 1e-2:
        print(
            "single point energy structure might not match frequency file:\n"
            "%s %s RMSD = %.2f" % (sp.geometry.name, freq.geometry.name, rmsd)
        )
    for freq2 in compouts[:i]:
        rmsd = freq.geometry.RMSD(freq2.geometry, sort=True)
        if rmsd < 1e-2:
            print(
                "two frequency files appear to be identical:\n"
                "%s %s RMSD = %.2f" % (freq2.geometry.name, freq.geometry.name, rmsd)
            )

weights = CompOutput.boltzmann_weights(
    compouts,
    nrg_cos=sp_cos,
    temperature=args.temperature,
    weighting=weighting,
    v0=args.w0,
)

data_attr = "data"
if all(co.frequency.anharm_data for co in compouts) and args.anharmonic:
    data_attr = "anharm_data"

mixed_freq = Frequency.get_mixed_signals(
    [co.frequency for co in compouts],
    weights=weights,
    data_attr=data_attr,
)

if not args.outfile or not args.outfile.lower().endswith("csv"):
    fig = plt.gcf()
    fig.clear()

    mixed_freq.plot_ir(
        fig,
        centers=centers,
        widths=widths,
        plot_type=args.plot_type,
        peak_type=args.peak_type,
        reverse_x=args.reverse_x,
        fwhm=args.fwhm,
        point_spacing=args.point_spacing,
        voigt_mixing=args.voigt_mixing,
        linear_scale=args.linear_scale,
        quadratic_scale=args.quadratic_scale,
        exp_data=exp_data,
        anharmonic=mixed_freq.anharm_data and args.anharmonic,
        rotate_x_ticks=args.rotate_x_ticks,
    )

    if args.fig_width:
        fig.set_figwidth(args.fig_width)
    if args.fig_height:
        fig.set_figheight(args.fig_height)

    if args.outfile:
        plt.savefig(args.outfile, dpi=300)
    else:
        plt.show()
else:
    intensity_attr = "intensity"
    if args.plot_type.lower() == "vcd":
        intensity_attr = "rotation"
    if args.plot_type.lower() == "raman":
        intensity_attr = "raman_activity"

    funcs, x_positions, intensities = mixed_freq.get_spectrum_functions(
        fwhm=args.fwhm,
        peak_type=args.peak_type,
        voigt_mixing=args.voigt_mixing,
        linear_scale=args.linear_scale,
        quadratic_scale=args.quadratic_scale,
        data_attr="anharm_data" if mixed_freq.anharm_data and args.anharmonic else "data",
        intensity_attr=intensity_attr,
    )

    x_values, y_values, _ = mixed_freq.get_plot_data(
        funcs,
        x_positions,
        point_spacing=args.point_spacing,
        transmittance=args.plot_type == "transmittance",
        peak_type=args.peak_type,
        fwhm=args.fwhm,
    )

    if args.plot_type.lower() == "transmittance":
        y_label = "Transmittance (%)"
    elif args.plot_type.lower() == "absorbance":
        y_label = "Absorbance (arb.)"
    elif args.plot_type.lower() == "vcd":
        y_label = "delta_Absorbance (arb.)"
    elif args.plot_type.lower() == "raman":
        y_label = "Activity (arb.)"

    with open(args.outfile, "w") as f:
        s = ",".join(["frequency (cm^-1)", y_label])
        s += "\n"
        for x, y in zip(x_values, y_values):
            s += ",".join(["%.4f" % z for z in [x, y]])
            s += "\n"
        f.write(s)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/plotAverageIR.py
plotAverageIR.py
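The core of the script is Boltzmann weighting of several frequency jobs followed by mixing their signals. Here is a sketch of that pipeline using only calls from plotAverageIR.py; the log file names are hypothetical:

from AaronTools.comp_output import CompOutput
from AaronTools.fileIO import FileReader
from AaronTools.spectra import Frequency

# hypothetical frequency-job outputs for two conformers
compouts = [
    CompOutput(FileReader(f, just_geom=False))
    for f in ("conf1.log", "conf2.log")
]
# quasi-RRHO free-energy weights at 298.15 K, 100 cm^-1 cutoff
weights = CompOutput.boltzmann_weights(
    compouts,
    temperature=298.15,
    weighting=CompOutput.QUASI_RRHO,
    v0=100,
)
# population-weighted average of the two spectra
mixed = Frequency.get_mixed_signals(
    [co.frequency for co in compouts], weights=weights, data_attr="data",
)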
import sys
import argparse

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader, read_types
from AaronTools.utils.utils import get_filename, glob_files

fukui_parser = argparse.ArgumentParser(
    description="integrate weighted Fukui functions around atoms",
    formatter_class=argparse.RawTextHelpFormatter,
)
fukui_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin],
    help="an FCHK file or ORCA output with MO's",
)
fukui_parser.add_argument(
    "-o", "--output", type=str, default=False, dest="outfile",
    help="output destination\n"
    "$INFILE will be replaced with the name of the input file\n"
    "Default: stdout",
)

info = fukui_parser.add_mutually_exclusive_group(required=True)
info.add_argument(
    "-fd", "--fukui-donor", dest="fukui_donor", default=False,
    action="store_true",
    help="print Fukui donor values\n"
    "see DOI 10.1002/jcc.24699 for weighting method\n"
    "for details on how the function is condensed, see the GitHub wiki:\n"
    "https://github.com/QChASM/AaronTools.py/wiki/Structure-Analysis-and-Descriptor-Implementation#condensed-fukui",
)
info.add_argument(
    "-fa", "--fukui-acceptor", dest="fukui_acceptor", default=False,
    action="store_true",
    help="print Fukui acceptor values\n"
    "see DOI 10.1021/acs.jpca.9b07516 for weighting method\n"
    "for details on how the function is condensed, see the GitHub wiki:\n"
    "https://github.com/QChASM/AaronTools.py/wiki/Structure-Analysis-and-Descriptor-Implementation#condensed-fukui",
)
info.add_argument(
    "-f2", "--fukui-dual", dest="fukui_dual", default=False,
    action="store_true",
    help="print Fukui dual values\n"
    "see DOI 10.1021/acs.jpca.9b07516 for weighting method\n"
    "for details on how the function is condensed, see the GitHub wiki:\n"
    "https://github.com/QChASM/AaronTools.py/wiki/Structure-Analysis-and-Descriptor-Implementation#condensed-fukui",
)

fukui_parser.add_argument(
    "-d", "--delta", type=float, dest="delta", default=0.1,
    help="delta parameter for weighting orbitals in Fukui functions\n"
    "Default: 0.1 Hartree",
)
fukui_parser.add_argument(
    "-nt", "--number-of-threads", type=int, default=1, dest="n_jobs",
    help="number of threads to use when evaluating basis functions\n"
    "this is on top of NumPy's multithreading,\n"
    "so if NumPy uses 8 threads and n_jobs=2, you can\n"
    "expect to see 16 threads in use\n"
    "Default: 1",
)
fukui_parser.add_argument(
    "-m", "--max-array", type=int, default=10000000, dest="max_length",
    help="max. array size to read from FCHK files\n"
    "a reasonable limit on parsed orbital data\n"
    "can improve performance when reading large FCHK files\n"
    "too small of a value will prevent orbital data from\n"
    "being parsed\n"
    "Default: 10000000",
)
fukui_parser.add_argument(
    "-v", "--vdw-radii", default="umn", choices=["umn", "bondi"], dest="radii",
    help="VDW radii to use in calculation\n"
    "umn: main group vdw radii from J. Phys. Chem. A 2009, 113, 19, 5806–5812\n"
    "    (DOI: 10.1021/jp8111556)\n"
    "    transition metals are crystal radii from Batsanov, S.S. Van der Waals\n"
    "    Radii of Elements. Inorganic Materials 37, 871–885 (2001).\n"
    "    (DOI: 10.1023/A:1011625728803)\n"
    "bondi: radii from J. Phys. Chem. 1964, 68, 3, 441–451 (DOI: 10.1021/j100785a001)\n"
    "Default: umn",
)

grid_options = fukui_parser.add_argument_group("Lebedev integration options")
grid_options.add_argument(
    "-rp", "--radial-points", type=int, default=32,
    choices=[20, 32, 64, 75, 99, 127], dest="rpoints",
    help="number of radial shells for Gauss-Legendre integration\n"
    "of the radial component\n"
    "lower values are faster, but at the cost of accuracy\nDefault: 32",
)
grid_options.add_argument(
    "-ap", "--angular-points", type=int, default=302,
    choices=[110, 194, 302, 590, 974, 1454, 2030, 2702, 5810], dest="apoints",
    help="number of angular points for Lebedev integration\n"
    "lower values are faster, but at the cost of accuracy\nDefault: 302",
)

args = fukui_parser.parse_args()

infiles = glob_files(args.infile, parser=fukui_parser)
for n, f in enumerate(infiles):
    if isinstance(f, str):
        infile = FileReader(f, just_geom=False, max_length=args.max_length)
    else:
        infile = FileReader(
            ("from stdin", "fchk", f),
            just_geom=False,
            max_length=args.max_length,
        )

    try:
        orbits = infile.other["orbitals"]
    except KeyError:
        raise RuntimeError("orbital info was not parsed from %s" % f)

    geom = Geometry(infile, refresh_connected=False, refresh_ranks=False)

    # note: args.delta is parsed but was not forwarded to the condensed
    # Fukui functions in this version of the script
    if args.fukui_donor:
        func = orbits.condensed_fukui_donor_values
    elif args.fukui_acceptor:
        func = orbits.condensed_fukui_acceptor_values
    elif args.fukui_dual:
        func = orbits.condensed_fukui_dual_values

    vals = func(
        geom,
        n_jobs=args.n_jobs,
        rpoints=args.rpoints,
        apoints=args.apoints,
        radii=args.radii,
    )

    s = ""
    if len(infiles) > 1:
        s += "%s:\n" % f
    for i, atom in enumerate(geom.atoms):
        s += "%s%3s\t%6.3f\n" % ("\t" if len(infiles) > 1 else "", atom.name, vals[i])

    if not args.outfile:
        print(s)
    else:
        outfile = args.outfile
        mode = "w"
        if "$INFILE" in outfile:
            outfile = outfile.replace("$INFILE", get_filename(f))
        elif n > 0:
            mode = "a"
        with open(outfile, mode) as f:
            f.write(s)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/condensedFukui.py
condensedFukui.py
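The script is a thin wrapper around the condensed Fukui methods of the parsed orbitals object. A sketch of the donor branch with a hypothetical FCHK file, using only calls that appear in condensedFukui.py:

from AaronTools.fileIO import FileReader
from AaronTools.geometry import Geometry

# hypothetical FCHK input; mirrors the script's -fd branch
infile = FileReader("mol.fchk", just_geom=False, max_length=10000000)
orbits = infile.other["orbitals"]
geom = Geometry(infile, refresh_connected=False, refresh_ranks=False)
vals = orbits.condensed_fukui_donor_values(
    geom, n_jobs=1, rpoints=32, apoints=302, radii="umn",
)
for atom, v in zip(geom.atoms, vals):
    print("%3s %6.3f" % (atom.name, v))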
import sys
import argparse

import numpy as np

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader, read_types
from AaronTools.utils.utils import get_filename, glob_files

translate_parser = argparse.ArgumentParser(
    description="move atoms along a vector",
    formatter_class=argparse.RawTextHelpFormatter,
)
translate_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin],
    help="a coordinate file",
)
translate_parser.add_argument(
    "-if", "--input-format", type=str, choices=read_types, default=None,
    dest="input_format",
    help="file format of input - xyz is assumed if input is stdin",
)

target_specifier = translate_parser.add_argument_group("atoms to move")
target_specifier.add_argument(
    "-t", "--targets", type=str, default=None, required=False, dest="targets",
    metavar="atoms",
    help="move atoms with specified indices\nDefault: whole structure",
)
target_specifier.add_argument(
    "-f", "--fragment", type=str, default=None, required=False,
    dest="fragments", metavar="atoms",
    help="move fragments containing specified atoms\nDefault: whole structure",
)

# note: -ct deliberately shares dest="targets" with -t; either flag selects
# the atoms whose center is moved
translate_parser.add_argument(
    "-ct", "--center-targets", type=str, default=None, required=False,
    dest="targets", metavar="targets",
    help="target atoms for -com or -cent arguments\n"
    "comma (,) and/or hyphen (-) separated list\n"
    "hyphens denote a range of atoms\n"
    "commas separate individual atoms or ranges\n"
    "default: whole structure",
)

translate_mode = translate_parser.add_argument_group(
    "translation mode (default: move centroid to origin)"
)
# required=False so that omitting both -v and -dest falls through to the
# documented default of moving the centroid to the origin
trans_modes = translate_mode.add_mutually_exclusive_group(required=False)
trans_modes.add_argument(
    "-v", "--vector", type=float, nargs=3, default=None, required=False,
    dest="vector", metavar=("x", "y", "z"),
    help="translate in direction of this vector\n"
    "vector is normalized when --distance/-d is used",
)
translate_parser.add_argument(
    "-d", "--distance", type=float, default=None, required=False,
    dest="distance",
    help="distance translated - only applies to --vector/-v",
)
trans_modes.add_argument(
    "-dest", "--destination", type=float, nargs=3, default=None, required=False,
    dest="dest", metavar=("x", "y", "z"),
    help="translate fragment to a point",
)

center_parser = translate_parser.add_argument_group("center (default: centroid)")
center_option = center_parser.add_mutually_exclusive_group(required=False)
center_option.add_argument(
    "-com", "--center-of-mass", action="store_const", const=True, default=None,
    required=False, dest="com",
    help="translate the center of mass of the targets to the destination",
)
center_option.add_argument(
    "-cent", "--centroid", action="store_const", const=True, default=None,
    required=False, dest="cent",
    help="translate the centroid of the targets to the destination",
)

translate_parser.add_argument(
    "-o", "--output", type=str, default=False, required=False, dest="outfile",
    help="output destination\n"
    "$INFILE will be replaced with the name of the input file\n"
    "Default: stdout",
)

args = translate_parser.parse_args()

for f in glob_files(args.infile, parser=translate_parser):
    if isinstance(f, str):
        if args.input_format is not None:
            infile = FileReader((f, args.input_format, None))
        else:
            infile = FileReader(f)
    else:
        if args.input_format is not None:
            infile = FileReader(("from stdin", args.input_format, f))
        else:
            infile = FileReader(("from stdin", "xyz", f))

    geom = Geometry(infile)

    # targets to move
    targets = []
    if args.targets is not None:
        targets.extend(geom.find(args.targets))

    if args.fragments is not None:
        for atom in geom.find(args.fragments):
            frag_atoms = geom.get_all_connected(atom)
            targets.extend(
                [frag_atom for frag_atom in frag_atoms if frag_atom not in targets]
            )

    if not targets:
        targets = None

    # start with com or centroid where it is
    if args.com or not args.cent:
        com = geom.COM(targets=targets, mass_weight=True)
        start = com

    if args.cent:
        cent = geom.COM(targets=targets, mass_weight=False)
        start = cent

    # find where we are moving com or centroid to
    if args.dest is not None:
        # destination was specified
        destination = np.array(args.dest)
    elif args.vector is not None:
        # direction was specified
        if args.distance is not None:
            # magnitude was specified
            v = np.array(args.vector)
            v /= np.linalg.norm(v)
            destination = start + args.distance * v
        else:
            destination = start + np.array(args.vector)
    else:
        # nothing was specified - move to origin
        destination = np.zeros(3)

    translate_vector = destination - start

    geom.coord_shift(translate_vector, targets=targets)

    if args.outfile:
        outfile = args.outfile
        if "$INFILE" in outfile:
            outfile = outfile.replace("$INFILE", get_filename(f))
        geom.write(append=True, outfile=outfile)
    else:
        print(geom.write(outfile=False))
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/translate.py
translate.py
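The only arithmetic in the script is the vector branch: normalize the direction when a distance is given, then offset from the current center. A standalone sketch of that step with made-up numbers:

import numpy as np

# the -v/--vector plus -d/--distance branch: normalize the direction,
# then step the chosen distance away from the current center
start = np.zeros(3)                 # hypothetical centroid
v = np.array([1.0, 1.0, 0.0])
distance = 2.0
v /= np.linalg.norm(v)
destination = start + distance * v
print(destination)                  # [1.41421356 1.41421356 0.]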
import sys
import argparse
import os
from glob import glob
from warnings import warn

from numpy import isclose

from AaronTools.comp_output import CompOutput
from AaronTools.fileIO import FileReader
from AaronTools.geometry import Geometry
from AaronTools.utils.utils import glob_files

thermo_parser = argparse.ArgumentParser(
    description="print thermal corrections and free energy",
    formatter_class=argparse.RawTextHelpFormatter,
)
thermo_parser.add_argument(
    "infile", metavar="frequency output file", type=str, nargs="*",
    default=[sys.stdin],
    help="completed QM output file with frequency info",
)
thermo_parser.add_argument(
    "-o", "--output", type=str, default=None, required=False, dest="outfile",
    help="output destination\nDefault: stdout",
)
thermo_parser.add_argument(
    "-if", "--input-format", type=str, nargs=1, default=None,
    dest="input_format", choices=["log", "out", "dat"],
    help="file format of input - required if input is stdin",
)
thermo_parser.add_argument(
    "-sp", "--single-point", type=str, nargs="*", default=[None],
    required=False, dest="sp_file",
    help="file containing single-point energy",
)
thermo_parser.add_argument(
    "-t", "--temperature", type=float, default=None, required=False,
    dest="temp",
    help="compute thermal corrections using the specified temperature (K)\n"
    "Default: value found in file or 298.15",
)
thermo_parser.add_argument(
    "-w0", "--frequency-cutoff", type=float, default=100.0, required=False,
    dest="w0",
    help="cutoff frequency for quasi free energy corrections (1/cm)\n"
    "Default: 100 cm^-1",
)
thermo_parser.add_argument(
    "-csv", "--csv-format", nargs="?", required=False, dest="csv",
    default=False, choices=["comma", "semicolon", "tab", "space"],
    help="print output in CSV format with the specified delimiter",
)
thermo_parser.add_argument(
    "-r", "--recursive", metavar="PATTERN", type=str, nargs="*", default=None,
    required=False, dest="pattern",
    help="search subdirectories of current directory for files matching PATTERN",
)

args = thermo_parser.parse_args()

if args.csv is None:
    args.csv = "comma"

if args.csv:
    if args.csv == "comma":
        delim = ","
    elif args.csv == "semicolon":
        delim = ";"
    elif args.csv == "tab":
        delim = "\t"
    elif args.csv == "space":
        delim = " "

    # headers depend on delim, so only build them when CSV output is requested
    anharm_header = delim.join([
        "E", "E+ZPE", "E+ZPE(anh)", "H(RRHO)", "G(RRHO)", "G(Quasi-RRHO)",
        "G(Quasi-harmonic)", "ZPE", "ZPE(anh)", "dH(RRHO)", "dG(RRHO)",
        "dG(Quasi-RRHO)", "dG(Quasi-harmonic)", "SP_File", "Thermo_File",
    ])
    harm_header = delim.join([
        "E", "E+ZPE", "H(RRHO)", "G(RRHO)", "G(Quasi-RRHO)",
        "G(Quasi-harmonic)", "ZPE", "dH(RRHO)", "dG(RRHO)",
        "dG(Quasi-RRHO)", "dG(Quasi-harmonic)", "SP_File", "Thermo_File",
    ])

header = None

output = ""

if args.pattern is None:
    infiles = glob_files(args.infile, parser=thermo_parser)
else:
    infiles = []
    if args.infile == [sys.stdin]:
        directories = [os.getcwd()]
    else:
        directories = []
        for directory in args.infile:
            directories.extend(glob(directory))
    for directory in directories:
        for root, dirs, files in os.walk(directory, topdown=True):
            for pattern in args.pattern:
                full_glob = os.path.join(root, pattern)
                infiles.extend(glob(full_glob))
    infiles.sort()

if args.sp_file != [None]:
    if args.pattern is None:
        sp_filenames = glob_files([f for f in args.sp_file])
    else:
        sp_filenames = []
        if args.infile == [sys.stdin]:
            directories = [os.getcwd()]
        else:
            directories = []
            for directory in args.infile:
                directories.extend(glob(directory))
        for directory in directories:
            for root, dirs, files in os.walk(directory, topdown=True):
                for pattern in args.sp_file:
                    full_glob = os.path.join(root, pattern)
                    sp_filenames.extend(glob(full_glob))
    sp_filenames.sort()
    sp_files = [FileReader(f, just_geom=False) for f in sp_filenames]
    sp_energies = [sp_file.other["energy"] for sp_file in sp_files]
else:
    sp_energies = [None for f in infiles]
    sp_filenames = [None for f in infiles]

while len(sp_energies) < len(infiles):
    sp_energies.extend([sp_file.other["energy"] for sp_file in sp_files])
    sp_filenames.extend(args.sp_file)

while len(infiles) < len(sp_filenames):
    infiles.extend(args.infile)

for sp_nrg, sp_file, f in zip(sp_energies, sp_filenames, infiles):
    if isinstance(f, str):
        if args.input_format is not None:
            infile = FileReader((f, args.input_format[0], None), just_geom=False)
        else:
            infile = FileReader(f, just_geom=False)
    else:
        if args.input_format is not None:
            infile = FileReader(("from stdin", args.input_format[0], f), just_geom=False)
        else:
            thermo_parser.print_help()
            raise RuntimeError(
                "when no input file is given, stdin is read and a format must be specified"
            )

    if "frequency" not in infile.other:
        warn("no frequencies in %s - skipping" % f)
        continue

    freq = infile.other["frequency"]

    co = CompOutput(infile)

    if sp_nrg is None:
        nrg = co.energy
    else:
        nrg = sp_nrg
        # only compare geometries when a separate single-point file was given
        sp_geom = Geometry(sp_file)
        freq_geom = Geometry(infile)
        rmsd = sp_geom.RMSD(freq_geom)
        if not isclose(rmsd, 0, atol=1e-5):
            warn(
                "\ngeometries in supposed single-point/thermochemistry pair appear\n"
                "to be different (rmsd = %.5f)\n" % rmsd
                + "%s\n%s" % (sp_geom.name, freq_geom.name)
            )

    dE, dH, s = co.therm_corr(temperature=args.temp)
    if freq.anharm_data:
        ZPVE_anh = co.calc_zpe(anharmonic=True)
    rrho_dG = co.calc_G_corr(v0=0, temperature=args.temp, method="RRHO")
    qrrho_dG = co.calc_G_corr(v0=args.w0, temperature=args.temp, method="QRRHO")
    qharm_dG = co.calc_G_corr(v0=args.w0, temperature=args.temp, method="QHARM")
    if args.temp is None:
        t = co.temperature
    else:
        t = args.temp

    if args.csv:
        nrg_str = "%.6f" % nrg
        corrections = [co.ZPVE]
        if freq.anharm_data:
            if header != anharm_header:
                output += "%s\n" % anharm_header
                header = anharm_header
            corrections.append(ZPVE_anh)
        elif header != harm_header:
            output += "%s\n" % harm_header
            header = harm_header
        corrections.extend([dH, rrho_dG, qrrho_dG, qharm_dG])
        therm = [nrg + correction for correction in corrections]
        output += delim.join(
            [nrg_str] + ["%.6f" % x for x in therm]
            + ["%.6f" % x for x in corrections]
            + [sp_file if sp_file is not None else f, f]
        )
        output += "\n"
    else:
        output += "electronic energy of %s = %.6f Eh\n" % (
            sp_file if sp_file is not None else f, nrg
        )
        output += "    E+ZPE          = %.6f Eh  (ZPE = %.6f)\n" % (
            nrg + co.ZPVE, co.ZPVE
        )
        if freq.anharm_data:
            output += "    E+ZPE(anh)     = %.6f Eh  (ZPE(anh) = %.6f)\n" % (
                nrg + ZPVE_anh, ZPVE_anh
            )
        output += "thermochemistry from %s at %.2f K:\n" % (f, t)
        output += "    H(RRHO)            = %.6f Eh  (dH = %.6f)\n" % (nrg + dH, dH)
        output += "    G(RRHO)            = %.6f Eh  (dG = %.6f)\n" % (
            nrg + rrho_dG, rrho_dG
        )
        output += "  quasi treatments for entropy (w0=%.1f cm^-1):\n" % args.w0
        output += "    G(Quasi-RRHO)      = %.6f Eh  (dG = %.6f)\n" % (
            nrg + qrrho_dG, qrrho_dG
        )
        output += "    G(Quasi-harmonic)  = %.6f Eh  (dG = %.6f)\n" % (
            nrg + qharm_dG, qharm_dG
        )
        output += "\n"

output = output.strip()

if not args.outfile:
    print(output.strip())
else:
    with open(args.outfile, "a") as f:
        f.write(output.strip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/grabThermo.py
grabThermo.py
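A minimal sketch of the thermochemistry workflow grabThermo.py automates, using only calls that appear in the script above; "freq.log" is a hypothetical frequency-job output file.

from AaronTools.comp_output import CompOutput
from AaronTools.fileIO import FileReader

fr = FileReader("freq.log", just_geom=False)   # hypothetical frequency output
co = CompOutput(fr)
dE, dH, dS = co.therm_corr(temperature=298.15)  # RRHO thermal corrections
rrho_dG = co.calc_G_corr(v0=0, temperature=298.15, method="RRHO")
qrrho_dG = co.calc_G_corr(v0=100.0, temperature=298.15, method="QRRHO")
print("G(RRHO)       = %.6f Eh" % (co.energy + rrho_dG))
print("G(Quasi-RRHO) = %.6f Eh" % (co.energy + qrrho_dG))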
import sys import argparse import numpy as np from AaronTools.fileIO import FileReader, read_types from AaronTools.geometry import Geometry from AaronTools.symmetry import ( PointGroup, InversionCenter, ProperRotation, ImproperRotation, MirrorPlane, ) from AaronTools.utils.utils import perp_vector, glob_files, get_filename pg_parser = argparse.ArgumentParser( description="print point group", formatter_class=argparse.RawTextHelpFormatter ) pg_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) pg_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", help="output destination \nDefault: stdout" ) pg_parser.add_argument( "-if", "--input-format", type=str, default=None, dest="input_format", choices=read_types, help="file format of input - xyz is assumed if input is stdin" ) pg_parser.add_argument( "-t", "--tolerance", default=0.1, type=float, dest="tolerance", help="tolerance for determining if a symmetry element is valid\n" "for the input structure(s)\nDefault: 0.1" ) pg_parser.add_argument( "-a", "--axis-tolerance", default=0.01, type=float, dest="rotation_tolerance", help="tolerance for determining if two axes are coincident or orthogonal" "\nDefault: 0.01" ) pg_parser.add_argument( "-n", "--max-n", default=6, type=int, dest="max_n", help="max. order for proper rotation axes (improper rotations can be 2x this)" "\nDefault: 6" ) pg_parser.add_argument( "-e", "--report-error", action="store_true", default=False, dest="report_error", help="print all symmetry elements", ) args = pg_parser.parse_args() s = "" for f in glob_files(args.infile, parser=pg_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None), just_geom=True) else: infile = FileReader(f, just_geom=True) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format[0], f), just_geom=True) else: if len(sys.argv) >= 1: infile = FileReader(("from stdin", "xyz", f), just_geom=True) geom = Geometry(infile) out = geom.write(outfile=False) pg = PointGroup( geom, tolerance=args.tolerance, rotation_tolerance=args.rotation_tolerance, max_rotation=args.max_n, ) if args.report_error: print("%s is %s" % (geom.name, pg.name)) tot_error, max_error, max_ele = pg.total_error(return_max=True) print("total error before symmetrizing: %.4f" % tot_error) print("max. error before symmetrizing: %.4f (%s)" % (max_error, max_ele)) pg.idealize_geometry() if args.report_error: tot_error, max_error, max_ele = pg.total_error(return_max=True) print("total error after symmetrizing: %.4f" % tot_error) print("max. error after symmetrizing: %.4f (%s)" % (max_error, max_ele)) out = geom.write(outfile=False) if not args.outfile: print(out) else: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) with open(outfile, "w") as f: f.write(out)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/symmetrize.py
symmetrize.py
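The symmetrization loop above reduces to a few AaronTools calls. A hedged sketch, assuming a hypothetical "mol.xyz" and the script's default tolerances:

from AaronTools.geometry import Geometry
from AaronTools.symmetry import PointGroup

geom = Geometry("mol.xyz")   # hypothetical input structure
pg = PointGroup(geom, tolerance=0.1, rotation_tolerance=0.01, max_rotation=6)
print("point group:", pg.name)
tot_error, max_error, max_ele = pg.total_error(return_max=True)
print("total error: %.4f (max %.4f on %s)" % (tot_error, max_error, max_ele))
pg.idealize_geometry()       # move atoms onto the ideal symmetric positions
print(geom.write(outfile=False))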
import sys import numpy as np import argparse from AaronTools.geometry import Geometry from AaronTools.fileIO import FileReader, read_types from AaronTools.utils.utils import get_filename, glob_files def three_atoms_and_a_float(vals): """check to see if argument is three ints and a float""" err = argparse.ArgumentTypeError( "error with atom/angle specification: %s\n" % " ".join(vals) + "expected -s/-c int int int float\n" + "example: -c 4 5 6 +180" ) out = [] if len(vals) != 4: raise err for v in vals[:-1]: try: out.append(int(v)-1) except ValueError: raise err if int(v) != float(v): raise err try: out.append(float(vals[-1])) except ValueError: raise err return out angle_parser = argparse.ArgumentParser( description="measure or modify 1-2-3 angles", formatter_class=argparse.RawTextHelpFormatter ) angle_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) angle_parser.add_argument( "-if", "--input-format", type=str, nargs=1, default=None, choices=read_types, dest="input_format", help="file format of input - xyz is assumed if input is stdin" ) angle_parser.add_argument( "-m", "--measure", metavar=("atom1", "center", "atom3"), action="append", type=int, nargs=3, default=[], required=False, dest="measure", help="measure and print 1-2-3 angle (1-indexed)" ) angle_parser.add_argument( "-c", "--change", metavar=("atom1", "center", "atom3", "increment"), action="append", type=str, nargs=4, default=[], required=False, dest="change", help="change 1-2-3 angle by the amount specified" ) angle_parser.add_argument( "-s", "--set", metavar=("atom1", "center", "atom3", "angle"), action="append", type=str, nargs=4, default=[], required=False, dest="set_ang", help="set 1-2-3 angle to the amount specified" ) angle_parser.add_argument( "-r", "--radians", action="store_const", const=True, default=False, required=False, dest="radians", help="work with radians instead of degrees" ) angle_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", metavar="output destination", help="output destination\n" + "$INFILE will be replaced with the name of the input file\n" + "Default: stdout" ) args = angle_parser.parse_args() for f in glob_files(args.infile, parser=angle_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format[0], None)) else: infile = FileReader(f) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format[0], f)) else: infile = FileReader(("from stdin", "xyz", f)) geom = Geometry(infile) #set angle to specified value for angle in args.set_ang: vals = three_atoms_and_a_float(angle) a1 = geom.find(str(vals[0]))[0] a2 = geom.find(str(vals[1]))[0] a3 = geom.find(str(vals[2]))[0] geom.change_angle(a1, a2, a3, vals[3], radians=args.radians, adjust=False) #change angle by specified amount for angle in args.change: vals = three_atoms_and_a_float(angle) a1 = geom.find(str(vals[0]))[0] a2 = geom.find(str(vals[1]))[0] a3 = geom.find(str(vals[2]))[0] geom.change_angle(a1, a2, a3, vals[3], radians=args.radians, adjust=True) #print specified angles out = "" for angle in args.measure: a1 = geom.find(str(angle[0]))[0] a2 = geom.find(str(angle[1]))[0] a3 = geom.find(str(angle[2]))[0] val = geom.angle(a1, a2, a3) if not args.radians: val *= 180 / np.pi out += "%f\n" % val out = out.rstrip() if len(args.set_ang) + len(args.change) > 0: if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = 
outfile.replace("$INFILE", get_filename(f)) geom.write(append=True, outfile=outfile) else: print(geom.write(outfile=False)) if len(args.measure) > 0: if not args.outfile: print(out) else: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) with open(outfile, "a") as f: f.write(out)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/angle.py
angle.py
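For reference, the API the script wraps: Geometry.angle returns radians (hence the 180/pi conversion above), and change_angle uses adjust= to distinguish incrementing from setting. A sketch with a hypothetical water.xyz:

import numpy as np
from AaronTools.geometry import Geometry

geom = Geometry("water.xyz")   # hypothetical input
a1 = geom.find("2")[0]         # find() takes 1-indexed strings, returns a list
a2 = geom.find("1")[0]
a3 = geom.find("3")[0]
print("angle = %.2f deg" % (geom.angle(a1, a2, a3) * 180 / np.pi))
geom.change_angle(a1, a2, a3, 109.5, radians=False, adjust=False)  # set
geom.change_angle(a1, a2, a3, 5.0, radians=False, adjust=True)     # increment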
import argparse import os import sys from warnings import warn import numpy as np from AaronTools.fileIO import FileReader, read_types from AaronTools.geometry import Geometry from AaronTools.utils.utils import get_filename, glob_files rotate_parser = argparse.ArgumentParser( description="rotate a fragment or molecule's coordinates", formatter_class=argparse.RawTextHelpFormatter, ) rotate_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file", ) rotate_parser.add_argument( "-if", "--input-format", type=str, default=None, choices=read_types, dest="input_format", help="file format of input - xyz is assumed if input is stdin", ) rotated_atoms = rotate_parser.add_argument_group( "rotated atoms (default is all atoms)" ) rot_atoms = rotated_atoms.add_mutually_exclusive_group(required=False) rot_atoms.add_argument( "-t", "--target", type=str, default=None, required=False, dest="targets", metavar="targets", help="atoms to rotate (1-indexed)\n" + "comma- and/or hyphen-separated list\n" + "hyphens denote a range of atoms; commas separate individual atoms or ranges", ) rot_atoms.add_argument( "-f", "--fragment", type=str, default=None, required=False, dest="fragment", metavar="target", help="rotate fragment containing target", ) rotate_parser.add_argument( "-c", "--center", default=None, required=False, dest="center", metavar="targets", help="translate the centroid of the specified atoms to the\norigin before rotating", ) defined_vector = rotate_parser.add_argument_group("define vector") def_vector = defined_vector.add_mutually_exclusive_group(required=True) def_vector.add_argument( "-v", "--vector", type=float, nargs=3, default=None, required=False, dest="vector", metavar=("x", "y", "z"), help="rotate about the vector from the origin to (x, y, z)", ) def_vector.add_argument( "-b", "--bond", type=str, nargs=2, default=None, dest="bond", metavar=("a1", "a2"), help="rotate about the vector from atom a1 to atom a2 (1-indexed)", ) def_vector.add_argument( "-x", "--axis", type=str, default=None, required=False, dest="axis", choices=["x", "y", "z"], help="rotate about specified axis", ) def_vector.add_argument( "-g", "--group", type=str, default=None, required=False, dest="group", metavar="targets", help="rotate about axis from origin (or center specified with '--center')\n" + "to the centroid of the specified atoms", ) def_vector.add_argument( "-p", "--perpendicular", type=str, default=None, required=False, dest="perp", metavar="targets", help="rotate about a vector orthogonal to the plane of best fit containing targets", ) rotate_parser.add_argument( "-a", "--angle", type=float, default=None, required=None, dest="angle", metavar="angle", help="angle of rotation (in degrees by default)", ) rotate_parser.add_argument( "-r", "--radians", action="store_const", const=True, default=False, required=False, dest="radians", help="use when angle is specified in radians instead of degrees", ) rotate_parser.add_argument( "-n", "--number", type=int, default=None, required=False, dest="num", metavar="num", help="when angle is specified, rotate num times by angle\n" + "when angle is not specified, rotate 360/num degrees num times", ) rotate_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", help="output destination\n" + "$INFILE, $AXIS, $ANGLE will be replaced with the name of the\n" + "input file, rotation axis, and angle or rotation, respectively\nDefault: stdout", ) args = rotate_parser.parse_args() if 
args.angle is None and args.num is None: raise ValueError("must specify one of ('--angle', '--number')") elif args.num is None: args.num = 1 elif args.angle is None: args.angle = 360.0 / args.num if not args.radians: args.angle = np.deg2rad(args.angle) for f in glob_files(args.infile, parser=rotate_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None)) else: infile = FileReader(f) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format, f)) else: infile = FileReader(("from stdin", "xyz", f)) geom = Geometry(infile) center = args.center if args.fragment is not None: targets = geom.get_all_connected(args.fragment) else: targets = args.targets if args.vector is not None: vector = args.vector elif args.bond is not None: a1 = geom.find(args.bond[0])[0] a2 = geom.find(args.bond[1])[0] vector = a1.bond(a2) if center is None: warn( "center set to the coordinates of atom %s; using --center/-c none will override this" % a1.name ) center = a1 elif args.axis is not None: vector = np.zeros(3) vector[["x", "y", "z"].index(args.axis)] = 1.0 elif args.group is not None: vector = geom.COM(targets=args.group) if center is not None: vector -= geom.COM(targets=center) elif args.perp is not None: if len(geom.find(args.perp)) < 3: raise RuntimeError( "must specify at least three atoms to --perpendicular/-p" ) xyz = geom.coordinates(args.perp) - geom.COM(args.perp) R = np.dot(xyz.T, xyz) u, s, vh = np.linalg.svd(R, compute_uv=True) vector = u[:, -1] if center is None: warn( "center set to the centroid of atoms %s; using --center/-c none will override this" % args.perp ) center = geom.COM(args.perp) if args.center is not None and args.center.lower() == "none": center = None for i in range(0, args.num): geom.rotate(vector, args.angle, targets=targets, center=center) if args.outfile is not False: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) outfile = outfile.replace( "$AXIS", ".".join(["%.3f" % x for x in vector]) ) outfile = outfile.replace( "$ANGLE", str.zfill("%.2f" % np.rad2deg(args.angle * (i + 1)), 6), ) parent_dir = os.path.dirname(outfile) if not os.path.isdir(parent_dir) and parent_dir != "": os.makedirs(parent_dir) else: outfile = args.outfile s = geom.write(append=True, outfile=outfile) if not outfile: print(s)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/rotate.py
rotate.py
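The script ultimately makes one call per rotation step: Geometry.rotate with an axis vector and an angle in radians. A hedged sketch rotating a hypothetical structure 90 degrees about z, mirroring the no-target, no-center path through the script:

import numpy as np
from AaronTools.geometry import Geometry

geom = Geometry("mol.xyz")   # hypothetical input
# targets=None rotates all atoms; center=None rotates about the origin,
# as in the script when neither --target nor --center is given
geom.rotate(np.array([0.0, 0.0, 1.0]), np.deg2rad(90), targets=None, center=None)
print(geom.write(outfile=False))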
import sys import argparse from warnings import warn from AaronTools.geometry import Geometry from AaronTools.fileIO import FileReader, read_types from AaronTools.utils.utils import get_filename, glob_files def range2int(s): """split on "," and turn "-" into a range range2int(["1,3-5,7"]) returns [0, 2, 3, 4, 6] returns None if input is None""" if s is None: return s if isinstance(s, list): range_str = ",".join(s) out = [] c = range_str.split(",") for v in c: n = v.split("-") if len(n) == 2: out.extend([i for i in range(int(n[0])-1, int(n[1]))]) else: for i in n: out.append(int(i)-1) return out rmsd_parser = argparse.ArgumentParser( description="align structure to reference", formatter_class=argparse.RawTextHelpFormatter ) rmsd_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) rmsd_parser.add_argument( "-if", "--input-format", type=str, default=None, choices=read_types, dest="input_format", help="file format of input - xyz is assumed if input is stdin" ) rmsd_parser.add_argument( "-r", "--reference", type=str, default=None, dest="ref", help="reference structure" ) rmsd_parser.add_argument( "-it", "--input-targets", type=str, default=None, required=False, dest="in_target", metavar="targets", help="target atoms on input (1-indexed)\n" + "comma (,) and/or hyphen (-) separated list\n" + "hyphens denote a range of atoms\n" + "commas separate individual atoms or ranges\n" + "Default: whole structure" ) rmsd_parser.add_argument( "-rt", "--ref-targets", type=str, default=None, required=False, dest="ref_target", metavar="targets", help="target atoms on reference (1-indexed)" ) output_options = rmsd_parser.add_argument_group("output options") output_format = output_options.add_mutually_exclusive_group(required=False) output_format.add_argument( "-v", "--value", action="store_true", required=False, dest="value_only", help="print RMSD only" ) output_format.add_argument( "-csv", "--comma-seperated", action="store_true", required=False, dest="csv", help="print output in CSV format" ) output_options.add_argument( "-d", "--delimiter", type=str, default="comma", dest="delimiter", choices=["comma", "semicolon", "tab", "space"], help="CSV delimiter" ) rmsd_parser.add_argument( "-s", "--sort", action="store_true", required=False, dest="sort", help="sort atoms" ) rmsd_parser.add_argument( "-n", "--non-hydrogen", action="store_true", required=False, dest="heavy", help="ignore hydrogen atoms" ) output_options.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", help="output destination\n" + "$INFILE will be replaced with the name of the input file\n" + "Default: stdout" ) args = rmsd_parser.parse_args() if args.ref is not None: ref_geom = Geometry(args.ref) else: rmsd_parser.print_help() raise RuntimeError("reference geometry was not specified") if bool(args.in_target) ^ bool(args.ref_target): warn("targets may need to be specified for both input and reference") if args.csv: if args.delimiter == "comma": delim = "," elif args.delimiter == "space": delim = " " elif args.delimiter == "semicolon": delim = ";" elif args.delimiter == "tab": delim = "\t" header = delim.join(["reference", "geometry", "RMSD"]) if args.outfile: with open(args.outfile, "w") as f: f.write(header + "\n") else: print(header) for f in glob_files(args.infile, parser=rmsd_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None)) else: infile = FileReader(f) else: if 
args.input_format is not None: infile = FileReader(("from stdin", args.input_format, f)) else: infile = FileReader(("from stdin", "xyz", f)) geom = Geometry(infile) # align rmsd = geom.RMSD( ref_geom, align=True, targets=args.in_target, ref_targets=args.ref_target, heavy_only=args.heavy, sort=args.sort ) geom.comment = "rmsd = %f" % rmsd if not args.value_only and not args.csv: if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) geom.write(append=True, outfile=outfile) else: print(geom.write(outfile=False)) elif args.value_only: if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) with open(outfile, "a") as f: f.write("%f\n" % rmsd) else: print("%f" % rmsd) elif args.csv: s = delim.join([ref_geom.name, geom.name, "%f" % rmsd]) if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) with open(outfile, "a") as f: f.write(s + "\n") else: print(s)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/rmsdAlign.py
rmsdAlign.py
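The alignment itself is a single call: RMSD(align=True) superimposes the input onto the reference and returns the residual. A sketch, assuming hypothetical ref.xyz and cand.xyz:

from AaronTools.geometry import Geometry

ref = Geometry("ref.xyz")    # hypothetical reference structure
geom = Geometry("cand.xyz")  # hypothetical candidate structure
rmsd = geom.RMSD(ref, align=True, sort=True, heavy_only=False)
print("rmsd = %f" % rmsd)    # geom's coordinates are now aligned to ref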
import argparse import sys from AaronTools.fileIO import FileReader, read_types from AaronTools.geometry import Geometry from AaronTools.substituent import Substituent from AaronTools.utils.utils import get_filename, glob_files substitute_parser = argparse.ArgumentParser( description="replace an atom or substituent with another", formatter_class=argparse.RawTextHelpFormatter, ) substitute_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file", ) substitute_parser.add_argument( "-ls", "--list", action="store_const", const=True, default=False, required=False, dest="list_avail", help="list available substituents", ) substitute_parser.add_argument( "-if", "--input-format", type=str, default=None, choices=read_types, dest="input_format", help="file format of input - xyz is assumed if input is stdin", ) substitute_parser.add_argument( "-s", "--substitute", metavar="n=substituent", type=str, action="append", default=None, required=False, dest="substitutions", help="substitution instructions \n" + "n is the 1-indexed position of the starting position of the\n" + "substituent you are replacing\n" + "a substituent name prefixed by iupac: or smiles: (e.g. iupac:acetyl\n" + "or smiles:O=[N.]=O) will create the substituent from the\n" + "corresponding identifier", ) substitute_parser.add_argument( "-m", "--minimize", action="store_const", const=True, default=False, required=False, dest="mini", help="rotate substituents to try to minimize LJ energy", ) substitute_parser.add_argument( "-o", "--output", type=str, default=False, required=False, metavar="output destination", dest="outfile", help="output destination\n" + "$INFILE will be replaced with the name of the input file\n" + "Default: stdout" ) args = substitute_parser.parse_args() if args.list_avail: s = "" for name in sorted(Substituent.list()): s += "%s\n" % name print(s.strip()) sys.exit(0) for infile in glob_files(args.infile, parser=substitute_parser): if isinstance(infile, str): if args.input_format is not None: f = FileReader((infile, args.input_format, infile)) else: f = FileReader(infile) else: if args.input_format is not None: f = FileReader(("from stdin", args.input_format, infile)) else: f = FileReader(("from stdin", "xyz", infile)) geom = Geometry(f) target_list = [] for sub in args.substitutions: ndx_targets = sub.split("=")[0] target_list.append(geom.find(ndx_targets)) for i, sub in enumerate(args.substitutions): ndx_target = target_list[i] sub_name = "=".join(sub.split("=")[1:]) # parse any iupac:/smiles: prefix once, so it still applies to every target if sub_name.lower().startswith("iupac:"): sub_name = ":".join(sub_name.split(":")[1:]) form = "iupac" elif sub_name.lower().startswith("smiles:"): sub_name = ":".join(sub_name.split(":")[1:]) form = "smiles" else: form = None for target in ndx_target: # build a fresh substituent for each target - substitution mutates it if form is not None: new_sub = Substituent.from_string(sub_name, form=form) else: new_sub = Substituent(sub_name) # replace old substituent with new substituent geom.substitute(new_sub, target, minimize=args.mini) geom.refresh_connected() if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(infile)) geom.write( append=True, outfile=outfile, ) else: print(geom.write(outfile=False))
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/substitute.py
substitute.py
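A sketch of the substitution call, assuming "Me" is a name in the local substituent library (the script's -ls flag lists what is actually available) and a hypothetical benzene.xyz where atom 12 is a hydrogen:

from AaronTools.geometry import Geometry
from AaronTools.substituent import Substituent

geom = Geometry("benzene.xyz")   # hypothetical input
sub = Substituent("Me")          # assumes "Me" is in the substituent library
# the script also supports Substituent.from_string("acetyl", form="iupac")
# and Substituent.from_string("O=[N.]=O", form="smiles")
for target in geom.find("12"):   # 1-indexed atom to replace
    geom.substitute(sub, target, minimize=True)
geom.refresh_connected()
print(geom.write(outfile=False))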
import os import sys import argparse import numpy as np from AaronTools.const import UNIT from AaronTools.geometry import Geometry from AaronTools.fileIO import FileReader, read_types from AaronTools.utils.utils import get_filename, glob_files unique_parser = argparse.ArgumentParser( description="determine which structures are unique", formatter_class=argparse.RawTextHelpFormatter ) unique_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) unique_parser.add_argument( "-if", "--input-format", type=str, default=None, dest="input_format", choices=read_types, help="file format of input - xyz is assumed if input is stdin" ) unique_parser.add_argument( "-t", "--rmsd-tolerance", type=float, default=0.15, dest="tol", help="RMSD tolerance for structures with the same chemical formula\n" + "to be considered unique\nDefault: 0.15" ) unique_parser.add_argument( "-e", "--energy-filter", metavar="KCAL/MOL", nargs="?", default=False, type=float, dest="energy_filter", help="only compare structures with similar energy\n" + "structures without an energy are always compared\n" + "Default: compare regardless of energy", ) unique_parser.add_argument( "-m", "--mirror", action="store_true", default=False, dest="mirror", help="also mirror structures when comparing", ) unique_parser.add_argument( "-d", "--directory", type=str, dest="directory", default=False, help="put structures in specified directory\nDefault: don't output structures", ) args = unique_parser.parse_args() if args.energy_filter is None: args.energy_filter = 0.2 mirror_mat = np.eye(3) mirror_mat[0][0] *= -1 # dictionary of structures, which will be ordered by number of atoms, elements, etc. structures = {} for f in glob_files(args.infile, parser=unique_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None), just_geom=False) else: infile = FileReader(f, just_geom=False) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format, f), just_geom=False) else: infile = FileReader(("from stdin", "xyz", f), just_geom=False) geom = Geometry(infile) geom.other = infile.other if args.mirror: geom_mirrored = geom.copy() geom_mirrored.update_geometry(np.dot(geom.coords, mirror_mat)) n_atoms = len(geom.atoms) if n_atoms not in structures: structures[n_atoms] = {} element_list = [atom.element for atom in geom.atoms] elements = sorted(list(set(element_list))) s = "" for ele in elements: s += "%s%i" % (ele, element_list.count(ele)) if not any(s == key for key in structures[n_atoms].keys()): structures[n_atoms][s] = [] dup = False for group in structures[n_atoms][s]: for struc, _, _ in group: if args.energy_filter and "energy" in geom.other and "energy" in struc.other: d_nrg = UNIT.HART_TO_KCAL * abs( geom.other["energy"] - struc.other["energy"] ) if d_nrg > args.energy_filter: continue rmsd = geom.RMSD(struc, sort=True) if rmsd < args.tol: dup = True group.append((geom, rmsd, False)) break if args.mirror: rmsd2 = geom_mirrored.RMSD(struc, sort=True) if rmsd2 < args.tol: dup = True group.append((geom, rmsd2, True)) if dup: break if not dup: structures[n_atoms][s].append([(geom, 0, False)]) s = "" unique = 0 total = 0 for n_atoms in structures: for formula in structures[n_atoms]: formula_unique = len(structures[n_atoms][formula]) unique += formula_unique s += "%s\n" % formula for group in structures[n_atoms][formula]: total += len(group) if args.directory: dir_name = os.path.join(
args.directory, formula, get_filename(group[0][0].name, include_parent_dir=False), ) if not os.path.exists(dir_name): os.makedirs(dir_name) for geom, rmsd, _ in group: geom.comment="RMSD from %s = %.4f" % ( get_filename(group[0][0].name, include_parent_dir=False), rmsd, ) if args.energy_filter and all("energy" in g.other for g in [geom, group[0][0]]): d_nrg = UNIT.HART_TO_KCAL * (geom.other["energy"] - group[0][0].other["energy"]) geom.comment += " energy from %s = %.1f kcal/mol" % ( get_filename(group[0][0].name, include_parent_dir=False), d_nrg, ) geom.write( outfile=os.path.join( args.directory, formula, get_filename(group[0][0].name, include_parent_dir=False), get_filename(geom.name, include_parent_dir=False) + ".xyz" ), ) if len(group) > 1: if len(group) == 2: s += "there is %i structure similar to %s:\n" % ( len(group) - 1, group[0][0].name, ) else: s += "there are %i structures similar to %s:\n" % ( len(group) - 1, group[0][0].name, ) for geom, rmsd, mirrored in group[1:]: if not mirrored: s += "\t%s (RMSD = %.3f)" % (geom.name, rmsd) else: s += "\t%s (mirrored) (RMSD = %.3f)" % (geom.name, rmsd) if args.energy_filter and all("energy" in g.other for g in [geom, group[0][0]]): d_nrg = UNIT.HART_TO_KCAL * (geom.other["energy"] - group[0][0].other["energy"]) s += " (dE = %.1f kcal/mol)" % ( d_nrg, ) else: s += "there are no other structures identical to %s\n" % group[0][0].name s += "\n" s += "-----\n\n" s += "there were %i input structures\n" % total s += "in total, there are %i unique structures\n" % unique print(s)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/unique.py
unique.py
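The duplicate test at the heart of the script is a sorted-atom RMSD against a tolerance. A sketch comparing two hypothetical conformers:

from AaronTools.geometry import Geometry

geom_a = Geometry("conf1.xyz")  # hypothetical conformers
geom_b = Geometry("conf2.xyz")
tol = 0.15                      # the script's default RMSD tolerance
rmsd = geom_a.RMSD(geom_b, sort=True)  # sort atoms before comparing
print("duplicate" if rmsd < tol else "unique", "(rmsd = %.3f)" % rmsd)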
import sys import argparse from warnings import warn from AaronTools.atoms import Atom from AaronTools.geometry import Geometry from AaronTools.fileIO import FileReader, read_types from AaronTools.utils.utils import get_filename, glob_files vsepr_choices = [ "point", "linear_1", "linear_2", "bent_2_tetrahedral", "bent_2_planar", "trigonal_planar", "bent_3_tetrahedral", "t_shaped", "tetrahedral", "sawhorse", "seesaw", "square_planar", "trigonal_pyramidal", "trigonal_bipyramidal", "square_pyramidal", "pentagonal", "hexagonal", "trigonal_prismatic", "pentagonal_pyramidal", "octahedral", "capped_octahedral", "hexagonal_pyramidal", "pentagonal_bipyramidal", "capped_trigonal_prismatic", "heptagonal", "hexagonal_bipyramidal", "heptagonal_pyramidal", "octagonal", "square_antiprismatic", "trigonal_dodecahedral", "capped_cube", "biaugmented_trigonal_prismatic", "cubic", "elongated_trigonal_bipyramidal", "capped_square_antiprismatic", "enneagonal", "heptagonal_bipyramidal", "hula-hoop", "triangular_cupola", "tridiminished_icosahedral", "muffin", "octagonal_pyramidal", "tricapped_trigonal_prismatic", ] element_parser = argparse.ArgumentParser( description="change an element and/or adjust the VSEPR geometry", formatter_class=argparse.RawTextHelpFormatter ) element_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) element_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", help="output destination\n" + "$INFILE will be replaced with the name of the input file\n" + "Default: stdout" ) element_parser.add_argument( "-if", "--input-format", type=str, default=None, dest="input_format", choices=read_types, help="file format of input - xyz is assumed if input is stdin" ) element_parser.add_argument( "-e", "--element", metavar="target=element", type=str, action="append", required=True, dest="targets", help="element to change into" ) element_parser.add_argument( "-b", "--fix-bonds", action="store_true", required=False, dest="fix_bonds", help="adjust bond lengths for the new element" ) element_parser.add_argument( "-c", "--change-hydrogens", nargs="?", required=False, default=False, type=int, dest="change_hs", metavar="N", help="change the number of hydrogens by the specified amount\n" + "Specify nothing to automatically determine how many hydrogens\n" + "to add or remove. If nothing is specified, the new geometry will\n" + "also be determined automatically." 
) element_parser.add_argument( "-g", "--geometry", type=str, default=False, dest="geometry", choices=vsepr_choices, required=False, help="specify the geometry to use with the new element\n" + "if the argument is not supplied, the geometry will remain the same as\n" + "the previous element's, unless necessitated by an increase in hydrogens", ) args = element_parser.parse_args() fix_bonds = args.fix_bonds if isinstance(args.change_hs, int): adjust_hs = args.change_hs elif args.change_hs is None: adjust_hs = True else: adjust_hs = 0 new_vsepr = None if args.geometry: new_vsepr = args.geometry.replace("_", " ") if adjust_hs == 0 and new_vsepr is None: adjust_structure = False elif adjust_hs == 0 and new_vsepr: goal = len(Atom.get_shape(new_vsepr)) - 1 def goal_func(atom, goal=goal): return goal - len(atom.connected) adjust_structure = (goal_func, new_vsepr) elif adjust_hs is True: adjust_structure = True else: adjust_structure = (adjust_hs, new_vsepr) for f in glob_files(args.infile, parser=element_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None)) else: infile = FileReader(f) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format, f)) else: if len(sys.argv) >= 1: infile = FileReader(("from stdin", "xyz", f)) geom = Geometry(infile) target_list = [] for sub in args.targets: ndx_targets = sub.split("=")[0] target_list.append(geom.find(ndx_targets)) for i, target in enumerate(target_list): element = args.targets[i].split("=")[1] # changeElement will only change one at a time for single_target in target: geom.change_element( single_target, element, adjust_bonds=fix_bonds, adjust_hydrogens=adjust_structure ) if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) geom.write(append=True, outfile=outfile) else: print(geom.write(outfile=False))
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/changeElement.py
changeElement.py
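change_element does the real work; as constructed above, adjust_hydrogens may be a bool, an int offset, or a (function, shape) pair. A simple sketch with a hypothetical input:

from AaronTools.geometry import Geometry

geom = Geometry("mol.xyz")   # hypothetical input
atom = geom.find("4")[0]     # 1-indexed target atom
geom.change_element(atom, "N", adjust_bonds=True, adjust_hydrogens=True)
print(geom.write(outfile=False))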
import sys import argparse from AaronTools.geometry import Geometry from AaronTools.fileIO import FileReader, read_types from AaronTools.utils.utils import get_filename, glob_files def two_atoms_and_a_float(vals): """check to see if argument is two ints and a float""" err = argparse.ArgumentTypeError( "error with atom/distance specification: %s\n" % " ".join(vals) + "expected -s/-c int int float\nexample: -c 5 6 +9001" ) out = [] if len(vals) != 3: raise err for v in vals[:-1]: try: out.append(int(v)-1) except ValueError: raise err if int(v) != float(v): raise err try: out.append(float(vals[-1])) except ValueError: raise err return out bond_parser = argparse.ArgumentParser( description="measure or modify distance between atoms", formatter_class=argparse.RawTextHelpFormatter ) bond_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) bond_parser.add_argument( "-if", "--input-format", type=str, default=None, choices=read_types, dest="input_format", help="file format of input - xyz is assumed if input is stdin" ) bond_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", metavar="output destination", help="output destination\n" + "$INFILE will be replaced with the name of the input file\n" + "Default: stdout" ) bond_parser.add_argument( "-m", "--measure", metavar=("atom1", "atom2"), action="append", type=int, nargs=2, default=[], required=False, dest="measure", help="measure and print distance between atoms (1-indexed)" ) bond_parser.add_argument( "-c", "--change", metavar=("atom1", "atom2", "increment"), action="append", type=str, nargs=3, default=[], required=False, dest="change", help="change distance by the amount specified" ) bond_parser.add_argument( "-s", "--set", metavar=("atom1", "atom2", "distance"), action="append", type=str, nargs=3, default=[], required=False, dest="set_ang", help="set distance to the amount specified" ) args = bond_parser.parse_args() for f in glob_files(args.infile, parser=bond_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None)) else: infile = FileReader(f) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format, f)) else: infile = FileReader(("from stdin", "xyz", f)) geom = Geometry(infile) #set bond to specified value for bond in args.set_ang: vals = two_atoms_and_a_float(bond) a1 = geom.find(str(vals[0]))[0] a2 = geom.find(str(vals[1]))[0] geom.change_distance(a1, a2, dist=vals[2], adjust=False) #change bond by specified amount for bond in args.change: vals = two_atoms_and_a_float(bond) a1 = geom.find(str(vals[0]))[0] a2 = geom.find(str(vals[1]))[0] geom.change_distance(a1, a2, dist=vals[2], adjust=True) #print specified bonds out = "" for bond in args.measure: a1 = geom.find(str(bond[0]))[0] a2 = geom.find(str(bond[1]))[0] val = a1.dist(a2) out += "%f\n" % val out = out.rstrip() if len(args.set_ang) + len(args.change) > 0: if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) geom.write(append=True, outfile=outfile) else: print(geom.write(outfile=False)) if len(args.measure) > 0: if not args.outfile: print(out) else: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) with open(outfile, "a") as f: f.write(out)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/bond.py
bond.py
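The measurement and modification calls the script wraps, sketched for a hypothetical ethane.xyz:

from AaronTools.geometry import Geometry

geom = Geometry("ethane.xyz")  # hypothetical input
a1 = geom.find("1")[0]
a2 = geom.find("2")[0]
print("d = %f" % a1.dist(a2))
geom.change_distance(a1, a2, dist=1.54, adjust=False)  # set C-C to 1.54 A
geom.change_distance(a1, a2, dist=0.05, adjust=True)   # then stretch by 0.05 A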
from AaronTools.fileIO import FileReader def main(args): fr = FileReader(args.filename, just_geom=False) if args.show: s = "frequency\t" for x in args.show: if x == "vector": continue s += "%s\t" % x print(s) freq = fr.other["frequency"] if not any((args.fundamentals, args.overtones, args.combinations)): for i, data in enumerate(sorted(freq.data, key=lambda x: x.frequency)): if args.type == "neg" and data.frequency > 0: continue if args.type == "pos" and data.frequency < 0: continue if isinstance(args.type, int) and i + 1 > args.type: break s = "%9.4f\t" % data.frequency for x in args.show: if x == "vector": continue val = getattr(data, x) if isinstance(val, float): s += "%9.4f\t" % val else: s += "%s\t" % str(val) print(s) if "vector" in args.show: print(data.vector) print() if freq.anharm_data: for i, data in enumerate(sorted(freq.anharm_data, key=lambda x: x.frequency)): if args.fundamentals: s = "%9.4f\t" % data.frequency for x in args.show: if not hasattr(data, x): continue val = getattr(data, x) if isinstance(val, float): s += "%9.4f\t" % val else: s += "%s\t" % str(val) print(s) if args.overtones: for k, overtone in enumerate(data.overtones): s = "%i x %9.4f = %9.4f\t" % ( (k + 2), data.frequency, overtone.frequency ) for x in args.show: if not hasattr(overtone, x): continue val = getattr(overtone, x) if isinstance(val, float): s += "%9.4f\t" % val else: s += "%s\t" % str(val) print(s) if args.combinations: for key in data.combinations: for combo in data.combinations[key]: s = "%9.4f + %9.4f = %9.4f\t" % ( data.frequency, freq.anharm_data[key].frequency, combo.frequency, ) for x in args.show: if not hasattr(combo, x): continue val = getattr(combo, x) if isinstance(val, float): s += "%9.4f\t" % val else: s += "%s\t" % str(val) print(s) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser( description="Prints frequencies from computational output file", formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument( "filename", help="Completed QM output file with frequency info" ) parser.add_argument( "--type", "-t", type=str, nargs="?", help="Types of frequencies to print (defaults to `all`. Allowed values: `all`, `neg[ative]`, `pos[itive]`, or `x` to print the first int(x) modes)", default="all", ) parser.add_argument( "--show", "-s", type=str, nargs="*", help="Specify what additional information to show\n" "Some info may not be available for certain file formats", choices=["intensity", "vector", "forcek", "symmetry", "delta_anh", "harmonic_frequency", "harmonic_intensity", "rotation", "raman_activity"], default=[], ) parser.add_argument( "--fundamentals", "-f", action="store_true", default=False, dest="fundamentals", help="print anharmonic fundamental frequencies for files with anharmonic data", ) parser.add_argument( "--overtone-bands", "-ob", action="store_true", default=False, dest="overtones", help="print overtone frequencies for files with anharmonic data", ) parser.add_argument( "--combination-bands", "-cb", action="store_true", default=False, dest="combinations", help="print combination frequencies for files with anharmonic data", ) args = parser.parse_args() try: args.type = int(args.type) except ValueError: args.type = args.type.lower() if args.type not in ["all", "neg", "negative", "pos", "positive"]: parser.print_help() exit(1) elif args.type in ["neg", "negative"]: args.type = "neg" elif args.type in ["pos", "positive"]: args.type = "pos" main(args)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/printFreq.py
printFreq.py
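Frequency data is exposed through FileReader.other["frequency"]; each entry has a frequency and, format permitting, extra attributes such as intensity. A sketch that lists imaginary modes of a hypothetical freq.log:

from AaronTools.fileIO import FileReader

fr = FileReader("freq.log", just_geom=False)  # hypothetical frequency output
freq = fr.other["frequency"]
for data in sorted(freq.data, key=lambda x: x.frequency):
    if data.frequency < 0:   # imaginary modes are reported as negative
        print("%9.4f" % data.frequency)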
import argparse import os from sys import stdin from warnings import warn from AaronTools.fileIO import FileReader, read_types from AaronTools.finders import ChiralCenters, Bridgehead, SpiroCenters, NotAny from AaronTools.geometry import Geometry from AaronTools.substituent import Substituent from AaronTools.utils.utils import glob_files changechiral_parser = argparse.ArgumentParser( description="change handedness of chiral centers", formatter_class=argparse.RawTextHelpFormatter ) changechiral_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[stdin], help="a coordinate file" ) changechiral_parser.add_argument( "-ls", "--list-chiral", action="store_const", const=True, default=False, required=False, dest="list_info", help="list information on detected chiral centers" ) changechiral_parser.add_argument( "-if", "--input-format", type=str, nargs=1, default=None, choices=read_types, dest="input_format", help="file format of input - xyz is assumed if input is stdin" ) changechiral_parser.add_argument( "-o", "--output-destination", type=str, default=None, required=False, metavar="output destination", dest="outfile", help="output destination\n" + "$i in the filename will be replaced with a number\n" + "if a directory is given, default is \"diastereomer-$i.xyz\" in \n" + "that directory\n" + "Default: stdout" ) changechiral_parser.add_argument( "-t", "--targets", type=str, default=None, action="append", required=False, dest="targets", help="comma- or hyphen-seperated list of chiral centers to invert (1-indexed)\n" + "Chiral centers must have at least two fragments not in a ring\n" + "Detected chiral centers are atoms that:\n" + " - have > 2 bonds\n" + " - have a non-planar VSEPR shape\n" + " - each connected fragment is distinct or is a spiro center\n" + "Default: change chirality of any detected chiral centers" ) changechiral_parser.add_argument( "-d", "--diastereomers", action="store_const", const=True, default=False, required=False, dest="combos", help="print all diastereomers for selected chiral centers" ) changechiral_parser.add_argument( "-m", "--minimize", action="store_true", default=False, dest="minimize", help="rotate substituents to mitigate steric clashing", ) args = changechiral_parser.parse_args() s = "" for infile in glob_files(args.infile, parser=changechiral_parser): if isinstance(infile, str): if args.input_format is not None: f = FileReader((infile, args.input_format[0], infile)) else: f = FileReader(infile) else: if args.input_format is not None: f = FileReader(("from stdin", args.input_format[0], stdin)) else: f = FileReader(("from stdin", "xyz", stdin)) geom = Geometry(f) target_list = [] if args.targets is None: try: chiral_centers = geom.find(ChiralCenters()) spiro_centers = geom.find(SpiroCenters(), chiral_centers) bridge_centers = geom.find(chiral_centers, Bridgehead(), NotAny(spiro_centers)) target_list = [t for t in chiral_centers if t not in bridge_centers] except LookupError as e: warn(str(e)) else: for targ in args.targets: target_list.extend(geom.find(targ)) if args.list_info: if len(args.infile) > 1: s += "%s\n" % infile s += "Target\tElement\n" for targ in target_list: s += "%-2s\t%-2s\n" % (targ.name, targ.element) if infile is not args.infile[-1]: s += "\n" continue geom.substituents = [] if args.combos: # this stuff is copy-pasted from makeConf, so it's a bit overkill # for getting all diastereomers, as each chiral center can only # have 2 options instead of the random number of rotamers # a substituent can have diastereomers = 
Geometry.get_diastereomers(geom, minimize=args.minimize) for i, diastereomer in enumerate(diastereomers): if args.outfile is None: s += diastereomer.write(outfile=False) s += "\n" else: if os.path.isdir(os.path.expanduser(args.outfile)): outfile = os.path.join( os.path.expanduser(args.outfile), "diastereomer-%i.xyz" % (i + 1) ) else: outfile = args.outfile.replace("$i", str(i + 1)) diastereomer.write(outfile=outfile, append="$i" not in args.outfile) else: for targ in target_list: geom.change_chirality(targ) if args.minimize: geom.minimize_sub_torsion(increment=15) if args.outfile is None: s += geom.write(outfile=False) s += "\n" else: geom.write(outfile=args.outfile) if args.outfile is None or args.list_info: print(s[:-1])
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/changeChirality.py
changeChirality.py
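The detection logic above composes finders, and the inversion is one Geometry call per center. A hedged sketch for a hypothetical chiral input; note that find() raises LookupError when nothing matches, which the script catches and turns into a warning:

from AaronTools.finders import Bridgehead, ChiralCenters, NotAny, SpiroCenters
from AaronTools.geometry import Geometry

geom = Geometry("chiral.xyz")  # hypothetical input
chiral_centers = geom.find(ChiralCenters())
spiro = geom.find(SpiroCenters(), chiral_centers)
bridgeheads = geom.find(chiral_centers, Bridgehead(), NotAny(spiro))
for center in [c for c in chiral_centers if c not in bridgeheads]:
    geom.change_chirality(center)
print(geom.write(outfile=False))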
import sys import argparse import numpy as np from AaronTools.geometry import Geometry from AaronTools.fileIO import FileReader, read_types from AaronTools.utils.utils import get_filename, glob_files mirror_parser = argparse.ArgumentParser( description="mirror a molecular structure", formatter_class=argparse.RawTextHelpFormatter ) mirror_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file" ) mirror_parser.add_argument( "-if", "--input-format", type=str, default=None, choices=read_types, dest="input_format", help="file format of input - xyz is assumed if input is stdin" ) mirror_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", metavar="output destination", help="output destination\n" + "$INFILE will be replaced with the name of the input file\n" + "Default: stdout" ) plane_options = mirror_parser.add_argument_group("plane") plane_options.add_argument( "-yz", "--yz-plane", action="store_true", default=False, dest="yz_plane", help="mirror across the yz plane (default)" ) plane_options.add_argument( "-xz", "--xz-plane", action="store_true", default=False, dest="xz_plane", help="mirror across the xz plane" ) plane_options.add_argument( "-xy", "--xy-plane", action="store_true", default=False, dest="xy_plane", help="mirror across the xy plane" ) args = mirror_parser.parse_args() eye = np.identity(3) if args.yz_plane: eye[0, 0] *= -1 if args.xz_plane: eye[1, 1] *= -1 if args.xy_plane: eye[2, 2] *= -1 if np.sum(eye) == 3: eye[0, 0] *= -1 for f in glob_files(args.infile, parser=mirror_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None)) else: infile = FileReader(f) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format, f)) else: infile = FileReader(("from stdin", "xyz", f)) geom = Geometry(infile) geom.update_geometry(np.dot(geom.coords, eye)) if args.outfile: outfile = args.outfile if "$INFILE" in outfile: outfile = outfile.replace("$INFILE", get_filename(f)) geom.write(append=True, outfile=outfile) else: print(geom.write(outfile=False))
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/mirror.py
mirror.py
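Mirroring is a plain coordinate transform: multiply by a reflection matrix and push the result back with update_geometry. A sketch reflecting a hypothetical structure across the yz plane:

import numpy as np
from AaronTools.geometry import Geometry

geom = Geometry("mol.xyz")  # hypothetical input
reflect = np.identity(3)
reflect[0, 0] = -1          # flip x, i.e. mirror across the yz plane
geom.update_geometry(np.dot(geom.coords, reflect))
print(geom.write(outfile=False))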
import argparse import sys import numpy as np from AaronTools.comp_output import CompOutput from AaronTools.const import UNIT from AaronTools.fileIO import FileReader from AaronTools.finders import NotAny, AnyTransitionMetal, AnyNonTransitionMetal from AaronTools.geometry import Geometry from AaronTools.substituent import Substituent from AaronTools.utils.utils import boltzmann_coefficients, glob_files def main(argv): vbur_parser = argparse.ArgumentParser( description="calculate Boltzmann-weighted percent buried volume parameters", formatter_class=argparse.RawTextHelpFormatter ) vbur_parser.add_argument( "infiles", metavar="input files", type=str, nargs="+", help="file containing coordinates and energy" ) vbur_parser.add_argument( "-if", "--input-format", type=str, default=None, choices=["log", "out", "dat"], dest="input_format", help="file format of input" ) vbur_parser.add_argument( "-o", "--output", type=str, default=False, required=False, metavar="output destination", dest="outfile", help="output destination\n" + "Default: stdout" ) vbur_parser.add_argument( "-l", "--ligand-atoms", default=None, required=False, dest="targets", help="atoms to consider in calculation\nDefault: use all atoms except the center", ) vbur_parser.add_argument( "-e", "--exclude-atoms", default=None, required=False, dest="exclude_atoms", help="atoms to exclude from the calculation\nDefault: exclude no ligand atoms", ) vbur_parser.add_argument( "-c", "--center", action="append", default=None, required=False, dest="center", help="atom the sphere is centered on\n" + "Default: detect metal center (centroid of all metals if multiple are present)", ) vbur_parser.add_argument( "-r", "--radius", default=3.5, type=float, dest="radius", help="radius around center\nDefault: 3.5 Ångström" ) vbur_parser.add_argument( "-vdw", "--vdw-radii", type=str, default="bondi", choices=["bondi", "umn"], dest="radii", help="VDW radii to use in calculation\n" + "umn: main group vdw radii from J. Phys. Chem. A 2009, 113, 19, 5806–5812\n" + " (DOI: 10.1021/jp8111556)\n" + " transition metals are crystal radii from Batsanov, S.S. Van der Waals\n" + " Radii of Elements. Inorganic Materials 37, 871–885 (2001).\n" + " (DOI: 10.1023/A:1011625728803)\n" + "bondi: radii from J. Phys. Chem. 1964, 68, 3, 441–451 (DOI: 10.1021/j100785a001)\n" + "Default: bondi" ) vbur_parser.add_argument( "-s", "--scale", default=1.17, type=float, dest="scale", help="scale VDW radii by this amount\nDefault: 1.17", ) vbur_parser.add_argument( "-t", "--temperature", type=float, default=298.15, required=False, dest="temperature", help="temperature in K\nDefault: 298.15" ) vbur_parser.add_argument( "-f", "--frequency", action="store_true", default=False, required=False, dest="frequency", help="input files are frequency job output files\n" "additional average values will be calculated for ZPE, H, G, etc."
) vbur_parser.add_argument( "-w0", "--frequency-cutoff", type=float, default=100.0, required=False, dest="w0", help="cutoff frequency for quasi free energy corrections (1/cm)\n" + "Default: 100 cm^-1" ) vbur_parser.add_argument( "-v", "--verbose", action="store_true", default=False, required=False, dest="verbose", help="also print population" ) vbur_parser.add_argument( "-m", "--method", default="Lebedev", type=lambda x: x.capitalize() if x.lower() == "lebedev" else x.upper(), choices=["MC", "Lebedev"], dest="method", help="integration method - Monte-Carlo (MC) or Lebedev quadrature (Lebedev)\nDefault: Lebedev" ) grid_options = vbur_parser.add_argument_group("Lebedev integration options") grid_options.add_argument( "-rp", "--radial-points", type=int, default=20, choices=[20, 32, 64, 75, 99, 127], dest="rpoints", help="number of radial shells for Gauss-Legendre integration\n" + "of the radial component\n" + "lower values are faster, but at the cost of accuracy\nDefault: 20" ) grid_options.add_argument( "-ap", "--angular-points", type=int, default=1454, choices=[110, 194, 302, 590, 974, 1454, 2030, 2702, 5810], dest="apoints", help="number of angular points for Lebedev integration\n" + "lower values are faster, but at the cost of accuracy\nDefault: 1454" ) mc_options = vbur_parser.add_argument_group("Monte-Carlo integration options") mc_options.add_argument( "-i", "--minimum-iterations", type=int, default=25, metavar="ITERATIONS", dest="min_iter", help="minimum iterations - each is a batch of 3000 points\n" + "MC will continue after this until convergence criteria are met\n" + "Default: 25", ) args = vbur_parser.parse_args(args=argv) targets = None if args.exclude_atoms and not args.targets: targets = (NotAny(args.exclude_atoms)) elif args.exclude_atoms and args.targets: targets = (NotAny(args.exclude_atoms), args.targets) else: targets = NotAny(args.center) geoms = [] energies = {"E":[]} if args.frequency: energies["E+ZPE"] = [] energies["H(RRHO)"] = [] energies["G(RRHO)"] = [] energies["G(Quasi-RRHO)"] = [] energies["G(Quasi-Harmonic)"] = [] for infile in glob_files(args.infiles, parser=vbur_parser): if args.input_format is not None: fr = FileReader((infile, args.input_format, infile), just_geom=False) else: fr = FileReader(infile, just_geom=False) geom = Geometry(fr) geoms.append(geom) nrg = fr.other["energy"] energies["E"].append(nrg) if args.frequency: co = CompOutput(fr) dE, dH, entropy = co.therm_corr(temperature=args.temperature) rrho_dG = co.calc_G_corr(v0=0, temperature=args.temperature, method="RRHO") qrrho_dG = co.calc_G_corr(v0=args.w0, temperature=args.temperature, method="QRRHO") qharm_dG = co.calc_G_corr(v0=args.w0, temperature=args.temperature, method="QHARM") energies["E+ZPE"].append(nrg + co.ZPVE) energies["H(RRHO)"].append(nrg + dH) energies["G(RRHO)"].append(nrg + rrho_dG) energies["G(Quasi-RRHO)"].append(nrg + qrrho_dG) energies["G(Quasi-Harmonic)"].append(nrg + qharm_dG) s = "" for nrg_type in energies: energies_arr = np.array(energies[nrg_type]) energies_arr *= UNIT.HART_TO_KCAL if args.verbose and nrg_type == "E": s += "\t".join(["%Vbur", "file"]) s += "\n" for f, geom in zip(args.infiles, geoms): data = geom.percent_buried_volume( targets=targets, center=args.center, radius=args.radius, radii=args.radii, scale=args.scale, method=args.method, rpoints=args.rpoints, apoints=args.apoints, min_iter=args.min_iter, ) s += "%.1f%%\t%s\n" % (data, f) s += "\n" s += "weighted using %s:\n" % nrg_type data = Geometry.weighted_percent_buried_volume( geoms, energies_arr, 
args.temperature, targets=targets, center=args.center, radius=args.radius, radii=args.radii, scale=args.scale, method=args.method, rpoints=args.rpoints, apoints=args.apoints, min_iter=args.min_iter, ) if args.verbose: coeff = boltzmann_coefficients(energies_arr, args.temperature) coeff /= sum(coeff) coeff *= 100 for f, c, e in zip(args.infiles, coeff, energies_arr): s += "%s %.1f%% (%.1f kcal/mol)\n" % (f, c, e - min(energies_arr)) s += "%%Vbur: %.2f\n\n" % data if not args.outfile: print(s) else: with open(args.outfile, "w") as f: f.write(s) if __name__ == "__main__": main(None)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/averageBuriedVolume.py
averageBuriedVolume.py
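For a single structure the underlying call is Geometry.percent_buried_volume; the script just Boltzmann-weights it over conformers. A hedged sketch with the script's default parameters, building the center/targets the same way the script does (catalyst.xyz is hypothetical):

from AaronTools.finders import AnyTransitionMetal, NotAny
from AaronTools.geometry import Geometry

geom = Geometry("catalyst.xyz")             # hypothetical metal complex
center = geom.find(AnyTransitionMetal())    # detected metal center(s)
vbur = geom.percent_buried_volume(
    targets=NotAny(center),  # everything except the center
    center=center,
    radius=3.5,              # sphere radius in Angstroms (script default)
    radii="bondi", scale=1.17,
    method="Lebedev", rpoints=20, apoints=1454, min_iter=25,
)
print("%%Vbur = %.1f%%" % vbur)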
import sys import argparse from AaronTools.geometry import Geometry from AaronTools.component import Component from AaronTools.finders import AnyTransitionMetal from AaronTools.fileIO import FileReader, read_types from AaronTools.utils.utils import glob_files cone_parser = argparse.ArgumentParser( description="print ligand cone angle", formatter_class=argparse.RawTextHelpFormatter ) cone_parser.add_argument( "infile", metavar="input file", type=str, nargs="*", default=[sys.stdin], help="a coordinate file", ) cone_parser.add_argument( "-o", "--output", type=str, default=False, required=False, dest="outfile", help="output destination\n" + "$INFILE will be replaced with the name of the input file\n" + "Default: stdout" ) cone_parser.add_argument( "-if", "--input-format", type=str, default=None, dest="input_format", choices=read_types, help="file format of input - xyz is assumed if input is stdin", ) cone_parser.add_argument( "-k", "--key-atoms", type=str, default=None, dest="key_atoms", help="indices of ligand coordinating atoms you are calculating\n" + "the cone angle of (1-indexed)", ) cone_parser.add_argument( "-c", "--center", type=str, default=AnyTransitionMetal(), dest="center", help="index of complex's center atom (1-indexed)\nDefault: transition metals", ) cone_parser.add_argument( "-m", "--method", type=lambda x: x.capitalize(), choices=["Tolman", "Exact"], default="exact", dest="method", help="cone angle type\n" + "Tolman: Tolman's method for unsymmetric mono- and bidentate ligands\n" + " see J. Am. Chem. Soc. 1974, 96, 1, 53–60 (DOI:\n" + " 10.1021/ja00808a009)\n" + "Exact: (Default) Allen's method for an all-encompassing cone\n" + " see Bilbrey, J.A., Kazez, A.H., Locklin, J. and Allen, W.D.\n" + " (2013), Exact ligand cone angles. J. Comput. Chem., 34:\n" + " 1189-1197. (DOI: 10.1002/jcc.23217)", ) cone_parser.add_argument( "-r", "--vdw-radii", default="umn", choices=["umn", "bondi"], dest="radii", help="VDW radii to use in calculation\n" + "umn: main group vdw radii from J. Phys. Chem. A 2009, 113, 19, 5806–5812\n" + " (DOI: 10.1021/jp8111556)\n" + " transition metals are crystal radii from Batsanov, S.S. Van der Waals\n" + " Radii of Elements. Inorganic Materials 37, 871–885 (2001).\n" + " (DOI: 10.1023/A:1011625728803)\n" + "bondi: radii from J. Phys. Chem. 1964, 68, 3, 441–451 (DOI: 10.1021/j100785a001)\n" + "Default: umn", ) cone_parser.add_argument( "-b", "--cone-bild", action="store_true", default=False, dest="print_cones", help="print Chimera/ChimeraX bild file containing cones", ) args = cone_parser.parse_args() s = "" for f in glob_files(args.infile, parser=cone_parser): if isinstance(f, str): if args.input_format is not None: infile = FileReader((f, args.input_format, None)) else: infile = FileReader(f, just_geom=False) else: if args.input_format is not None: infile = FileReader(("from stdin", args.input_format, f)) else: infile = FileReader(("from stdin", "xyz", f)) geom = Geometry(infile) ligand = geom.get_fragment(args.key_atoms, stop=args.center) comp = Component(ligand, key_atoms=args.key_atoms, detect_backbone=False) angle = comp.cone_angle( center=geom.find(args.center), method=args.method, radii=args.radii, return_cones=args.print_cones, ) if args.print_cones: angle, cones = angle if len(args.infile) > 1: s += "%20s:\t" % f s += "%4.1f\n" % angle if args.print_cones: s += ".transparency 0.5\n" for cone in cones: apex, base, radius = cone s += ".cone %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %.3f open\n" % ( *apex, *base, radius ) if not args.outfile: print(s.rstrip()) else: with open(args.outfile, "a") as f: f.write(s.rstrip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/coneAngle.py
coneAngle.py
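The cone-angle computation above reduces to building a Component from the ligand fragment. A hedged sketch, assuming a hypothetical complex.xyz in which atom 1 is the metal and atom 2 is a coordinating atom:

from AaronTools.component import Component
from AaronTools.geometry import Geometry

geom = Geometry("complex.xyz")              # hypothetical input
ligand = geom.get_fragment("2", stop="1")   # ligand atoms, walking away from the metal
comp = Component(ligand, key_atoms="2", detect_backbone=False)
angle = comp.cone_angle(
    center=geom.find("1"),
    method="Exact",          # or "Tolman"
    radii="umn",
    return_cones=False,
)
print("cone angle: %4.1f" % angle)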
import argparse
import sys

import numpy as np

from AaronTools.comp_output import CompOutput
from AaronTools.const import UNIT
from AaronTools.fileIO import FileReader
from AaronTools.geometry import Geometry
from AaronTools.substituent import Substituent
from AaronTools.utils.utils import boltzmann_coefficients, glob_files


def main(argv):
    sterimol_parser = argparse.ArgumentParser(
        description="calculate Boltzmann-weighted Sterimol parameters - see doi 10.1021/acscatal.8b04043",
        formatter_class=argparse.RawTextHelpFormatter
    )

    sterimol_parser.add_argument(
        "infiles", metavar="input files", type=str, nargs="+",
        help="file containing coordinates and energy"
    )

    sterimol_parser.add_argument(
        "-if", "--input-format", type=str, default=None,
        choices=["log", "out", "dat"], dest="input_format",
        help="file format of input"
    )

    sterimol_parser.add_argument(
        "-s", "--substituent-atom", type=str, required=True, dest="targets",
        help="substituent atom\n" +
        "1-indexed position of the starting position of the\n" +
        "substituent of which you are calculating sterimol\nparameters"
    )

    sterimol_parser.add_argument(
        "-a", "--attached-to", type=str, required=True, dest="avoid",
        help="non-substituent atom\n" +
        "1-indexed position of the starting position of the atom\n" +
        "connected to the substituent of which you are calculating\n" +
        "sterimol parameters"
    )

    sterimol_parser.add_argument(
        "-r", "--radii", type=str, default="bondi", choices=["bondi", "umn"],
        dest="radii",
        help="VDW radii to use in calculation\n" +
        "umn: main group vdw radii from J. Phys. Chem. A 2009, 113, 19, 5806–5812\n" +
        "    (DOI: 10.1021/jp8111556)\n" +
        "    transition metals are crystal radii from Batsanov, S.S. Van der Waals\n" +
        "    Radii of Elements. Inorganic Materials 37, 871–885 (2001).\n" +
        "    (DOI: 10.1023/A:1011625728803)\n" +
        "bondi: radii from J. Phys. Chem. 1964, 68, 3, 441–451 (DOI: 10.1021/j100785a001)\n" +
        "Default: bondi"
    )

    sterimol_parser.add_argument(
        "-l", "--old-l", action="store_true", required=False, dest="old_L",
        help="approximate FORTRAN Sterimol method for determining L\n"
        "This is 0.4 + the ideal bond length for a target-H bond\n"
        "to outer VDW radii of atoms projected onto L-axis\n"
        "Default: L value is from VDW radii of target atom to outer\n"
        "VDW radii of atoms projected onto L-axis"
    )

    sterimol_parser.add_argument(
        "-t", "--temperature", type=float, default=298.15, required=False,
        dest="temperature",
        help="temperature in K\nDefault: 298.15"
    )

    sterimol_parser.add_argument(
        "-f", "--frequency", action="store_true", default=False,
        required=False, dest="frequency",
        help="input files are frequency job output files\n"
        "additional average values will be calculated for ZPE, H, G, etc."
    )

    sterimol_parser.add_argument(
        "-w0", "--frequency-cutoff", type=float, default=100.0,
        required=False, dest="w0",
        help="cutoff frequency for quasi free energy corrections (1/cm)\n" +
        "Default: 100 cm^-1"
    )

    sterimol_parser.add_argument(
        "-v", "--verbose", action="store_true", default=False,
        required=False, dest="verbose",
        help="also print population"
    )

    sterimol_parser.add_argument(
        "-o", "--output", type=str, default=False, required=False,
        metavar="output destination", dest="outfile",
        help="output destination\n" +
        "Default: stdout"
    )

    args = sterimol_parser.parse_args(args=argv)

    subs = []
    energies = {"E": []}
    if args.frequency:
        energies["E+ZPE"] = []
        energies["H(RRHO)"] = []
        energies["G(RRHO)"] = []
        energies["G(Quasi-RRHO)"] = []
        energies["G(Quasi-Harmonic)"] = []

    for infile in glob_files(args.infiles, parser=sterimol_parser):
        if args.input_format is not None:
            fr = FileReader((infile, args.input_format, infile), just_geom=False)
        else:
            fr = FileReader(infile, just_geom=False)

        geom = Geometry(fr)
        target = args.targets
        avoid = args.avoid
        end = geom.find(avoid)[0]
        frag = geom.get_fragment(target, stop=end)
        sub = Substituent(frag, end=end, detect=False)
        subs.append(sub)

        nrg = fr.other["energy"]
        energies["E"].append(nrg)
        if args.frequency:
            co = CompOutput(fr)
            dE, dH, entropy = co.therm_corr(temperature=args.temperature)
            rrho_dG = co.calc_G_corr(v0=0, temperature=args.temperature, method="RRHO")
            qrrho_dG = co.calc_G_corr(v0=args.w0, temperature=args.temperature, method="QRRHO")
            qharm_dG = co.calc_G_corr(v0=args.w0, temperature=args.temperature, method="QHARM")
            energies["E+ZPE"].append(nrg + co.ZPVE)
            energies["H(RRHO)"].append(nrg + dH)
            energies["G(RRHO)"].append(nrg + rrho_dG)
            energies["G(Quasi-RRHO)"].append(nrg + qrrho_dG)
            energies["G(Quasi-Harmonic)"].append(nrg + qharm_dG)

    s = ""
    for nrg_type in energies:
        energies_arr = np.array(energies[nrg_type])
        energies_arr *= UNIT.HART_TO_KCAL
        if args.verbose and nrg_type == "E":
            s += "\t".join(["B1", "B2", "B3", "B4", "B5", "L", "file"])
            s += "\n"
            for f, sub in zip(args.infiles, subs):
                data = sub.sterimol(radii=args.radii, old_L=args.old_L)
                s += "\t".join(
                    ["%.2f" % data[x] for x in ["B1", "B2", "B3", "B4", "B5", "L"]]
                )
                s += "\t%s\n" % f

        s += "weighted using %s:\n" % nrg_type
        data = Substituent.weighted_sterimol(
            subs, energies_arr, args.temperature,
            radii=args.radii, old_L=args.old_L,
        )
        if args.verbose:
            coeff = boltzmann_coefficients(energies_arr, args.temperature)
            coeff /= sum(coeff)
            coeff *= 100
            for f, c, e in zip(args.infiles, coeff, energies_arr):
                s += "%s %.1f%% (%.1f kcal/mol)\n" % (f, c, e - min(energies_arr))

        s += "\t".join(["B1", "B2", "B3", "B4", "B5", "L"])
        s += "\n"
        s += "\t".join(["%.2f" % data[x] for x in ["B1", "B2", "B3", "B4", "B5", "L"]])
        s += "\n"
        s += "\n"

    if not args.outfile:
        print(s)
    else:
        with open(args.outfile, "w") as f:
            f.write(s)


if __name__ == "__main__":
    main(None)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/averageSterimol.py
averageSterimol.py
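The weighting step above boils down to Boltzmann populations over relative conformer energies. A self-contained sketch of that arithmetic (not the AaronTools implementation; boltzmann_coefficients may use a different normalization convention):

    import numpy as np

    def boltzmann_weights(energies_kcal, temperature=298.15):
        # relative energies in kcal/mol -> normalized populations
        R = 0.0019872041  # gas constant in kcal/(mol K)
        rel = np.asarray(energies_kcal) - min(energies_kcal)
        coeff = np.exp(-rel / (R * temperature))
        return coeff / coeff.sum()

    # a conformer 0.5 kcal/mol above the minimum holds ~30% of the population at 298 K
    print(boltzmann_weights([0.0, 0.5]))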
import argparse
from warnings import warn

import numpy as np

from AaronTools.fileIO import FileReader
from AaronTools.geometry import Geometry
from AaronTools.pathway import Pathway
from AaronTools.utils.utils import get_filename


def width(n):
    """use to determine 0-padding based on number of files we're printing"""
    return np.ceil(np.log10(n))


def parse_mode_str(s, t):
    """split mode string into modes and mode combos
    e.g.
    t=int, 1,2+3,4 -> [[0], [1,2], [3]]
    t=float 0.1,0.05+0.03,0.07 -> [[0.1], [0.05, 0.03], [0.07]]"""

    # the way this is being used is if t is int, we are changing 1-indexed things to 0-index
    # if t is float, we're going to use the result to scale a normal mode (don't subtract 1)
    if t is not int and t is not float:
        raise TypeError(
            "can only parse mode string into ints or floats, not %s" % repr(t)
        )

    modes = s.split(",")
    out_modes = []
    for mode in modes:
        out_modes.append([])
        for combo in mode.split("+"):
            if t is int:
                out_modes[-1].append(int(combo) - 1)
            elif t is float:
                out_modes[-1].append(float(combo))

    return out_modes


follow_parser = argparse.ArgumentParser(
    description="move the structure along a normal mode",
    formatter_class=argparse.RawTextHelpFormatter,
)

follow_parser.add_argument(
    "input_file", metavar="input file", type=str,
    help='input frequency file (i.e. Gaussian output where "freq" was specified)',
)

follow_parser.add_argument(
    "-m", "--mode", type=str, nargs="+", required=False, default=None,
    dest="mode", metavar=("mode 1", "mode 2"),
    help="comma-separated list of modes to follow (1-indexed)",
)

follow_parser.add_argument(
    "-r", "--reverse", action="store_const", const=True, default=False,
    required=False, dest="reverse",
    help="follow the normal mode in the opposite direction",
)

follow_parser.add_argument(
    "-a", "--animate", type=int, nargs=1, default=None, required=False,
    dest="animate", metavar="frames",
    help="print specified number of structures to make an animation",
)

follow_parser.add_argument(
    "-rt", "--roundtrip", action="store_const", const=True, default=False,
    required=False, dest="roundtrip",
    help="make animation roundtrip",
)

follow_parser.add_argument(
    "-s", "--scale", type=str, nargs=1, default=None, required=False,
    dest="scale", metavar="max displacement",
    help="scale the normal mode so that this is the maximum amount an \natom is displaced",
)

follow_parser.add_argument(
    "-o", "--output-destination", type=str, nargs="+", default=False,
    required=False, dest="outfile",
    help="output destination\n" +
    "$i in file name will be replaced with zero-padded numbers if --animate is used\n" +
    "$INFILE will be replaced with the name of the input file\n" +
    "Default: stdout",
)

args = follow_parser.parse_args()

fr = FileReader(args.input_file, just_geom=False)
geom = Geometry(fr)

if args.mode is None:
    # default to any imaginary modes
    modes = [
        [i] for i, freq in enumerate(fr.other["frequency"].data)
        if freq.frequency < 0
    ]
else:
    modes = parse_mode_str(args.mode[0], int)

if not modes:
    raise RuntimeError("no vibrational mode specified and no imaginary modes found")

# copy the list of output files or set all output files to False (print all to stdout)
if args.outfile is not False:
    outfiles = [f for f in args.outfile]
else:
    outfiles = [False for m in modes]

if len(outfiles) != len(modes):
    warn(
        "number of output files does not match number of modes: %i files, %i modes" % (
            len(outfiles), len(modes)
        )
    )

if args.scale is None:
    scale = [[0.35] * len(mode) for mode in modes]
else:
    scale = parse_mode_str(args.scale[0], float)

for i, mode in enumerate(modes):
    if outfiles[i]:
        if "$i" not in outfiles[i]:
            append = True
        else:
            append = False

    dX = np.zeros((len(geom.atoms), 3))

    # figure out how much we'll have to scale each mode
    for j, combo in enumerate(mode):
        max_norm = 0
        for k, v in enumerate(fr.other["frequency"].data[combo].vector):
            n = np.linalg.norm(v)
            if n > max_norm:
                max_norm = n

        # scale this mode by 0.35 (or whatever the user asked for)/max_norm
        x_factor = scale[i][j] / max_norm
        if args.reverse:
            x_factor *= -1

        dX += x_factor * fr.other["frequency"].data[combo].vector

    if args.animate is not None:
        # animate by setting up 3 geometries: -, 0, and +
        # then create a Pathway to interpolate between these
        # if roundtrip, - -> 0 -> + -> 0 -> -
        # if not roundtrip, - -> 0 -> +
        w = width(args.animate[0])
        fmt = "%0" + "%i" % w + "i"

        Xf = geom.coords + dX
        X = geom.coords
        Xr = geom.coords - dX

        # record each mode's scale(t) so we can see the animation progress in the XYZ file comment
        other_vars = {}
        if args.roundtrip:
            for n, mode_scale in enumerate(scale[i]):
                other_vars["scale %i" % n] = [
                    mode_scale, 0, -mode_scale, 0, mode_scale,
                ]
            pathway = Pathway(geom, np.array([Xf, X, Xr, X, Xf]), other_vars=other_vars)
        else:
            for n, mode_scale in enumerate(scale[i]):
                other_vars["scale %i" % n] = [mode_scale, 0, -mode_scale]
            pathway = Pathway(geom, np.array([Xf, X, Xr]), other_vars=other_vars)

        # print animation frames
        for k, t in enumerate(np.linspace(0, 1, num=args.animate[0])):
            if outfiles[i] is not False:
                outfile = outfiles[i].replace("$i", fmt % k)
            else:
                outfile = outfiles[i]

            followed_geom = pathway.geom_func(t)
            followed_geom.comment = (
                "animating mode %s scaled to displace at most [%s]" % (
                    repr(mode),
                    ", ".join(str(pathway.var_func[key](t)) for key in other_vars),
                )
            )

            if args.outfile:
                followed_geom.write(
                    append=False,
                    outfile=outfile.replace("$INFILE", get_filename(args.input_file))
                )
            else:
                print(followed_geom.write(outfile=False))

    else:
        w = width(len(modes))
        fmt = "%0" + "%i" % w + "i"

        followed_geom = geom.copy()
        followed_geom.update_geometry(geom.coords + dX)
        followed_geom.comment = "following mode %s scaled to displace at most %s" % (
            repr(mode), repr(scale[i]),
        )

        outfile = outfiles[i]
        if args.outfile:
            if "$INFILE" in outfile:
                outfile = outfile.replace("$INFILE", get_filename(args.input_file))
            followed_geom.write(append=True, outfile=outfile)
        else:
            print(followed_geom.write(outfile=False))
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/follow.py
follow.py
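To make the mode-string grammar in follow.py concrete: commas separate output structures, plus signs combine modes within one structure. A quick check of parse_mode_str, with expectations taken from its docstring:

    assert parse_mode_str("1,2+3,4", int) == [[0], [1, 2], [3]]
    assert parse_mode_str("0.1,0.05+0.03,0.07", float) == [[0.1], [0.05, 0.03], [0.07]]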
import argparse
import os
import sys

import numpy as np

from AaronTools.geometry import Geometry
from AaronTools.substituent import Substituent

libaddsub_parser = argparse.ArgumentParser(
    description="add a substituent to your personal library",
    formatter_class=argparse.RawTextHelpFormatter,
)

libaddsub_parser.add_argument(
    "infile", metavar="input file", type=str, default=None,
    help="a coordinate file",
)

libaddsub_parser.add_argument(
    "-n", "--name", type=str, required=False, default=None, dest="name",
    help="Name of substituent being added to the library\n" +
    "if no name is given, the substituent will be printed to STDOUT",
)

libaddsub_parser.add_argument(
    "-s", "--substituent-atom", type=str, nargs="+", required=True,
    dest="target",
    help="substituent atom connected to the rest of the molecule (1-indexed)",
)

libaddsub_parser.add_argument(
    "-a", "--attached-to", type=str, nargs="+", required=True, dest="avoid",
    help="atom on the molecule that is connected to the substituent",
)

libaddsub_parser.add_argument(
    "-c", "--conf", type=int, nargs=2, required=True,
    metavar=("CONFORMERS", "ANGLE"), dest="confangle",
    help="number of conformers and the rotation angle (degrees) used to generate each conformer",
)

args = libaddsub_parser.parse_args()

n_confs, angle = args.confangle

if n_confs < 1:
    raise RuntimeError("conformers cannot be < 1")

geom = Geometry(args.infile)
geom.coord_shift(-geom.COM(args.avoid))
sub = geom.get_fragment(args.target, args.avoid, as_object=True)

# rotate the substituent so its bond to the rest of the molecule lies along the x-axis;
# the angle comes from the chord length between the two unit vectors (law of cosines)
target = geom.COM(args.target)
x_axis = np.array([1.0, 0.0, 0.0])
n = np.linalg.norm(target)
vb = target / n
d = np.linalg.norm(vb - x_axis)
theta = np.arccos((d ** 2 - 2) / -2)
vx = np.cross(vb, x_axis)
sub.rotate(vx, theta)

sub.comment = "CF:%i,%i" % (n_confs, angle)

if args.name is None:
    print(sub.write(outfile=False))
else:
    sub_file = os.path.join(
        os.path.dirname(Substituent.AARON_LIBS), args.name + ".xyz"
    )
    if os.path.exists(sub_file):
        overwrite = input(
            "%s already exists.\nWould you like to overwrite it? (yes/NO)\n" % sub_file
        )
        if overwrite.lower() not in ["yes", "y"]:
            print("not overwriting")
            sys.exit(0)

    sub.write(outfile=sub_file)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/libaddSubstituent.py
libaddSubstituent.py
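The arccos((d ** 2 - 2) / -2) step in libaddSubstituent.py is the chord-length form of the angle between two unit vectors: with |u| = |v| = 1, the law of cosines gives d^2 = 2 - 2 cos(theta). A standalone numeric check:

    import numpy as np

    u = np.array([1.0, 0.0, 0.0])
    v = np.array([0.0, 1.0, 0.0])          # 90 degrees away from u
    d = np.linalg.norm(v - u)              # chord length between the unit vectors
    theta = np.arccos((d ** 2 - 2) / -2)
    assert np.isclose(theta, np.pi / 2)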
import argparse

import numpy as np

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader


def parse_mode_str(s, t):
    """split mode string into modes and mode combos
    e.g.
    t=int, 1,2+3,4 -> [[0], [1,2], [3]]
    t=float 0.1,0.05+0.03,0.07 -> [[0.1], [0.05, 0.03], [0.07]]"""

    # the way this is being used is if t is int, we are changing 1-indexed things to 0-index
    # if t is float, we're going to use the result to scale a normal mode (don't subtract 1)
    if t is not int and t is not float:
        raise TypeError("can only parse mode string into ints or floats, not %s" % repr(t))

    modes = s.split(",")
    out_modes = []
    for mode in modes:
        out_modes.append([])
        for combo in mode.split("+"):
            if t is int:
                out_modes[-1].append(int(combo) - 1)
            elif t is float:
                out_modes[-1].append(float(combo))

    return out_modes


freqbild_parser = argparse.ArgumentParser(
    description="print Chimera bild file with vectors for the specified normal modes to std out",
    formatter_class=argparse.RawTextHelpFormatter
)

freqbild_parser.add_argument(
    "infile", metavar="input file", type=str, default=None,
    help="a frequency job output file"
)

freqbild_parser.add_argument(
    "-m", "--mode", type=str, nargs=1, default=None, required=False,
    metavar="1,2+3,4", dest="mode_str",
    help="mode(s) to print (1-indexed)\n" +
    "Default is to print all imaginary modes separately\n" +
    "- comma (,) delimited modes will be printed separately\n" +
    "- plus (+) delimited modes will be combined"
)

freqbild_parser.add_argument(
    "-s", "--scale", type=str, nargs=1, default=None, required=False,
    dest="scale", metavar="max displacement",
    help="scale the longest vector to be this many Angstroms long\n" +
    "default is 1.5\nmay be delimited in accordance with the --mode option"
)

freqbild_parser.add_argument(
    "-r", "--remove-mass", action="store_const", const=True, default=False,
    required=False, dest="mass_weight",
    help="remove mass-weighting from normal modes"
)

freqbild_parser.add_argument(
    "-c", "--color", type=str, nargs="+", default=["green"], required=False,
    dest="color", metavar=("BILD 1 color", "BILD 2 color"),
    help="color of vectors"
)

args = freqbild_parser.parse_args()

fr = FileReader(args.infile, just_geom=False)
geom = Geometry(fr)

if args.mode_str is None:
    # if no modes were requested, print all the imaginary ones
    modes = [
        [i] for i, freq in enumerate(fr.other["frequency"].data)
        if freq.frequency < 0
    ]
else:
    # otherwise, split the modes on delimiters
    modes = parse_mode_str(args.mode_str[0], int)

if args.scale is None:
    scale = [[1.5] * len(mode) for mode in modes]
else:
    scale = parse_mode_str(args.scale[0], float)

# repeat the color list until there is a color for every mode
color = args.color
colors = len(color)
while colors < len(modes):
    color.extend(args.color)
    colors = len(color)

# output is the string of everything we'll print
output = ""
for i, mode in enumerate(modes):
    output += ".color %s\n.comment " % args.color[i]
    dX = np.zeros((len(geom.atoms), 3))

    # figure out how much we'll have to scale each mode
    for j, combo in enumerate(mode):
        output += "%f cm^-1" % fr.other["frequency"].data[combo].frequency
        max_norm = 0
        for k, v in enumerate(fr.other["frequency"].data[combo].vector):
            if args.mass_weight:
                n = np.linalg.norm(v) * geom.atoms[k].mass()
            else:
                n = np.linalg.norm(v)

            if n > max_norm:
                max_norm = n

        # scale this mode by 1.5 (or whatever the user asked for)/max_norm
        x_factor = scale[i][j] / max_norm
        dX += x_factor * fr.other["frequency"].data[combo].vector
        output += " x %.2f " % x_factor

    output += "\n"

    for n in range(0, len(geom.atoms)):
        # scale the vector for each atom and add it to output
        if args.mass_weight:
            dX[n] *= geom.atoms[n].mass()

        v_len = np.linalg.norm(dX[n])

        # we also scale the cone part of the arrow
        start = [x for x in geom.atoms[n].coords]
        stop = [x for x in geom.atoms[n].coords + dX[n]]
        head_len = [v_len / (v_len + 0.75)]

        if v_len > 0.1:
            output += (
                ".arrow %10.6f %10.6f %10.6f " % tuple(start) +
                "%10.6f %10.6f %10.6f " % tuple(stop) +
                "0.02 0.05 %5.3f\n" % tuple(head_len)
            )

print(output.rstrip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/printFreqBild.py
printFreqBild.py
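For reference, printFreqBild.py emits Chimera BILD records: a .color line, a .comment line carrying the frequency and applied scale factor, then one .arrow per atom whose displacement is long enough to draw. Illustrative (made-up) output:

    .color green
    .comment -512.345678 cm^-1 x 0.42
    .arrow   1.234567   0.000000   0.000000   1.734567   0.000000   0.000000 0.02 0.05 0.400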
import sys
import argparse
import re

import numpy as np

from AaronTools.fileIO import FileReader, read_types
from AaronTools.utils.utils import glob_files

info_parser = argparse.ArgumentParser(
    description="print information in Gaussian, ORCA, or Psi4 output files",
    formatter_class=argparse.RawTextHelpFormatter
)

info_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*",
    default=[sys.stdin],
    help="a coordinate file"
)

info_parser.add_argument(
    "-o", "--output", type=str, default=False, required=False,
    dest="outfile",
    help="output destination\nDefault: stdout"
)

info_parser.add_argument(
    "-if", "--input-format", type=str, default=None, dest="input_format",
    choices=read_types,
    help="file format of input - xyz is assumed if input is stdin"
)

info_parser.add_argument(
    "-ls", "--list", action="store_true", default=False, required=False,
    dest="list",
    help="list info categories and exit",
)

info_parser.add_argument(
    "-i", "--info", type=str, default=[], action="append", required=False,
    dest="info",
    help="information to print\n" +
    "Default is all info"
)

info_parser.add_argument(
    "-csv", "--csv-format", nargs="?", default=False,
    choices=("comma", "semicolon", "tab", "space"), required=False,
    dest="csv",
    help="print info in CSV format with the specified separator\n" +
    "Default: do not print in CSV format",
)

args = info_parser.parse_args()

if args.csv is None:
    args.csv = "comma"

if args.csv:
    if args.csv == "comma":
        sep = ","
    elif args.csv == "tab":
        sep = "\t"
    elif args.csv == "semicolon":
        sep = ";"
    else:
        sep = " "

s = ""

np.set_printoptions(precision=5)

for f in glob_files(args.infile, parser=info_parser):
    if isinstance(f, str):
        if args.input_format is not None:
            infile = FileReader((f, args.input_format, None), just_geom=False)
        else:
            infile = FileReader(f, just_geom=False)
    else:
        if args.input_format is not None:
            infile = FileReader(("from stdin", args.input_format, f), just_geom=False)
        else:
            infile = FileReader(("from stdin", "xyz", f), just_geom=False)

    if args.list:
        s += "info in %s:\n" % f
        for key in infile.other.keys():
            s += "\t%s\n" % key
    else:
        s += "%s:\n" % f
        missing_keys = [
            key for key in args.info if not any(
                re.search(key, data_key, flags=re.IGNORECASE)
                for data_key in infile.other.keys()
            )
        ]
        if missing_keys:
            s += "\nmissing some info: %s\n" % ", ".join(missing_keys)

        for key in infile.other.keys():
            if args.info == [] or any(
                re.search(info, key, flags=re.IGNORECASE) for info in args.info
            ):
                if isinstance(infile.other[key], str):
                    if args.csv:
                        s += "\"%s\"%s%s\n" % (key, sep, infile.other[key])
                    else:
                        s += "\t%-30s=\t%s\n" % (key, infile.other[key])
                elif isinstance(infile.other[key], bool):
                    if args.csv:
                        s += "\"%s\"%s%s\n" % (key, sep, infile.other[key])
                    else:
                        s += "\t%-30s =\t%s\n" % (key, str(infile.other[key]))
                elif isinstance(infile.other[key], int):
                    if args.csv:
                        s += "\"%s\"%s%i\n" % (key, sep, infile.other[key])
                    else:
                        s += "\t%-30s =\t%i\n" % (key, infile.other[key])
                elif isinstance(infile.other[key], float):
                    if args.csv:
                        s += "\"%s\"%s%.8f\n" % (key, sep, infile.other[key])
                    else:
                        s += "\t%-30s =\t%.8f\n" % (key, infile.other[key])
                elif isinstance(infile.other[key], list) or (
                    isinstance(infile.other[key], np.ndarray) and
                    infile.other[key].ndim == 1
                ):
                    if args.csv:
                        s += "\"%s\"%s%s\n" % (
                            key, sep,
                            sep.join([str(x) for x in infile.other[key]])
                        )
                    else:
                        s += "\t%-30s =\t%s\n" % (
                            key,
                            ", ".join([str(x) for x in infile.other[key]])
                        )
                elif isinstance(infile.other[key], np.ndarray):
                    if args.csv:
                        s += "\"%s\"%s" % (key, sep)
                        vectorized = np.reshape(
                            infile.other[key], (infile.other[key].size,)
                        )
                        if isinstance(vectorized[0], float):
                            s += sep.join(["%11.8f" % x for x in vectorized])
                        else:
                            s += sep.join([str(x) for x in vectorized])
                        s += "\n"
                    else:
                        s += "\t%-30s =\n" % key
                        for line in str(infile.other[key]).splitlines():
                            s += "\t\t%s\n" % line

if not args.outfile:
    print(s.strip())
else:
    with open(args.outfile, "a") as f:
        f.write(s.strip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/printInfo.py
printInfo.py
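The -i filters in printInfo.py are regular expressions matched case-insensitively against the data keys, so partial matches work. A tiny illustration of the matching rule (keys here are invented):

    import re

    keys = ["energy", "frequency", "archive"]
    patterns = ["ENERGY", "freq"]
    print([k for k in keys
           if any(re.search(p, k, flags=re.IGNORECASE) for p in patterns)])
    # ['energy', 'frequency']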
import argparse
import sys

from AaronTools.component import Component
from AaronTools.fileIO import FileReader, read_types
from AaronTools.geometry import Geometry
from AaronTools.utils.utils import get_filename, glob_files


def get_matching_ligands(name):
    name_info = name
    coordinating_elements = None
    denticity = None
    if isinstance(name_info, str):
        if (
            "name:" not in name_info.lower() and
            "elements:" not in name_info.lower() and
            "denticity:" not in name_info.lower()
        ):
            name_info = "^%s$" % name_info
        else:
            lig_info = name.split(":")
            for i, info in enumerate(lig_info):
                if info.lower() == "name" and i + 1 < len(lig_info):
                    name_info = name_info.replace("%s:%s" % (info, lig_info[i + 1]), "")
                    name_info = lig_info[i + 1]
                elif info.lower() == "elements" and i + 1 < len(lig_info):
                    name_info = name_info.replace("%s:%s" % (info, lig_info[i + 1]), "")
                    coordinating_elements = lig_info[i + 1].split(",")
                elif info.lower() == "denticity" and i + 1 < len(lig_info):
                    name_info = name_info.replace("%s:%s" % (info, lig_info[i + 1]), "")
                    denticity = int(lig_info[i + 1])

            if not name_info:
                name_info = None

    return Component.list(
        name_regex=name_info,
        coordinating_elements=coordinating_elements,
        denticity=denticity
    )


maplig_parser = argparse.ArgumentParser(
    description="replace a ligand on an organometallic system",
    formatter_class=argparse.RawTextHelpFormatter,
)

maplig_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*",
    default=[sys.stdin],
    help="a coordinate file",
)

maplig_parser.add_argument(
    "-ls", "--list", nargs="?", const=None, default=False, required=False,
    dest="list_avail", metavar="elements:X[,Y...] | name:RegEx",
    help="list available ligands\n" +
    "elements:X[,Y] can be used to only list ligands that coordinate\n" +
    "with the specified elements - must match exactly\n" +
    "name:RegEx can be used to only list ligands with names matching\n" +
    "the supplied regular expression - matches are case-insensitive",
)

maplig_parser.add_argument(
    "-if", "--input-format", type=str, nargs=1, default=None,
    choices=read_types, dest="input_format",
    help="file format of input - xyz is assumed if input is stdin",
)

maplig_parser.add_argument(
    "-l", "--ligand", metavar="[n[,m...]]=ligand | ligand", type=str,
    default=None, required=False, dest="ligand",
    help="ligand used to replace the current one\n" +
    "n[,m...] are the 1-indexed positions of the coordinating atoms of the\n" +
    "ligand that is being replaced\n" +
    "if these indices are not provided, they will be guessed\n" +
    "elements:X[,Y] or name:RegEx can be used in place of ligand\n" +
    "to swap ligands matching these criteria (see --list option)",
)

maplig_parser.add_argument(
    "-c", "--center", required=False, default=None, dest="center",
    help="catalyst center the ligand is bonded to\nDefault: any transition metal"
)

maplig_parser.add_argument(
    "-o", "--output", type=str, default=False, required=False,
    metavar="output destination", dest="outfile",
    help="output destination\n" +
    "$LIGAND will be replaced with ligand name\n" +
    "$INFILE will be replaced with the name of the input file\n" +
    "Default: stdout",
)

args = maplig_parser.parse_args()

if args.list_avail is not False:
    s = ""
    for i, name in enumerate(sorted(get_matching_ligands(args.list_avail))):
        s += "%-35s" % name
        if (i + 1) % 3 == 0:
            s += "\n"

    print(s.strip())
    sys.exit(0)

for infile in glob_files(args.infile, parser=maplig_parser):
    if isinstance(infile, str):
        if args.input_format is not None:
            f = FileReader((infile, args.input_format[0], infile))
        else:
            f = FileReader(infile)
    else:
        if args.input_format is not None:
            f = FileReader(("from stdin", args.input_format[0], infile))
        else:
            f = FileReader(("from stdin", "xyz", infile))

    cat = Geometry(f)
    if args.center:
        cat.detect_components(center=args.center)

    # TODO: change this if to a regex
    if "=" in args.ligand:
        key_atoms = args.ligand.split("=")[0]
        lig_names = [l for l in "=".join(args.ligand.split("=")[1:]).split(",")]
    else:
        lig_names = args.ligand.split(",")
        key_atoms = []

    for matching_ligands in [get_matching_ligands(lig_name) for lig_name in lig_names]:
        for lig_name in matching_ligands:
            ligands = [Component(lig_name)]
            cat_copy = cat.copy()

            if key_atoms != []:
                key = cat_copy.find(key_atoms)
            else:
                key = []

            original_ligands = [l for l in ligands]

            lig_keys = sum([len(ligand.key_atoms) for ligand in ligands])
            while len(key) > lig_keys:
                ligands.extend(l.copy() for l in original_ligands)
                lig_keys += sum([len(ligand.key_atoms) for ligand in original_ligands])

            j = 0
            while len(key) < sum([len(ligand.key_atoms) for ligand in ligands]):
                if j >= len(cat.components["ligand"]):
                    raise RuntimeError(
                        "new ligand appears to have a higher denticity than old ligands combined"
                    )
                key.extend(cat.components["ligand"][j].key_atoms)
                j += 1

            cat_copy.map_ligand(ligands, key)

            if args.outfile:
                outfile = args.outfile
                if "$INFILE" in outfile:
                    outfile = outfile.replace("$INFILE", get_filename(infile))
                outfile = outfile.replace("$LIGAND", lig_name)
                cat_copy.write(append=False, outfile=outfile)
            else:
                s = cat_copy.write(outfile=False)
                print(s)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/mapLigand.py
mapLigand.py
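The TODO in mapLigand.py asks for a regex in place of the '=' test when splitting the --ligand argument. One possible (hypothetical) pattern, shown separately from the script:

    import re

    # "1,2=bipy" -> keys "1,2", name "bipy"; bare "bipy" -> keys None
    lig_spec = re.compile(r"^(?:(?P<keys>[^=]+)=)?(?P<name>.+)$")

    m = lig_spec.match("1,2=bipyridine")
    print(m.group("keys"), m.group("name"))   # 1,2 bipyridine
    m = lig_spec.match("bipyridine")
    print(m.group("keys"), m.group("name"))   # None bipyridine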
import sys
import argparse
from warnings import warn

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader, read_types
from AaronTools.finders import NotAny
from AaronTools.utils.utils import glob_files

vbur_parser = argparse.ArgumentParser(
    description="calculate % volume buried in a sphere around a center atom - see Organometallics 2008, 27, 12, 2679–2681",
    formatter_class=argparse.RawTextHelpFormatter
)

vbur_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="*",
    default=[sys.stdin],
    help="a coordinate file"
)

vbur_parser.add_argument(
    "-o", "--output", type=str, default=False, required=False,
    dest="outfile",
    help="output destination\nDefault: stdout"
)

vbur_parser.add_argument(
    "-if", "--input-format", type=str, default=None, dest="input_format",
    choices=read_types,
    help="file format of input - xyz is assumed if input is stdin"
)

vbur_parser.add_argument(
    "-t", "--targets", default=None, required=False, dest="targets",
    help="atoms to consider in calculation\nDefault: use all atoms except the center",
)

vbur_parser.add_argument(
    "-e", "--exclude-atoms", default=None, required=False,
    dest="exclude_atoms",
    help="atoms to exclude from the calculation\nDefault: exclude no ligand atoms",
)

vbur_parser.add_argument(
    "-c", "--center", action="append", default=None, required=False,
    dest="center",
    help="atom the sphere is centered on\n" +
    "Default: detect metal center (centroid of all metals if multiple are present)",
)

vbur_parser.add_argument(
    "-v", "--vdw-radii", default="umn", choices=["umn", "bondi"], dest="radii",
    help="VDW radii to use in calculation\n" +
    "umn: main group vdw radii from J. Phys. Chem. A 2009, 113, 19, 5806–5812\n" +
    "    (DOI: 10.1021/jp8111556)\n" +
    "    transition metals are crystal radii from Batsanov, S.S. Van der Waals\n" +
    "    Radii of Elements. Inorganic Materials 37, 871–885 (2001).\n" +
    "    (DOI: 10.1023/A:1011625728803)\n" +
    "bondi: radii from J. Phys. Chem. 1964, 68, 3, 441–451 (DOI: 10.1021/j100785a001)\n" +
    "Default: umn",
)

vbur_parser.add_argument(
    "-s", "--scale", default=1.17, type=float, dest="scale",
    help="scale VDW radii by this amount\nDefault: 1.17",
)

vbur_parser.add_argument(
    "-r", "--radius", default=3.5, type=float, dest="radius",
    help="radius around center\nDefault: 3.5 Ångström"
)

vbur_parser.add_argument(
    "-dr", "--scan", default=[0, 1], nargs=2, type=float, dest="scan",
    metavar=("dR", "NUMBER"),
    help="calculate %%Vbur with NUMBER different radii, starting with\n"
    "the radius specified with -r/--radius and increasing\n"
    "in increments of dR"
)

vbur_parser.add_argument(
    "-m", "--method", default="Lebedev",
    type=lambda x: x.capitalize() if x.lower() == "lebedev" else x.upper(),
    choices=["MC", "Lebedev"], dest="method",
    help="integration method - Monte-Carlo (MC) or Lebedev quadrature (Lebedev)\nDefault: Lebedev"
)

grid_options = vbur_parser.add_argument_group("Lebedev integration options")

grid_options.add_argument(
    "-rp", "--radial-points", type=int, default=20,
    choices=[20, 32, 64, 75, 99, 127], dest="rpoints",
    help="number of radial shells for Gauss-Legendre integration\n" +
    "of the radial component\n" +
    "lower values are faster, but at the cost of accuracy\nDefault: 20"
)

grid_options.add_argument(
    "-ap", "--angular-points", type=int, default=1454,
    choices=[110, 194, 302, 590, 974, 1454, 2030, 2702, 5810], dest="apoints",
    help="number of angular points for Lebedev integration\n" +
    "lower values are faster, but at the cost of accuracy\nDefault: 1454"
)

mc_options = vbur_parser.add_argument_group("Monte-Carlo integration options")

mc_options.add_argument(
    "-i", "--minimum-iterations", type=int, default=25,
    metavar="ITERATIONS", dest="min_iter",
    help="minimum iterations - each is a batch of 3000 points\n" +
    "MC will continue after this until convergence criteria are met\n" +
    "Default: 25",
)

args = vbur_parser.parse_args()

s = ""

for f in glob_files(args.infile, parser=vbur_parser):
    if isinstance(f, str):
        if args.input_format is not None:
            infile = FileReader((f, args.input_format, None))
        else:
            infile = FileReader(f, just_geom=False)
    else:
        if args.input_format is not None:
            infile = FileReader(("from stdin", args.input_format, f))
        else:
            infile = FileReader(("from stdin", "xyz", f))

    geom = Geometry(infile)

    targets = None
    if args.exclude_atoms and not args.targets:
        targets = NotAny(args.exclude_atoms)
    elif args.exclude_atoms and args.targets:
        targets = (NotAny(args.exclude_atoms), args.targets)
    elif not args.exclude_atoms and args.targets:
        targets = geom.find(args.targets)
    else:
        targets = NotAny(args.center)

    radius = args.radius
    for i in range(0, int(args.scan[1])):
        try:
            vbur = geom.percent_buried_volume(
                targets=targets,
                center=args.center,
                radius=radius,
                radii=args.radii,
                scale=args.scale,
                method=args.method,
                rpoints=args.rpoints,
                apoints=args.apoints,
                min_iter=args.min_iter,
            )
            if i == 0:
                if len(args.infile) > 1:
                    s += "%-20s\n" % (f + ":")
                s += "radius\t%Vbur\n"
            s += "%.2f\t%4.1f\n" % (radius, vbur)
        except Exception as e:
            raise RuntimeError("calculation failed for %s: %s" % (f, e))

        radius += args.scan[0]

    s += "\n"

if not args.outfile:
    print(s.strip())
else:
    with open(args.outfile, "a") as f:
        f.write(s.strip())
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/percentVolumeBuried.py
percentVolumeBuried.py
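The Monte-Carlo option above integrates by sampling points uniformly inside the probe sphere and counting those that fall inside any scaled VDW sphere. A toy, AaronTools-independent version of that estimate (geometry and radii are made up):

    import numpy as np

    rng = np.random.default_rng(0)
    sphere_r = 3.5                                # probe sphere radius, Angstroms
    atoms = [((1.8, 0.0, 0.0), 1.7 * 1.17)]       # (position, scaled VDW radius)

    # uniform points in a ball: random direction times r * u**(1/3)
    pts = rng.normal(size=(300000, 3))
    pts /= np.linalg.norm(pts, axis=1, keepdims=True)
    pts *= sphere_r * rng.random((300000, 1)) ** (1 / 3)

    buried = np.zeros(len(pts), dtype=bool)
    for pos, r in atoms:
        buried |= np.linalg.norm(pts - pos, axis=1) < r
    print("%%Vbur ~ %.1f" % (100 * buried.mean()))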
import argparse
import sys

from AaronTools.fileIO import FileReader, read_types
from AaronTools.geometry import Geometry
from AaronTools.substituent import Substituent
from AaronTools.utils.utils import glob_files


def main(argv):
    sterimol_parser = argparse.ArgumentParser(
        description="calculate B1-B5, and L sterimol parameters for substituents - see Verloop, A. and Tipker, J. (1976), Use of linear free energy related and other parameters in the study of fungicidal selectivity. Pestic. Sci., 7: 379-390.",
        formatter_class=argparse.RawTextHelpFormatter
    )

    sterimol_parser.add_argument(
        "infile", metavar="input file", type=str, nargs="*",
        default=[sys.stdin],
        help="a coordinate file"
    )

    sterimol_parser.add_argument(
        "-if", "--input-format", type=str, default=None, choices=read_types,
        dest="input_format",
        help="file format of input\nxyz is assumed if input is stdin"
    )

    sterimol_parser.add_argument(
        "-s", "--substituent-atom", type=str, required=True, dest="targets",
        help="substituent atom\n" +
        "1-indexed position of the starting position of the\n" +
        "substituent of which you are calculating sterimol\nparameters"
    )

    sterimol_parser.add_argument(
        "-a", "--attached-to", type=str, required=True, dest="avoid",
        help="non-substituent atom\n" +
        "1-indexed position of the starting position of the atom\n" +
        "connected to the substituent of which you are calculating\n" +
        "sterimol parameters"
    )

    sterimol_parser.add_argument(
        "-r", "--radii", type=str, default="bondi", choices=["bondi", "umn"],
        dest="radii",
        help="VDW radii to use in calculation\n" +
        "umn: main group vdw radii from J. Phys. Chem. A 2009, 113, 19, 5806–5812\n" +
        "    (DOI: 10.1021/jp8111556)\n" +
        "    transition metals are crystal radii from Batsanov, S.S. Van der Waals\n" +
        "    Radii of Elements. Inorganic Materials 37, 871–885 (2001).\n" +
        "    (DOI: 10.1023/A:1011625728803)\n" +
        "bondi: radii from J. Phys. Chem. 1964, 68, 3, 441–451 (DOI: 10.1021/j100785a001)\n" +
        "Default: bondi"
    )

    sterimol_parser.add_argument(
        "-l", "--old-l", action="store_true", required=False, dest="old_L",
        help="approximate FORTRAN Sterimol method for determining L\n"
        "This is 0.4 + the ideal bond length for a target-H bond\n"
        "Default: L value is from VDW radii of target atom to outer\n"
        "VDW radii of atoms projected onto L-axis"
    )

    sterimol_parser.add_argument(
        "-al", "--at-L", default=[None], dest="L_value",
        type=lambda x: [float(v) for v in x.split(",")],
        help="get widths at specific L values (comma-separated)\n"
        "Default: use the entire ligand",
    )

    sterimol_parser.add_argument(
        "-v", "--vector", action="store_true", required=False, dest="vector",
        help="print Chimera/ChimeraX bild file for vectors instead of parameter values"
    )

    sterimol_parser.add_argument(
        "-o", "--output", type=str, default=False, required=False,
        metavar="output destination", dest="outfile",
        help="output destination\n" +
        "Default: stdout"
    )

    args = sterimol_parser.parse_args(args=argv)

    s = ""
    if not args.vector:
        s += "B1\tB2\tB3\tB4\tB5\tL\tfile\n"

    for infile in glob_files(args.infile, parser=sterimol_parser):
        if isinstance(infile, str):
            if args.input_format is not None:
                f = FileReader((infile, args.input_format, infile))
            else:
                f = FileReader(infile)
        else:
            if args.input_format is not None:
                f = FileReader(("from stdin", args.input_format, infile))
            else:
                f = FileReader(("from stdin", "xyz", infile))

        geom = Geometry(f)
        target = args.targets
        avoid = args.avoid
        end = geom.find(avoid)[0]
        frag = geom.get_fragment(target, stop=end)
        sub = Substituent(frag, end=end, detect=False)

        for val in args.L_value:
            data = sub.sterimol(
                return_vector=args.vector,
                radii=args.radii,
                old_L=args.old_L,
                at_L=val,
            )
            if args.vector:
                for key, color in zip(
                    ["B1", "B2", "B3", "B4", "B5", "L"],
                    ["black", "green", "purple", "orange", "red", "blue"]
                ):
                    start, end = data[key]
                    s += ".color %s\n" % color
                    s += ".note Sterimol %s\n" % key
                    s += ".arrow %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f\n" % (*start, *end)
            else:
                s += "%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%s\n" % (
                    data["B1"], data["B2"], data["B3"],
                    data["B4"], data["B5"], data["L"],
                    infile,
                )

    if not args.outfile:
        print(s)
    else:
        with open(args.outfile, "w") as f:
            f.write(s)


if __name__ == "__main__":
    main(None)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/substituentSterimol.py
substituentSterimol.py
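An API-level sketch of the same measurement, assuming sub.sterimol returns a dict keyed by parameter name as the script expects; the file name and atom indices are placeholders:

    from AaronTools.geometry import Geometry
    from AaronTools.substituent import Substituent

    geom = Geometry("toluene.xyz")            # placeholder input file
    end = geom.find("1")[0]                   # atom the substituent is attached to
    frag = geom.get_fragment("2", stop=end)   # substituent starts at atom 2
    sub = Substituent(frag, end=end, detect=False)
    data = sub.sterimol(radii="bondi")
    print({k: round(data[k], 2) for k in ["B1", "B5", "L"]})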
import ast
import dis
import importlib
import inspect
import os
import re
from importlib import resources
from pprint import pprint

from AaronTools import getlogger

LOG = getlogger()


def main(args):
    modules = [
        r[:-3] for r in resources.contents(args.package) if r.endswith(".py")
    ]

    obj = None
    class_name, function_name = None, None
    if os.path.isfile(args.name):
        with open(args.name) as f:
            obj = compile(f.read(), filename=args.name, mode="exec")
    else:
        match = re.match(r"(\w+)\.(\w+)", args.name)
        if match is not None:
            class_name, function_name = match.groups()
        else:
            match = re.match(r"(\w+)", args.name)
            if match is not None:
                class_name, function_name = match.group(1), None

        for module in modules:
            module_name = module
            module = "{}.{}".format(args.package, module_name)
            try:
                module = importlib.import_module(module)
            except Exception:
                continue
            all_classes = inspect.getmembers(module, inspect.isclass)
            for c in all_classes:
                if class_name == c[0]:
                    obj = c[1]
                    break
            else:
                continue
            break

        if function_name is not None:
            obj = getattr(obj, function_name)

    if obj is None and class_name is not None:
        if function_name is None:
            LOG.error("Cannot find %s in %s", class_name, args.package)
        else:
            LOG.error(
                "Cannot find %s.%s in %s",
                class_name,
                function_name,
                args.package,
            )
        exit(1)
    elif obj is None:
        LOG.error("Cannot load %s", args.name)
        exit(1)

    for citation in set(get_citations(obj, args.name)):
        print(*citation)


def get_citations(obj, obj_name, done=None, citations=None):
    if done is None:
        done = set([])
    if citations is None:
        citations = []
    if obj in done:
        return citations
    done.add(obj)

    try:
        instructions = [inst for inst in dis.get_instructions(obj)]
    except TypeError:
        return citations

    names = {}
    methods = {}
    add_obj = set([])
    for i, inst in enumerate(instructions):
        if (
            inst.argval == "CITATION"
            and instructions[i - 1].opname == "LOAD_CONST"
        ):
            names.setdefault(inst.argval, [])
            names[inst.argval].append((obj_name, instructions[i - 1].argval))
        elif inst.opname == "STORE_NAME":
            names.setdefault(inst.argval, [])
            for prev in reversed(instructions[:i]):
                if prev.opname == "STORE_NAME":
                    break
                if prev.opname == "POP_TOP":
                    break
                if "JUMP" in prev.opname:
                    break
                if prev.opname == "IMPORT_FROM":
                    names[inst.argval].append(prev.argval)
                if prev.opname == "IMPORT_NAME":
                    names[inst.argval].append(prev.argval)
                    break
                if prev.opname == "LOAD_NAME":
                    names[inst.argval].append(prev.argval)
            names[inst.argval].reverse()
        elif (
            inst.opname in ["LOAD_ATTR", "LOAD_METHOD"]
            and instructions[i - 1].argval == "self"
        ):
            add_obj.add(".".join(obj_name.split(".")[:-1] + [inst.argval]))
        elif inst.opname == "LOAD_METHOD":
            methods[inst.argval] = instructions[i - 1].argval

    if "CITATION" in names:
        citations += names["CITATION"]

    for rm in get_recurse_methods(names, methods, add_obj=add_obj):
        citations = get_citations(*rm, done=done, citations=citations)

    return citations


def get_recurse_methods(name_dict, method_dict, add_obj=None):
    if add_obj is None:
        recurse_methods = set([])
    else:
        recurse_methods = add_obj

    for method, name_key in method_dict.items():
        if name_key not in name_dict:
            continue
        name_list = name_dict[name_key]
        tmp = []
        for name in name_list:
            if name in name_dict:
                for i in name_dict[name]:
                    if i in tmp:
                        continue
                    tmp.append(i)
            elif name not in tmp:
                tmp.append(name)
        recurse_methods.add(".".join(tmp + [method]))

    rv = set([])
    for rm in recurse_methods:
        module_name = rm
        obj = None
        while True:
            try:
                obj = importlib.import_module(module_name)
                break
            except ModuleNotFoundError:
                module_name = module_name.rsplit(".", maxsplit=1)[0]

        remainder = rm.replace(module_name + ".", "").split(".")
        if len(remainder) == 2:
            class_name, function_name = remainder[0], remainder[1]
        elif len(remainder) == 1:
            class_name, function_name = remainder[0], None
        elif len(remainder) == 0:
            class_name, function_name = None, None
        else:
            raise Exception

        all_classes = inspect.getmembers(obj, inspect.isclass)
        for c in all_classes:
            if c[0] == class_name:
                obj = c[1]
                break

        init_method = getattr(obj, "__init__")
        rv.add((init_method, "{}.{}.__init__".format(module_name, class_name)))
        if function_name is not None and hasattr(obj, function_name):
            obj = getattr(obj, function_name)
            rv.add((obj, rm))

    return rv


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("name")
    parser.add_argument("-p", "--package", default="AaronTools")
    args = parser.parse_args()
    main(args)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/getCitations.py
getCitations.py
import argparse
from warnings import warn

from AaronTools.config import Config
from AaronTools.job_control import SubmitProcess
from AaronTools.utils.utils import glob_files

config = Config(quiet=True)

default_proc = config.getint(
    "Job", "processors", fallback=config.getint("Job", "procs", fallback=4)
)
default_mem = config.getint("Job", "memory", fallback=8)
default_walltime = config.getint("Job", "walltime", fallback=12)
default_template = config.get("Job", "template", fallback=None)

submit_parser = argparse.ArgumentParser(
    description="submit a QM computation to the queue",
    formatter_class=argparse.RawTextHelpFormatter,
)

submit_parser.add_argument(
    "infile", metavar="input file", type=str, nargs="+",
    help="a Psi4, ORCA, or Gaussian input file",
)

submit_parser.add_argument(
    "-d", "--config-default", type=str, default=None, dest="section",
    help="use memory, processors, walltime, and template from\n"
    "the specified section of the AaronTools config",
)

submit_parser.add_argument(
    "-j", "--job-template", type=str, default=None, dest="template",
    help="job template file (i.e. for `qsub`, `bsub`, or `sbatch`)",
)

submit_parser.add_argument(
    "-p", "--processors", type=int, required=False, default=None,
    dest="processors",
    help="number of processors\n"
    "Default: %i" % default_proc,
)

submit_parser.add_argument(
    "-m", "--memory", type=int, required=False, default=None, dest="memory",
    help="memory in GB\n"
    "Default: %i" % default_mem,
)

submit_parser.add_argument(
    "-t", "--walltime", type=int, required=False, default=None, dest="time",
    help="walltime in hours\n"
    "Default: %i" % default_walltime,
)

submit_parser.add_argument(
    "-wl", "--wait-last", action="store_true", default=False, dest="wait_last",
    help="wait for the last job to finish before exiting",
)

submit_parser.add_argument(
    "-we", "--wait-each", action="store_true", default=False, dest="wait_each",
    help="wait for each job to finish before submitting the next",
)

args = submit_parser.parse_args()

for i, f in enumerate(glob_files(args.infile, parser=submit_parser)):
    # TODO: if processors etc. is not specified, read the input file to see if
    # processors were specified

    # precedence: command line > --config-default section > [Job] defaults
    processors = args.processors
    memory = args.memory
    walltime = args.time
    template = args.template

    if args.section is not None:
        if args.processors is None:
            processors = config.getint(args.section, "processors", fallback=None)
        if args.memory is None:
            memory = config.getint(args.section, "memory", fallback=None)
        if args.time is None:
            walltime = config.getint(args.section, "walltime", fallback=None)
        if args.template is None:
            template = config.get(args.section, "template", fallback=None)

    if processors is None:
        processors = default_proc
    if memory is None:
        memory = default_mem
    if walltime is None:
        walltime = default_walltime
    if template is None:
        template = default_template

    submit_process = SubmitProcess(
        f, walltime, processors, memory, template=template
    )

    try:
        if args.wait_each or (i == len(args.infile) - 1 and args.wait_last):
            submit_process.submit(wait=True, quiet=False)
        else:
            submit_process.submit(wait=False, quiet=False)
    except Exception as e:
        warn("failed to submit %s: %s" % (f, str(e)))
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/jobSubmit.py
jobSubmit.py
import argparse
import os

from AaronTools.geometry import Geometry
from AaronTools.component import Component

vsepr_choices = [
    "tetrahedral",
    "seesaw",
    "square_planar",
    "trigonal_pyramidal",
    "trigonal_bipyramidal",
    "square_pyramidal",
    "pentagonal",
    "hexagonal",
    "trigonal_prismatic",
    "pentagonal_pyramidal",
    "octahedral",
    "capped_octahedral",
    "hexagonal_pyramidal",
    "pentagonal_bipyramidal",
    "capped_trigonal_prismatic",
    "heptagonal",
]

coord_comp_parser = argparse.ArgumentParser(
    description="build coordination complexes using templates from Inorg. Chem. 2018, 57, 17, 10557–10567",
    formatter_class=argparse.RawTextHelpFormatter,
)

coord_comp_parser.add_argument(
    "-l", "--ligands", type=str, nargs="*", required=True,
    # choices=Component.list(),
    dest="ligands",
    help="list of ligands to attach to the coordination complex\n" +
    "see `mapLigand.py --list` for a list of available ligands",
)

coord_comp_parser.add_argument(
    "-c2", "--c2-symmetric",
    type=lambda x: x.lower() in ["yes", "true", "t", "arr", "y", "aye", "yeah"],
    nargs="*", default=None, required=False, dest="c2_symmetric",
    help="list of true/false corresponding to --ligands to denote which bidentate\n" +
    "ligands are C2-symmetric\nDefault: try to determine if bidentate ligands are C2-symmetric",
)

coord_comp_parser.add_argument(
    "-g", "--coordination-geometry", choices=vsepr_choices, required=True,
    dest="shape",
    help="coordination geometry of central atom"
)

coord_comp_parser.add_argument(
    "-c", "--center-atom", required=True, metavar="element", dest="center",
    help="central atom for coordination complexes"
)

coord_comp_parser.add_argument(
    "-m", "--minimize", action="store_true", default=False, required=False,
    dest="minimize",
    help="try to relax ligands to minimize steric clashing\nDefault: False",
)

coord_comp_parser.add_argument(
    "-o", "--output", type=str, required=True,
    metavar="output destination", dest="outdir",
    help="output directory\n" +
    "Filenames will match the detected generic formula and\n" +
    "include the point group and subset from the reference\n"
    "noted above\n"
    "Subsets with primes (e.g. A' and A'') are not distinguished",
)

args = coord_comp_parser.parse_args()

geoms, formula = Geometry.get_coordination_complexes(
    args.center,
    args.ligands,
    args.shape.replace("_", " "),
    c2_symmetric=args.c2_symmetric,
    minimize=args.minimize,
)

print("formula is %s" % formula)

if not os.path.exists(args.outdir):
    os.makedirs(args.outdir)

for geom in geoms:
    geom.write(outfile=os.path.join(args.outdir, geom.name + ".xyz"))

print("wrote", len(geoms), "structures to", args.outdir)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/getCoordinationComplexes.py
getCoordinationComplexes.py
import sys
import argparse
from warnings import warn

import numpy as np

from AaronTools.geometry import Geometry
from AaronTools.fileIO import FileReader
from AaronTools.pathway import Pathway


def width(n):
    """use to determine 0-padding based on number of files we're printing"""
    return np.ceil(np.log10(n))


instruction_args = ["-n", "-e", "-min", "-max", "-t"]

interpolate_parser = argparse.ArgumentParser(
    description="interpolate between input structures",
    formatter_class=argparse.RawTextHelpFormatter
)

interpolate_parser.add_argument(
    "infiles", metavar="infile", type=str, nargs="*", default=[],
    help="one or more input coordinate files"
)

interpolate_parser.add_argument(
    "-n", "--print-nstruc", type=int, nargs=1, default=0, required=False,
    dest="n_struc", metavar="number of structures",
    help="number of interpolated structures to print"
)

interpolate_parser.add_argument(
    "-max", "--print-maxima", action="store_const", const=True, default=False,
    required=False, dest="print_max",
    help="print coordinates for potential energy maxima"
)

interpolate_parser.add_argument(
    "-min", "--print-minima", action="store_const", const=True, default=False,
    required=False, dest="print_min",
    help="print coordinates for potential energy minima"
)

interpolate_parser.add_argument(
    "-e", "--print-energy", action="store_const", const=True, default=False,
    required=False, dest="print_E",
    help="print energy and energy derivative instead of structures"
)

interpolate_parser.add_argument(
    "-o", "--output-destination", type=str, nargs=1, default=[None],
    required=False, metavar="output destination", dest="outfile",
    help="output destination\n" +
    "$i will be replaced with zero-padded numbers\n" +
    "Default: traj-$i.xyz for structures, stdout for energies"
)

interpolate_parser.add_argument(
    "-u", "--use-unfinished", action="store_const", const=True, default=False,
    required=False, dest="use_incomplete",
    help="use unfinished geometries (e.g. optimization still running)"
)

interpolate_parser.add_argument(
    "-t", "--print-ts", type=float, nargs="+", default=False, required=False,
    metavar=("t1", "t2"), dest="specific_ts",
    help="space-separated list of t values at which to print structures \n{t| 0 <= t <= 1}"
)

args = interpolate_parser.parse_args()

if not any([args.specific_ts, args.print_E, args.print_min, args.print_max, args.n_struc]):
    interpolate_parser.print_help(sys.stderr)
    raise RuntimeError(
        "one of the following flags should be used: %s" % ", ".join(instruction_args)
    )

if args.use_incomplete and args.print_E:
    warn(
        "Any unfinished geometry optimizations will be used to interpolate energies.\n" +
        "Results may be nonsense"
    )

# take ".xyz" off the outfile b/c aarontools adds it back
outfile = args.outfile[0]

# list of usable input geometries
geom_list = []
nrg_list = []

# read each input file and see if it has normal modes
for f in args.infiles:
    fr = FileReader(f, just_geom=False)
    if "finished" not in fr.other:
        # finished was never set (e.g. input was an xyz file), but we'll use it anyways
        if args.use_incomplete:
            geom = Geometry(fr)
            geom_list.append(geom)
        else:
            warn("not using %s because it is not marked as finished" % f)
    else:
        if fr.other["finished"] or args.use_incomplete:
            # finished might be False, but we'll use it anyways
            geom = Geometry(fr)
            nrg_list.append(fr.other["energy"])
            geom_list.append(geom)
        else:
            warn("not using %s because it is not marked as finished" % f)

if len(geom_list) <= 1:
    warn(
        "nothing to interpolate: %i usable input structure%s" % (
            len(geom_list), (1 - len(geom_list)) * "s"
        )
    )
    warn("use the -u option to include structures without an associated energy")
    sys.exit(0)

ref_geom = geom_list[0]
if len(nrg_list) < len(geom_list):
    nrg_list = np.zeros(len(geom_list))

# align all input geometries to the reference
for geom, nrg in zip(geom_list, nrg_list):
    centroid = geom.COM(mass_weight=True)
    geom.coord_shift(vector=-1 * centroid)
    geom.RMSD(ref=ref_geom, align=True)

# interpolate between the structures
pathway = Pathway(
    ref_geom,
    np.array([geom.coords for geom in geom_list]),
    other_vars={"energy": nrg_list}
)
s_max, r_max = Pathway.t_to_s(1, pathway.region_length)

# header for writing energies
nrg_out = "t\tE\tdE/dt\n"
# list of geometries to print
write_geoms = []

if args.print_max or args.print_min:
    # to find minima and maxima, find where derivative crosses 0 and
    # sign of derivative at neighboring points
    max_n_min_ts = []
    ts = np.linspace(0, 1, num=10001)
    dt = ts[1] - ts[0]
    for t in ts:
        dnrg_dt = pathway.dvar_func_dt["energy"](t) * pathway.dvar_func_dt["energy"](t + dt)
        if dnrg_dt <= 0 and pathway.dvar_func_dt["energy"](t) > 0 and args.print_max:
            max_n_min_ts.append(t)
        elif dnrg_dt <= 0 and pathway.dvar_func_dt["energy"](t) < 0 and args.print_min:
            max_n_min_ts.append(t)

    for i, t in enumerate(max_n_min_ts):
        nrg = pathway.var_func["energy"](t)
        if args.print_E:
            d_nrg = pathway.dvar_func_dt["energy"](t)
            nrg_out += "%f\t%f\t%f\n" % (t, nrg, d_nrg)
        else:
            geom = pathway.geom_func(t)
            comment = "E(%f) = %f" % (t, nrg)
            geom.comment = comment
            write_geoms.append(geom)

if args.specific_ts:
    # print structures for specified values of t
    for i, t in enumerate(args.specific_ts):
        if args.print_E:
            nrg = pathway.var_func["energy"](t)
            d_nrg = pathway.dvar_func_dt["energy"](t)
            nrg_out += "%f\t%f\t%f\n" % (t, nrg, d_nrg)
        else:
            geom = pathway.geom_func(t)
            nrg = pathway.var_func["energy"](t)
            comment = "E(%f) = %f" % (t, nrg)
            geom.comment = comment
            write_geoms.append(geom)

if args.print_E:
    if args.n_struc:
        ss = np.linspace(0, s_max, num=args.n_struc[0])
        for s in ss:
            t = Pathway.s_to_t(s, pathway.region_length)
            nrg = pathway.var_func["energy"](t)
            d_nrg = pathway.dvar_func_dt["energy"](t)
            nrg_out += "%f\t%f\t%f\n" % (t, nrg, d_nrg)

    if outfile is not None:
        with open(outfile, "w") as f:
            f.write(nrg_out.rstrip())
    else:
        print(nrg_out.rstrip())

else:
    if args.n_struc:
        w = width(args.n_struc[0])
        fmt = "%0" + "%i" % w + "i"

        ts = np.linspace(0, 1, num=args.n_struc[0])
        for i, t in enumerate(ts):
            geom = pathway.geom_func(t)
            nrg = pathway.var_func["energy"](t)
            comment = "E(%f) = %f" % (t, nrg)
            geom.comment = comment
            write_geoms.append(geom)

if len(write_geoms) > 0:
    w = width(len(write_geoms))
    fmt = "%0" + "%i" % w + "i"

    if outfile is None:
        outfile = "traj-$i.xyz"

    for i, geom in enumerate(write_geoms):
        my_outfile = outfile.replace("$i", fmt % i)
        if my_outfile == outfile:
            # if there's no $i, we are writing all the structures to the same file
            geom.write(append=True, outfile=my_outfile)
        else:
            geom.write(append=False, outfile=my_outfile)
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/interpolate.py
interpolate.py
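The extrema hunt in interpolate.py is a plain sign-change scan over the energy derivative. The same idea on an analytic curve, where the answer is known:

    import numpy as np

    df = np.cos                    # derivative of sin(t); stands in for pathway.dvar_func_dt
    ts = np.linspace(0, 2 * np.pi, 10001)
    dt = ts[1] - ts[0]
    maxima = [t for t in ts if df(t) * df(t + dt) <= 0 and df(t) > 0]
    print(maxima)                  # a value near pi/2, where sin(t) peaks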
"""Generates prime numbers""" import sys from math import sqrt from os import makedirs, path, access, W_OK from AaronTools.const import AARONLIB class Primes: """ find and cache prime numbers """ primes = [2, 3] clean = False cache = path.join(AARONLIB, "cache", "primes.dat") def __init__(self, clean=False, cache=None): Primes.clean = clean if cache is not None: Primes.cache = cache if Primes.clean or (not path.exists(Primes.cache) and access(Primes.cache, W_OK)): prime_dir, _ = path.split(Primes.cache) if not path.exists(prime_dir): makedirs(prime_dir) with open(Primes.cache, "w") as f: f.writelines([str(i) + "\n" for i in Primes.primes]) f.close() @classmethod def next_prime(cls): """determine the next prime number""" # first return the ones we already found if path.exists(Primes.cache): with open(Primes.cache) as f: for line in f: prime = int(line.strip()) if prime in cls.primes: continue cls.primes += [prime] yield prime f.close() # then start generating new ones test_prime = cls.primes[-1] + 2 while True: max_check = sqrt(test_prime) for prime in cls.primes[1:]: if test_prime % prime == 0: test_prime += 2 break if prime > max_check: cls.primes += [test_prime] cls.store_prime(test_prime) yield test_prime test_prime += 2 break else: cls.primes += [test_prime] cls.store_prime(test_prime) yield test_prime test_prime += 2 @classmethod def store_prime(cls, prime): """add the prime number to the cache""" if not path.exists(path.dirname(cls.cache)): return with open(cls.cache, "a") as f: if hasattr(prime, "__iter__"): f.writelines([str(i) + "\n" for i in prime]) else: f.write(str(prime) + "\n") f.close() @classmethod def list(cls, n): """list the first n prime numbers""" rv = [prime for i, prime in enumerate(cls.primes) if i < n] new = False if len(cls.primes) < n: if path.exists(cls.cache): cur_primes = len(cls.primes) k = 0 with open(cls.cache) as f: for line in f: prime = int(line) k += 1 if new or prime not in cls.primes: new = True cls.primes += [prime] rv += [prime] if len(cls.primes) >= n: break new_primes = [] test_prime = cls.primes[-1] + 2 while len(cls.primes) < n: # this is similar to next_prime, though it doesn't # cache each new prime number as it finds them # it saves them as a group # writing to a file is slow as heck max_check = sqrt(test_prime) for prime in cls.primes[1:]: if test_prime % prime == 0: test_prime += 2 break if prime > max_check: cls.primes += [test_prime] new_primes += [test_prime] test_prime += 2 break else: cls.primes += [test_prime] new_primes += [test_prime] test_prime += 2 rv += new_primes # print(new_primes) cls.store_prime(new_primes) return rv @classmethod def primes_below(cls, le): """list the primes that are less than or equal to le""" rv = [p for p in cls.primes if p <= le] if cls.primes[-1] <= le: for i, prime in enumerate(cls.next_prime()): if prime >= le: break rv += [prime] return rv if __name__ == "__main__": Primes(clean=True) print(Primes.list(int(sys.argv[1])))
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/utils/prime_numbers.py
prime_numbers.py
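A quick smoke test of the Primes cache above, assuming the module is importable as AaronTools.utils.prime_numbers (per the path shown) and the AARONLIB cache directory is writable:

    from AaronTools.utils.prime_numbers import Primes

    print(Primes.list(10))          # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    print(Primes.primes_below(20))  # [2, 3, 5, 7, 11, 13, 17, 19]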
import collections.abc
import os
import re
from collections import OrderedDict
from math import acos, sin, cos, sqrt

import AaronTools.atoms as Atoms
import numpy as np
from AaronTools.const import AARONTOOLS, PHYSICAL


def range_list(number_list, sep=",", sort=True):
    """
    Takes a list of numbers and puts them into a string containing ranges, eg:
    [1, 2, 3, 5, 6, 7, 9, 10] -> "1-3,5-7,9,10"

    :sep: the separator to use between consecutive ranges
    :sort: sort the list before parsing
    """
    if sort:
        number_list = sorted(number_list)

    tmp = [[]]
    for i, n in enumerate(number_list):
        if i == 0:
            tmp[-1] += [n]
        elif n == number_list[i - 1] + 1:
            tmp[-1] += [n]
        else:
            tmp += [[n]]

    rv = ""
    for t in tmp:
        if len(t) > 2:
            rv += "{}-{},".format(t[0], t[-1])
        elif len(t) == 2:
            rv += "{},{},".format(t[0], t[-1])
        else:
            rv += "{},".format(t[0])

    return rv[:-1]


def progress_bar(this, max_num, name=None, width=50):
    if name is None:
        out_str = ""
    else:
        out_str = "{}: ".format(name)
    out_str += "Progress {:3.0f}% ".format(100 * this / max_num)
    out_str += "|{:<{width}s}|".format(
        "#" * int(width * this / max_num), width=width
    )
    print(out_str, end="\r")


def clean_progress_bar(width=50):
    print(" " * 2 * width, end="\r")


def proj(v_vec, u_vec):
    """
    projection of u_vec onto v_vec
    v_vec should be a np.array, and u_vec should have the same shape as v_vec
    """
    numerator = np.dot(u_vec, v_vec)
    denominator = np.linalg.norm(v_vec) ** 2
    return numerator * v_vec / denominator


def quat_matrix(pt1, pt2):
    """build quaternion matrix from pt1 and pt2"""
    pt1 = np.array(pt1, dtype=np.longdouble)
    pt2 = np.array(pt2, dtype=np.longdouble)
    for pt in [pt1, pt2]:
        if len(pt.shape) != 1 or pt.shape[0] != 3:
            raise ValueError("Arguments should be 3-element vectors")

    xm, ym, zm = tuple(pt1 - pt2)
    xp, yp, zp = tuple(pt1 + pt2)

    matrix = np.array(
        [
            [
                xm * xm + ym * ym + zm * zm,
                yp * zm - ym * zp,
                xm * zp - xp * zm,
                xp * ym - xm * yp,
            ],
            [
                yp * zm - ym * zp,
                yp * yp + zp * zp + xm * xm,
                xm * ym - xp * yp,
                xm * zm - xp * zp,
            ],
            [
                xm * zp - xp * zm,
                xm * ym - xp * yp,
                xp * xp + zp * zp + ym * ym,
                ym * zm - yp * zp,
            ],
            [
                xp * ym - xm * yp,
                xm * zm - xp * zp,
                ym * zm - yp * zp,
                xp * xp + yp * yp + zm * zm,
            ],
        ]
    )
    return matrix.astype(np.double)


def uptri2sym(vec, n=None, col_based=False):
    """
    Converts upper triangular matrix to a symmetric matrix

    :vec: the upper triangle array/matrix
    :n: the number of rows/columns
    :col_based: if true, triangular matrix is of the form
                    0 1 3
                    - 2 4
                    - - 5
                if false, triangular matrix is of the form
                    0 1 2
                    - 3 4
                    - - 5
    """
    if hasattr(vec[0], "__iter__") and not isinstance(vec[0], str):
        tmp = []
        for v in vec:
            tmp += v
        vec = tmp

    if n is None:
        n = -1 + np.sqrt(1 + 8 * len(vec))
        n = int(n / 2)
    if n * (n + 1) / 2 != len(vec):
        raise RuntimeError("Bad number of rows requested")

    matrix = np.zeros((n, n))
    if col_based:
        i = 0  # vector index
        j = 0  # for column counting
        for c in range(n):
            j += 1
            for r in range(n):
                matrix[r, c] = vec[i]
                matrix[c, r] = vec[i]
                i += 1
                if r + 1 == j:
                    break
    else:
        for r in range(n):
            for c in range(r, n):
                i = n * r + c - r * (1 + r) / 2
                matrix[r, c] = vec[int(i)]
                matrix[c, r] = vec[int(i)]

    return matrix


def float_vec(word):
    """
    Turns strings into floating point vectors

    :word: a comma-delimited string of numbers

    if no comma or only one element: returns just the floating point number
    if elements in word are strings: returns word unchanged
    else: returns a np.array() of floating point numbers
    """
    val = word
    val = val.split(",")
    try:
        val = [float(v) for v in val]
    except ValueError:
        val = word
    if len(val) == 1:
        return val[0]
    return np.array(val)


def is_alpha(test):
    """determine if string contains only letters"""
    return test.isalpha()


def is_int(test):
    """determine if string is an integer"""
    rv = re.search(r"^[+-]?\d+$", test)
    return bool(rv)


def is_num(test):
    """determine if string is valid as a number"""
    rv = re.search(r"^[+-]?\d+\.?\d*", test)
    return bool(rv)


def add_dict(this, other, skip=None):
    if skip is None:
        skip = []
    for key, val in other.items():
        if key in skip:
            continue
        if key in this and isinstance(val, dict):
            add_dict(this[key], val)
        else:
            this[key] = val
    return this


def resolve_concatenation(*args):
    seq = [isinstance(a, collections.abc.MutableSequence) for a in args]
    if any(seq) and not all(seq):
        rv = []
        for i, s in enumerate(seq):
            if s:
                rv.extend(args[i])
            else:
                rv.append(args[i])
        return rv
    err_msg = "Cannot concatenate" + " {}" * len(args)
    raise TypeError(err_msg.format(*[type(a) for a in args]))


def combine_dicts(*args, case_sensitive=False, dict2_conditional=False):
    """combine dictionaries d1 and d2 to return a dictionary
    with keys d1.keys() + d2.keys()
    if a key is in d1 and d2, the items will be combined:
        if they are both dictionaries, combine_dicts is called recursively
        otherwise, d2[key] is appended to d1[key]
    if case_sensitive=False, the key in the output will be the lowercase
    of the d1 key and d2 key (only for combined items)

    dict2_conditional: bool - if True, don't add d2 keys unless they are
                       also in d1
    """
    from copy import deepcopy

    d1 = args[0]
    d2 = args[1:]
    if len(d2) > 1:
        d2 = combine_dicts(
            d2[0], *d2[1:],
            case_sensitive=case_sensitive,
            dict2_conditional=dict2_conditional
        )
    else:
        d2 = d2[0]

    out = OrderedDict()
    case_keys_1 = list(d1.keys())
    case_keys_2 = list(d2.keys())
    if case_sensitive:
        keys_1 = case_keys_1
        keys_2 = case_keys_2
    else:
        keys_1 = [
            key.lower() if isinstance(key, str) else key for key in case_keys_1
        ]
        keys_2 = [
            key.lower() if isinstance(key, str) else key for key in case_keys_2
        ]

    # go through keys from d1
    for case_key, key in zip(case_keys_1, keys_1):
        # if the key is only in d1, add the item to out
        if key in keys_1 and key not in keys_2:
            out[case_key] = deepcopy(d1[case_key])
        # if the key is in both, combine the items
        elif key in keys_1 and key in keys_2:
            key_2 = case_keys_2[keys_2.index(key)]
            if isinstance(d1[case_key], dict) and isinstance(d2[key_2], dict):
                out[key] = combine_dicts(
                    d1[case_key],
                    d2[key_2],
                    case_sensitive=case_sensitive,
                )
            else:
                try:
                    out[key] = deepcopy(d1[case_key]) + deepcopy(d2[key_2])
                except TypeError:
                    out[key] = resolve_concatenation(d1[case_key], d2[key_2])

    # go through keys from d2
    if not dict2_conditional:
        for case_key, key in zip(case_keys_2, keys_2):
            # if it's only in d2, add item to out
            if key in keys_2 and key not in keys_1:
                out[case_key] = d2[case_key]

    return out


def integrate(fun, start, stop, num=101):
    """numerical integration using Simpson's method
    fun - function to integrate
    start - starting point for integration
    stop - stopping point for integration
    num - number of points used for integration"""

    delta_x = float(stop - start) / (num - 1)
    x_set = np.linspace(start, stop, num=num)
    running_sum = -(fun(start) + fun(stop))
    i = 1
    max_4th_deriv = 0
    while i < num:
        if i % 2 == 0:
            running_sum += 2 * fun(x_set[i])
        else:
            running_sum += 4 * fun(x_set[i])

        if i < num - 4 and i >= 3:
            # Savitzky-Golay style estimate of the 4th derivative,
            # used to bound the quadrature error
            sg_4th_deriv = (
                6 * fun(x_set[i - 3])
                + 1 * fun(x_set[i - 2])
                - 7 * fun(x_set[i - 1])
                - 3 * fun(x_set[i])
                - 7 * fun(x_set[i + 1])
                + fun(x_set[i + 2])
                + 6 * fun(x_set[i + 3])
            )
            sg_4th_deriv /= 11 * delta_x ** 4

            if abs(sg_4th_deriv) > max_4th_deriv:
                max_4th_deriv = abs(sg_4th_deriv)

        i += 1

    running_sum *= delta_x / 3.0
# close enough error estimate e = (abs(stop - start) ** 5) * max_4th_deriv / (180 * num ** 4) return (running_sum, e) def same_cycle(graph, a, b): """ Determines if Atom :a: and Atom :b: are in the same cycle in a undirected :graph: Returns: True if cycle found containing a and b, False otherwise :graph: connectivity matrix or Geometry :a:, :b: indices in connectivity matrix/Geometry or Atoms in Geometry """ from AaronTools.geometry import Geometry if isinstance(a, Atoms.Atom): a = graph.atoms.index(a) if isinstance(b, Atoms.Atom): b = graph.atoms.index(b) if isinstance(graph, Geometry): graph = [ [graph.atoms.index(j) for j in i.connected] for i in graph.atoms ] graph = [[i for i in j] for j in graph] graph, removed = trim_leaves(graph) if a in removed or b in removed: return False path = shortest_path(graph, a, b) for p, q in zip(path[:-1], path[1:]): graph[p].remove(q) graph[q].remove(p) path = shortest_path(graph, a, b) if path is None: return False return True def shortest_path(graph, start, end): """ Find shortest path from :start: to :end: in :graph: using Dijkstra's algorithm Returns: list(node_index) if path found, None if path not found :graph: the connection matrix or Geometry :start: the first atom or node index :end: the last atom or node index """ from AaronTools.geometry import Geometry if isinstance(start, Atoms.Atom): start = graph.atoms.index(start) if isinstance(end, Atoms.Atom): end = graph.atoms.index(end) if isinstance(graph, Geometry): graph = [ [graph.atoms.index(j) for j in i.connected if j in graph.atoms] for i in graph.atoms ] graph = [[i for i in j] for j in graph] # initialize distance array, parent array, and set of unvisited nodes dist = [np.inf for x in graph] parent = [-1 for x in graph] unvisited = set([i for i in range(len(graph))]) dist[start] = 0 current = start while True: # for all unvisited neighbors of current node, update distances # if we update the distance to a neighboring node, # then also update its parent to be the current node for v in graph[current]: if v not in unvisited: continue if dist[v] == np.inf: new_dist = dist[current] + 1 else: new_dist = dist[current] + dist[v] if dist[v] > new_dist: dist[v] = new_dist parent[v] = current # mark current node as visited # select closest unvisited node to be next node # break loop if we found end node or if no unvisited connected nodes unvisited.remove(current) if end not in unvisited: break current = None for u in unvisited: if current is None or dist[u] < dist[current]: current = u if dist[current] == np.inf: break # return shortest path from start to end path = [end] while True: if parent[path[-1]] == -1: break path += [parent[path[-1]]] path.reverse() if path[0] != start or path[-1] != end: return None return path def trim_leaves(graph, _removed=None): from AaronTools.geometry import Geometry # print(_removed) if _removed is None: _removed = [] if isinstance(graph, Geometry): graph = [ [graph.atoms.index(j) for j in i.connected] for i in graph.atoms ] graph = [[i for i in j] for j in graph] some_removed = False for i, con in enumerate(graph): if len(con) == 1: graph[con[0]].remove(i) graph[i].remove(con[0]) some_removed = True _removed += [i] if some_removed: graph, _removed = trim_leaves(graph, _removed) return graph, set(_removed) def to_closing(string, brace): """returns the portion of string from the beginning to the closing paratheses or bracket denoted by brace brace can be '(', '{', or '[' if the closing paratheses is not found, returns None instead""" if brace == "(": pair = ("(", ")") 
elif brace == "{": pair = ("{", "}") elif brace == "[": pair = ("[", "]") else: raise RuntimeError("brace must be '(', '{', or '['") out = "" count = 0 for x in string: if x == pair[0]: count += 1 elif x == pair[1]: count -= 1 out += x if count == 0: break if count != 0: return None else: return out def rotation_matrix(theta, axis, renormalize=True): """rotation matrix for rotating theta radians about axis""" # I've only tested this for rotations in R3 dim = len(axis) if renormalize: if np.linalg.norm(axis) == 0: axis = np.zeros(dim) axis[0] = 1.0 axis = axis / np.linalg.norm(axis) outer_prod = np.outer(axis, axis) cos_comp = cos(theta) outer_prod *= 1 - cos_comp iden = np.identity(dim) cos_comp = iden * cos_comp sin_comp = sin(theta) * (np.ones((dim, dim)) - iden) cross_mat = np.zeros((dim, dim)) for i in range(0, dim): for j in range(0, i): p = 1 if (i + j) % 2 != 0: p = -1 cross_mat[i][j] = -1 * (p * axis[dim - (i + j)]) cross_mat[j][i] = p * axis[dim - (i + j)] return outer_prod + cos_comp + sin_comp * cross_mat def mirror_matrix(norm, renormalize=True): """mirror matrix for the specified norm""" if renormalize: norm /= np.linalg.norm(norm) A = np.identity(3) B = -2 * np.outer(norm, norm) return A + B def fibonacci_sphere(radius=1, center=np.zeros(3), num=500): """ returns a grid of points that are equally spaced on a sphere with the specified radius and center number of points can be adjusted with num """ # generate a grid of points on the unit sphere grid = np.zeros((num, 3)) i = np.arange(0, num) + 0.5 phi = np.arccos(1 - 2 * i / num) ratio = (1 + sqrt(5.)) / 2 theta = 2 * np.pi * i / ratio grid[:, 0] = np.cos(theta) * np.sin(phi) grid[:, 1] = np.sin(theta) * np.sin(phi) grid[:, 2] = np.cos(phi) # scale the points to the specified radius and move the center grid *= radius grid += center return grid def lebedev_sphere(radius=1, center=np.zeros(3), num=302): """ returns one of the Lebedev grid points (xi, yi, zi) and weights (wi) with the specified radius and center. Weights do not include r**2, so integral of F(x,y,z) over sphere is 4*pi*r**2\sum_i{F(xi,yi,zi)wi}. The number of points (num) must be one of 110, 194, 302, 590, 974, 1454, 2030, 2702, 5810 """ # read grid data on unit sphere grid_file = os.path.join( AARONTOOLS, "utils", "quad_grids", "Leb" + str(num) + ".grid" ) if not os.path.exists(grid_file): # maybe some other error type? raise NotImplementedError( "cannot use Lebedev grid with %i points\n" % num + "use one of 110, 194, 302, 590, 974, 1454, 2030, 2702, 5810" ) grid_data = np.loadtxt(grid_file) grid = grid_data[:, [0, 1, 2]] weights = grid_data[:, 3] # scale the points to the specified radius and move the center grid *= radius grid += center return grid, weights def gauss_legendre_grid(start=-1, stop=1, num=32): """ returns a Gauss-Legendre grid points (xi) and weights (wi)for the range start to stop. Integral over F(x) is \sum_i{F(xi)wi}. The number of points (num) must be one of 20, 32, 64, 75, 99, 127 """ # read grid points on the range [-1,1] and weights grid_file = os.path.join( AARONTOOLS, "utils", "quad_grids", "Leg" + str(num) + ".grid" ) if not os.path.exists(grid_file): # maybe some other error type? 
        raise NotImplementedError(
            "cannot use Gauss-Legendre grid with %i points\n" % num +
            "use one of 20, 32, 64, 75, 99, 127"
        )

    grid_data = np.loadtxt(grid_file)

    # shift grid range to [start, stop]
    grid = grid_data[:, 0] * (stop - start) / 2 + start + (stop - start) / 2

    # adjust weights for new range
    weights = grid_data[:, 1] * (stop - start) / 2

    return grid, weights


def perp_vector(vec):
    """
    returns a vector orthonormal to vec (np.ndarray)
    if vec is 2D, returns a vector orthonormal to the plane
    of best fit for the rows of vec
    """
    vec = np.squeeze(vec)
    if vec.ndim == 1:
        out_vec = np.roll(vec, 1)
        if all(x == vec[0] for x in vec):
            for k in range(0, len(vec)):
                if out_vec[k] != 0:
                    out_vec[k] *= -1
                    break
        if np.linalg.norm(vec) == 0:
            # a zero-vector was given
            return np.ones(len(vec)) / len(vec)
        out = np.cross(out_vec, vec)
        out /= np.linalg.norm(out)
        return out

    elif vec.ndim == 2:
        xyz = vec - np.mean(vec, axis=0)
        cov_prod = np.dot(xyz.T, xyz)
        u, s, vh = np.linalg.svd(cov_prod, compute_uv=True)
        return u[:, -1]

    raise NotImplementedError(
        "cannot determine vector perpendicular to %i-dimensional array" % vec.ndim
    )


def get_filename(path, include_parent_dir=True):
    """returns the name of the file without parent directories or extension"""
    if not include_parent_dir:
        fname = os.path.basename(path)
    else:
        fname = path
    fname, _ = os.path.splitext(fname)
    return fname


def boltzmann_coefficients(energies, temperature, absolute=True):
    """
    returns Boltzmann weights for the energies and T
    energies - numpy array of energies in kcal/mol
    temperature - T in K
    absolute - True if the energies given are absolute, False if they are relative energies
    """
    if absolute:
        min_nrg = min(energies)
        energies -= min_nrg

    weights = np.exp(-energies / (PHYSICAL.R * temperature))

    return weights


def boltzmann_average(energies, values, temperature, absolute=True):
    """
    returns the Boltzmann-weighted average of the values corresponding to the energies
    energies - np.array, energy for each state in kcal/mol
    values - np.array, values for which the weighting is applied;
        the ith value corresponds to the ith energy
    temperature - float, temperature in K
    absolute - True if the energies given are absolute, False if they are relative energies
    """
    weights = boltzmann_coefficients(energies, temperature, absolute=absolute)
    avg = np.dot(weights, values) / sum(weights)
    return avg


def glob_files(infiles, parser=None):
    """
    globs input files
    used for command line scripts because Windows doesn't support globbing...
    """
    from glob import glob
    import sys

    if isinstance(infiles, str):
        infiles = [infiles]
    outfiles = []
    for f in infiles:
        if isinstance(f, str):
            outfiles.extend(glob(f))
        elif len(sys.argv) > 1:
            outfiles.append(f)
    if not outfiles and all(isinstance(f, str) for f in infiles):
        raise RuntimeError(
            "no files could be found for %s" % ", ".join(infiles)
        )
    elif not outfiles and parser is not None:
        parser.print_help()
        sys.exit(0)
    return outfiles


def angle_between_vectors(v1, v2, renormalize=True):
    """returns the angle between v1 and v2 (numpy arrays)"""
    if renormalize:
        v1 = v1 / np.linalg.norm(v1)
        v2 = v2 / np.linalg.norm(v2)

    v12 = v2 - v1
    c2 = np.dot(v12, v12)
    t = (c2 - 2) / -2
    if t > 1:
        t = 1
    elif t < -1:
        t = -1
    # math.acos is faster than numpy.arccos for non-arrays
    return acos(t)


float_num = re.compile(r"[-+]?\d+\.?\d*")
AaronTools
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/utils/utils.py
utils.py
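The helpers above are easiest to see with a few concrete calls. A minimal sketch, not part of the original file, assuming AaronTools is installed so that AaronTools.utils.utils is importable:

import numpy as np
from AaronTools.utils.utils import range_list, rotation_matrix, angle_between_vectors

# collapse consecutive indices into ranges: prints "1-3,5-7,9,10"
print(range_list([1, 2, 3, 5, 6, 7, 9, 10]))

# rotate the x axis 90 degrees about z; the result is ~[0, 1, 0]
r = rotation_matrix(np.pi / 2, np.array([0.0, 0.0, 1.0]))
print(np.dot(r, np.array([1.0, 0.0, 0.0])))

# angle between two orthogonal vectors in radians (~pi/2)
print(angle_between_vectors(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])))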
AbPyTools
=========

[![Coverage Status](https://coveralls.io/repos/github/gf712/AbPyTools/badge.svg?branch=master)](https://coveralls.io/github/gf712/AbPyTools?branch=master)
[![Build Status](https://travis-ci.org/gf712/AbPyTools.svg?branch=master)](https://travis-ci.org/gf712/AbPyTools)
[![Code Health](https://landscape.io/github/gf712/AbPyTools/master/landscape.svg?style=flat)](https://landscape.io/github/gf712/AbPyTools/master)
[![PyPi version](https://pypip.in/v/abpytools/badge.png)](https://crate.io/packages/abpytools/)
[![Documentation Status](https://readthedocs.org/projects/abpytools/badge/?version=latest)](https://abpytools.readthedocs.io/en/latest/?badge=latest)

AbPyTools is a Python 3 package to extract information from heavy and light antibody chain sequences. Using the built-in Antibody and ChainCollection objects it is very easy to manipulate the data and do more specific analysis with custom scripts.

This package is still in its early days and lacks detailed documentation...! Below are some existing features and some planned future additions. At the moment it is updated several times a day until I am happy with the base functions. In the future, development will be performed in a separate branch.

AbPyTools features
-
- obtain antibody numbering by querying AbNum (http://www.bioinf.org.uk/abs/abnum/)
- analysis of scFv sequences with an optimised backend
- higher level class that can load data from several antibody sequences
- load and write antibody sequences in FASTA and json formats
- calculates hydrophobicity matrix for whole dataset
- get all the data already mentioned above
- access CDR and framework sequences
- work with heavy and light chains or combinations
- high level function to easily plot CDR length using a FASTA file as input

Stuff that will be added/worked on next
-
- add remaining antibody numbering schemes
- write tutorials
- add some useful functions, such as comparing sequences with available datasets
- write high level code for more specific analysis
- plot CDR lengths of antibodies

Cython
-
From version 0.3 AbPyTools will start using Cython to speed up numerical manipulations. In the front end nothing will change, but installation from source will require Cython! This new feature will speed up most calculations significantly. The backend uses code written from scratch that mimics numpy behaviour but runs much faster, since it is more specialised and lightweight.

Installing abpytools
-

## From source
Clone code from the GitHub repository

`git clone https://github.com/gf712/AbPyTools.git`

Change to the package directory on your local machine

`cd path/to/AbPyTools`

Install the package

`python setup.py install`

Run tests (recommended)

`python setup.py test`

## From pypi

`pip install abpytools`

Import to python
-

`import abpytools`

Changelog
-

### v0.3.2 (release date 31/10/2018)
- Added docs page
- Fixed PyPI build with Cython files

### v0.3.1 (release date 30/10/2018)
- Major:
  - Protobuf support to serialise core objects (`ChainCollection` and `FabCollection`)
    - speed up in saving and loading large files
    - files are about 5 times smaller
  - Dropped support for Python 3.5 to start using f-strings
- API changes:
  - Cleaned up object instantiation and added factory functions (**this will break some old code but provides a cleaner interface**)
- Minor:
  - added Python 3.7 to Travis script (also dropped Python 3.5)

### v0.3 (release date 11/10/2018):
- Implementation of backend calculations with Cython leading to speedups of several orders of magnitude
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/README.md
README.md
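As a quickstart for the features listed in the README, a minimal sketch (added for illustration; 'antibodies.fasta' is a placeholder for any FASTA file of chain sequences, and it assumes abpytools 0.3.2 is installed):

from abpytools import ChainCollection

# load a collection from file; the factory classmethod dispatches on the extension
chains = ChainCollection.load_from_file(path='antibodies.fasta')
print(chains.n_ab, chains.names[:5])

# hydrophobicity matrix used by several of the analysis classes below
h = chains.hydrophobicity_matrix()
print(h.shape)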
import warnings import _pickle as cPickle from abpytools.home import Home from ..utils.python_config import PythonConfig import matplotlib.pyplot as plt SUPPORTED_SUBSITUTION_MATRICES = ['BLOSUM45', 'BLOSUM62', 'BLOSUM80'] def load_alignment_algorithm(algorithm): if algorithm.lower() == 'needleman_wunsch': return needleman_wunsch else: raise ValueError("Unknown algorithm") def load_substitution_matrix(substitution_matrix): abpytools_directory = Home().homedir if substitution_matrix in SUPPORTED_SUBSITUTION_MATRICES: with open('{}/data/{}.txt'.format(abpytools_directory, substitution_matrix), 'rb') as f: matrix = cPickle.load(f) else: raise ValueError("Unknown substitution matrix") return matrix def needleman_wunsch(seq_1, seq_2, substitution_matrix, indel=-1): if indel >= 0: f = "Indel must be negative, setting indel to {}.".format(-indel) warnings.warn(f) indel = -indel # initialise matrix init_matrix = init_score_matrix(seq_1=seq_1, seq_2=seq_2, indel=indel) scores, traceback_matrix = calculate_scores(matrix=init_matrix, seq_1=seq_1, seq_2=seq_2, substitution_matrix=substitution_matrix, gap_penalty=indel) seq_2_aligned = traceback(traceback_matrix=traceback_matrix, seq_1=seq_1, seq_2=seq_2) return seq_2_aligned, scores[-1][-1] def init_score_matrix(seq_1, seq_2, indel): """ - score matrix initialisation with two sequences - pure python, i.e. no numpy Example init_score_matrix('SEND', 'AND', -1): [[0, -1, -2], [-1, 0, 0], [-2, 0, 0], [-3, 0, 0]] """ init_matrix = [[x * indel] + [0] * len(seq_1) if x > 0 else list(range(0, (len(seq_1) + 1) * indel, indel)) for x in range(len(seq_2) + 1)] return init_matrix def calculate_scores(matrix, seq_1, seq_2, substitution_matrix, gap_penalty): traceback_matrix = [['up'] + [''] * len(seq_1) if x > 0 else ['left'] * (len(seq_1) + 1) for x in range(len(seq_2) + 1)] traceback_matrix[0][0] = 'done' for i in range(1, len(matrix)): for j in range(1, len(matrix[i])): q_diag = matrix[i - 1][j - 1] + int(substitution_matrix[(seq_2[i - 1], seq_1[j - 1])]) q_up = matrix[i - 1][j] + gap_penalty q_left = matrix[i][j - 1] + gap_penalty results = [q_diag, q_up, q_left] matrix[i][j] = max(results) max_index = results.index(matrix[i][j]) if max_index == 0: traceback_matrix[i][j] = 'diag' elif max_index == 1: traceback_matrix[i][j] = 'up' else: traceback_matrix[i][j] = 'left' return matrix, traceback_matrix def traceback(traceback_matrix, seq_1, seq_2): row = -1 column = -1 current = traceback_matrix[row][column] # iter_seq_1 = iter(seq_1[::-1]) # iter_seq_2 = iter(seq_2[::-1]) # seq_1 = seq_1[::-1] # seq_2 = seq_2[::-1] # aligned_seq_1 = list() aligned_seq_2 = list() while current != 'done': # is aligned if current == 'diag': # aligned_seq_1.append(next(iter_seq_1)) # aligned_seq_2.append(next(iter_seq_2)) # aligned_seq_1.append(seq_1[column]) aligned_seq_2.append(seq_2[row]) row -= 1 column -= 1 # leave a gap elif current == 'left': # aligned_seq_1.append(next(iter_seq_1)) # aligned_seq_1.append(seq_1[column]) aligned_seq_2.append('-') column -= 1 # leave a gap else: # aligned_seq_1.append(next(iter_seq_1)) # aligned_seq_1.append(seq_1[column]) aligned_seq_2.append('-') row -= 1 current = traceback_matrix[row][column] return ''.join(aligned_seq_2)[::-1] def switch_interactive_mode(save=False): ipython_config = PythonConfig() if ipython_config.ipython_info == 'notebook' and save is False: if ipython_config.matplotlib_interactive is False: # turns on interactive mode plt.ion()
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/analysis/analysis_helper_functions.py
analysis_helper_functions.py
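To exercise the Needleman-Wunsch implementation above without the package's pickled BLOSUM data, a toy substitution matrix is enough. A sketch added for illustration; the +2/-1 scores are arbitrary assumptions, not package defaults:

from abpytools.analysis.analysis_helper_functions import needleman_wunsch

# toy substitution matrix keyed by residue pairs: +2 for a match, -1 for a mismatch
# (the package itself loads pickled BLOSUM matrices via load_substitution_matrix)
residues = set('SEND' + 'AND')
toy_matrix = {(a, b): 2 if a == b else -1 for a in residues for b in residues}

# returns the second sequence aligned against the first, plus the final score
aligned, score = needleman_wunsch('SEND', 'AND', toy_matrix, indel=-1)
print(aligned, score)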
from abpytools.features.regions import ChainDomains
from collections import Counter
import numpy as np
from matplotlib import pyplot as plt
from abpytools.utils.data_loader import DataLoader
import os
from abpytools.utils import PythonConfig

amino_acid_index = {"R": 0, "N": 1, "D": 2, "E": 3, "Q": 4,
                    "K": 5, "S": 6, "T": 7, "C": 8, "H": 9,
                    "M": 10, "A": 11, "V": 12, "G": 13, "I": 14,
                    "L": 15, "F": 16, "P": 17, "W": 18, "Y": 19}


class AminoAcidFreq(ChainDomains):

    def __init__(self, antibody_objects=None, path=None, region='CDR3', load=False):
        super(AminoAcidFreq, self).__init__(antibody_objects=antibody_objects, path=path, load=load)

        regions = ['all', 'CDRs', 'FRs', 'FR1', 'FR2', 'FR3', 'FR4', 'CDR1', 'CDR2', 'CDR3']
        if region in regions:
            # get the sequence for the specified region
            self.region = region

            if self.region.startswith('CDR'):
                self._sequences = [self.cdr_sequences()[name][self.region] for name in self.names]
                data_loader = DataLoader(data_type='CDR_positions', data=['chothia', self.chain])
                self._numbering = data_loader.get_data()[self.region]

            elif self.region.startswith('FR'):
                self._sequences = [self.framework_sequences()[name][self.region] for name in self.names]
                # framework numbering positions, used to label the x axis in plot(),
                # mirroring the CDR branch above
                data_loader = DataLoader(data_type='Framework_positions', data=['chothia', self.chain])
                self._numbering = data_loader.get_data()[self.region]

            # TODO: implement 'all'
            elif self.region == 'all':
                raise NotImplementedError("This is not the code you're looking for.")
        else:
            raise ValueError('Parameter region must be either: {}. Not {}'.format(', '.join(regions), region))

        self._sequence_count = len(max(self._sequences, key=len))

        self._aa_freq = np.zeros((20, self._sequence_count))
        self._aa_hyd_freq = np.zeros((3, self._sequence_count))
        self._aa_chg_freq = np.zeros((3, self._sequence_count))

        self._aa_count = np.zeros((20, self._sequence_count))
        self._aa_hyd_count = np.zeros((3, self._sequence_count))
        self._aa_chg_count = np.zeros((3, self._sequence_count))

    def _amino_acid_freq(self, normalize):

        # if the sum of self._aa_count is zero then the count has not been performed at this point
        if self._aa_count.sum() == 0:

            for position in range(len(max(self._sequences, key=len))):

                position_sequence = [x[position] for x in self._sequences if len(x) > position]
                count_i = Counter(position_sequence)
                total_i = len(position_sequence)

                for amino_acid_i in count_i.keys():

                    self._aa_count[amino_acid_index[amino_acid_i], position] = count_i[amino_acid_i]

                    # _aa_hyd_freq: row1 -> hydrophilic
                    #               row2 -> moderate
                    #               row3 -> hydrophobic
                    if amino_acid_i in ['R', 'N', 'D', 'E', 'Q', 'K', 'S', 'T']:
                        self._aa_hyd_count[0, position] += count_i[amino_acid_i]
                    elif amino_acid_i in ['C', 'H', 'M']:
                        self._aa_hyd_count[1, position] += count_i[amino_acid_i]
                    else:
                        self._aa_hyd_count[2, position] += count_i[amino_acid_i]

                    # _aa_chg_freq: row1 -> negative
                    #               row2 -> positive
                    #               row3 -> neutral
                    if amino_acid_i in ['D', 'E']:
                        self._aa_chg_count[0, position] += count_i[amino_acid_i]
                    elif amino_acid_i in ['R', 'K', 'H']:
                        self._aa_chg_count[1, position] += count_i[amino_acid_i]
                    else:
                        self._aa_chg_count[2, position] += count_i[amino_acid_i]

                # normalize values
                # doing it even when it is not required comes at a small computational cost
                # it would take longer if the user had to recalculate everything to have a
                # count plot and then a frequency plot
                self._aa_freq[:, position] = self._aa_count[:, position] / total_i
                self._aa_chg_freq[:, position] = self._aa_chg_count[:, position] / total_i
                self._aa_hyd_freq[:, position] = self._aa_hyd_count[:, position] / total_i

        if normalize:
            return self._aa_freq, self._aa_chg_freq, self._aa_hyd_freq
        else:
            return self._aa_count, self._aa_chg_count, self._aa_hyd_count

    def plot(self, sort_by='name', normalize=True, display_count=True, plot_path='./',
             plot_name='AminoAcidFrequency.png', notebook_plot=True):

        ipython_config = PythonConfig()
        if ipython_config.matplotlib_interactive is False and ipython_config.ipython_info == 'notebook':
            plt.ion()

        if sort_by not in ['name', 'hydropathy', 'charge']:
            raise ValueError("Argument for sort_by not valid. Valid arguments are name, hydropathy and charge")

        # get count/freq matrices
        # to avoid writing more code than necessary the count and freq are stored in the same
        # variable, since they will always be plotted independently
        aa, chg, hyd = self._amino_acid_freq(normalize=normalize)

        fig = plt.figure(1, figsize=(8, 8))
        ax = fig.add_subplot(111)

        for position in range(self._aa_freq.shape[1]):

            previous = 0

            # 20 distinct colors
            colors = ["#023fa5", "#7d87b9", "#bec1d4", "#d6bcc0", "#bb7784",
                      "#8e063b", "#4a6fe3", "#8595e1", "#b5bbe3", "#e6afb9",
                      "#e07b91", "#d33f6a", "#11c638", "#8dd593", "#c6dec7",
                      "#ead3c6", "#f0b98d", "#ef9708", "#0fcfc0", "#9cded6"]

            if sort_by == 'name':
                ax.set_title(self.region + ' amino acids', size=20)

                for i, amino_acid in enumerate(sorted(amino_acid_index.keys())):
                    c = colors[i]
                    ax.bar(position, aa[amino_acid_index[amino_acid], position],
                           bottom=previous, label=amino_acid, color=c, align='center')
                    previous += aa[amino_acid_index[amino_acid], position]

                lgd = ax.legend(sorted(amino_acid_index.keys()), loc='center left',
                                bbox_to_anchor=(1, 0.5), prop={"size": 16})

            elif sort_by == 'hydropathy':
                previous, lgd = self.plot_helper(ax=ax, colors=['b', 'r', 'k'],
                                                 title=sort_by,
                                                 keys=['Hydrophilic', 'Moderate', 'Hydrophobic'],
                                                 position=position, data=hyd,
                                                 previous=previous)

            else:
                previous, lgd = self.plot_helper(ax=ax, colors=['b', 'r', 'k'],
                                                 title='amino acid charge',
                                                 keys=['Negative', 'Positive', 'Neutral'],
                                                 position=position, data=chg,
                                                 previous=previous)

        if display_count:
            for position in range(aa.shape[1]):
                ax.text(x=position, y=aa[:, position].sum(),
                        s=str(int(self._aa_count[:, position].sum())),
                        rotation=45, ha='center', va='bottom')

        if normalize:
            ax.set_ylabel('Frequency', size=16)
        else:
            ax.set_ylabel('Count', size=16)

        ax.set_xticks(np.arange(len(self._numbering)))
        ax.set_xticklabels(self._numbering, rotation=60)
        ax.set_xlabel('Position', size=16)
        ax.set_ylim([0, aa.sum(0).max() * 1.1])
        ax.margins(0.02)
        ax.grid(axis='y')

        if ipython_config.ipython_info == 'notebook' and notebook_plot:
            ax.plot()
        else:
            fig.savefig(os.path.join(plot_path, plot_name), bbox_extra_artists=(lgd,), bbox_inches='tight')
            plt.close(fig)

    def plot_helper(self, ax, colors, title, keys, position, data, previous):
        ax.set_title('{} {}'.format(self.region, title), size=20)
        for i, prop_i in enumerate(keys):
            c = colors[i]
            ax.bar(position, data[i, position], bottom=previous, label=prop_i, color=c, align='center')
            previous += data[i, position]
        lgd = ax.legend(keys, loc='center left', bbox_to_anchor=(1, 0.5), prop={"size": 16})
        return previous, lgd
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/analysis/amino_acid_freq.py
amino_acid_freq.py
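A hypothetical usage sketch for AminoAcidFreq (not from the original source; 'heavy_chains.fasta' is a placeholder path and abpytools with matplotlib is assumed to be installed):

from abpytools import ChainCollection
from abpytools.analysis.amino_acid_freq import AminoAcidFreq

# placeholder input; any FASTA file of heavy chains works
chains = ChainCollection.load_from_file(path='heavy_chains.fasta')
freq = AminoAcidFreq(antibody_objects=chains.antibody_objects, region='CDR3')

# stacked per-position bar plot, coloured by hydropathy class
freq.plot(sort_by='hydropathy', normalize=True, plot_path='.', plot_name='cdr3_freq.png')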
from sklearn import cluster, decomposition
import numpy as np
from abpytools import ChainCollection
from matplotlib import pyplot as plt


class Cluster:

    def __init__(self, antibodies, metric='hydrophobicity', clustering_method='kmeans',
                 decomposition_method='PCA'):

        if isinstance(antibodies, ChainCollection):
            self.antibodies = antibodies
        elif isinstance(antibodies, str):
            # build the collection from a file path via the factory classmethod
            self.antibodies = ChainCollection.load_from_file(path=antibodies)
        if self.antibodies.n_ab == 0:
            self.antibodies.load()
        self.metric = metric
        self.clustering_method = clustering_method
        self.decomposition_method = decomposition_method
        self.cluster_assignment = np.zeros(self.antibodies.n_ab, dtype=int)
        self.cluster_assignment_dict = dict()
        self._data = None

    def _collect_data(self):
        if self.metric == 'hydrophobicity':
            return self.antibodies.hydrophobicity_matrix()

    def cluster(self, n_components=0.95, n_clusters=3):
        if self.decomposition_method == 'PCA':
            decomposition_obj = decomposition.PCA(n_components)
            self._data = decomposition_obj.fit_transform(self._collect_data())

        if self.clustering_method == 'kmeans':
            clustering_obj = cluster.KMeans(n_clusters=n_clusters)
            self.cluster_assignment = clustering_obj.fit_predict(self._data)

        for i, antibody_obj in enumerate(self.antibodies.antibody_objects):
            assignment = 'Cluster_{}'.format(self.cluster_assignment[i])
            if assignment not in self.cluster_assignment_dict:
                self.cluster_assignment_dict[assignment] = list()
            self.cluster_assignment_dict[assignment].append(antibody_obj)

    def plot_cluster(self):

        if len(self.cluster_assignment_dict) == 0:
            self.cluster()

        # tab20 provides 20 distinct colours for cluster labels
        color = iter(plt.get_cmap('tab20').colors)

        plt.figure(figsize=(8, 8))

        for assignment in np.unique(self.cluster_assignment):
            c = next(color)
            plt.scatter(self._data[self.cluster_assignment == assignment, 0],
                        self._data[self.cluster_assignment == assignment, 1],
                        c=c, label='Cluster {}'.format(assignment))

        plt.legend(loc='best', prop={"size": 14})
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/analysis/cluster.py
cluster.py
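A usage sketch for Cluster, added for illustration; 'heavy_chains.json' is a placeholder for a saved ChainCollection, and scikit-learn must be installed:

from abpytools.analysis.cluster import Cluster

cl = Cluster(antibodies='heavy_chains.json', metric='hydrophobicity')

# PCA down to 95% explained variance, then k-means with 3 clusters
cl.cluster(n_components=0.95, n_clusters=3)
cl.plot_cluster()
print({k: len(v) for k, v in cl.cluster_assignment_dict.items()})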
from ..core.chain_collection import ChainCollection
import seaborn as sns
from .analysis_helper_functions import switch_interactive_mode
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import linkage, dendrogram
import scipy.spatial.distance as ssd
from ..utils.python_config import PythonConfig


class DistancePlot(ChainCollection):

    def __init__(self, antibody_objects=None, path=None):
        # ChainCollection does not accept a path argument directly;
        # when a path is given, load the objects via the factory classmethod
        if antibody_objects is None and path is not None:
            antibody_objects = ChainCollection.load_from_file(path=path).antibody_objects
        super().__init__(antibody_objects=antibody_objects)

    def plot_heatmap(self, feature='chou', distance_metric='cosine_distance', save=False, ax=None,
                     labels=None, multiprocessing=False, file_name='./heatmap.png', **kwargs):

        data = self.distance_matrix(feature=feature, metric=distance_metric, multiprocessing=multiprocessing)

        switch_interactive_mode(save=save)

        if ax is None:
            f, ax = plt.subplots(1, 1, figsize=(8, 6))
            ax.set(xlabel='Antibody', ylabel='Antibody', title=distance_metric,
                   xticks=range(self.n_ab), yticks=range(self.n_ab))

        if labels is None:
            labels = self.names

        sns.heatmap(data, ax=ax, **kwargs)
        ax.set_yticklabels(labels, rotation='horizontal')
        ax.set_xticklabels(labels, rotation=60)

        ipython_config = PythonConfig()
        if ipython_config.ipython_info == 'notebook' and save is False:
            plt.plot()
        else:
            plt.savefig(file_name)

    def plot_dendrogram(self, feature='chou', distance_metric='cosine_distance', save=False, ax=None,
                        labels=None, multiprocessing=False, file_name='./dendrogram.png', **kwargs):

        switch_interactive_mode(save=save)

        data = self.distance_matrix(feature=feature, metric=distance_metric, multiprocessing=multiprocessing)

        # convert the redundant n*n square matrix form into a condensed nC2 array
        data = ssd.squareform(data)

        clustered_data = linkage(y=data)

        if ax is None:
            f, ax = plt.subplots(1, 1, figsize=(8, 6))
            ax.set(xlabel='Antibody', ylabel='Distance', title=distance_metric)

        if labels is None:
            labels = self.names

        # plot dendrogram
        _ = dendrogram(clustered_data, labels=labels, ax=ax, **kwargs)

        ipython_config = PythonConfig()
        if ipython_config.ipython_info == 'notebook' and save is False:
            plt.plot()
        else:
            plt.savefig(file_name)
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/analysis/distance.py
distance.py
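A usage sketch for DistancePlot (illustrative only; 'chains.fasta' is a placeholder path, and seaborn/scipy are assumed installed):

from abpytools.analysis.distance import DistancePlot

d = DistancePlot(path='chains.fasta')

# pairwise cosine distances over Chou pseudo amino acid composition features
d.plot_heatmap(feature='chou', distance_metric='cosine_distance', save=True, file_name='heatmap.png')
d.plot_dendrogram(feature='chou', distance_metric='cosine_distance', save=True, file_name='dendrogram.png')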
from .analysis_helper_functions import load_alignment_algorithm, load_substitution_matrix


class SequenceAlignment:
    """
    Sequence alignment of a target Chain object against each sequence in a collection
    """
    def __init__(self, target, collection, algorithm, substitution_matrix):
        """
        :param target: Chain object that each sequence in the collection is aligned against
        :param collection: ChainCollection object with the sequences to align
        :param algorithm: name of the alignment algorithm (e.g. 'needleman_wunsch')
        :param substitution_matrix: name of the substitution matrix (e.g. 'BLOSUM62')
        """
        self._algorithm = algorithm
        self._substitution_matrix = load_substitution_matrix(substitution_matrix)
        self.target = target
        self._collection = collection
        self._aligned_collection = dict()
        self._alignment_scores = dict()
        self._aligned = False

    def align_sequences(self, **kwargs):
        # perform the alignment for each chain object in collection and store results in
        # dictionaries with keys corresponding to names of the sequence to be aligned
        for seq in self._collection.antibody_objects:
            result = self._align(self.target, seq, **kwargs)
            self._aligned_collection[seq.name] = result[0]
            self._alignment_scores[seq.name] = result[1]
        self._aligned = True

    def print_aligned_sequences(self):
        if not self._aligned:
            raise ValueError("Method align_sequences must be called first to perform alignment.")
        final_string = self._aligned_sequences_string()
        print(*final_string, sep='\n')

    @property
    def target_sequence(self):
        return self.target.sequence

    @property
    def aligned_sequences(self):
        return self._aligned_collection

    @property
    def score(self):
        return self._alignment_scores

    def _align(self, seq_1, seq_2, **kwargs):
        # loads the function object that is then called
        self._algorithm_function = load_alignment_algorithm(self._algorithm)
        return self._algorithm_function(seq_1.sequence, seq_2.sequence, self._substitution_matrix, **kwargs)

    def _aligned_sequences_string(self):
        # find longest name of target sequence and aligned sequences (for display purposes)
        max_name = max(len(self.target.name), max(len(x) for x in self._collection.names))
        f = '{:>%d}: {}' % max_name
        f_score = '{:>%d}: {} (Score: {})' % max_name

        # store the final string in a list so that everything is printed at the end in one go
        final_string = list()
        final_string.append(f.format(self.target.name, self.target.sequence))
        final_string.append('-' * (len(self.target.name) + len(self.target.sequence)))

        for seq in self._collection.names:
            final_string.append(f_score.format(seq, self._aligned_collection[seq], self._alignment_scores[seq]))

        return final_string
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/analysis/sequence_alignment.py
sequence_alignment.py
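A sketch of aligning one chain against the rest of a collection (illustrative; assumes the bundled BLOSUM62 data is available and 'chains.fasta' is a placeholder path):

from abpytools import ChainCollection
from abpytools.analysis.sequence_alignment import SequenceAlignment

chains = ChainCollection.load_from_file(path='chains.fasta')
target = chains[0]
rest = ChainCollection(antibody_objects=chains.antibody_objects[1:])

sa = SequenceAlignment(target, rest, 'needleman_wunsch', 'BLOSUM62')
sa.align_sequences(indel=-1)   # indel is forwarded to needleman_wunsch
sa.print_aligned_sequences()
print(sa.score)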
from matplotlib import pyplot as plt import seaborn as sns import os from abpytools.utils import PythonConfig from abpytools.features.regions import ChainDomains from matplotlib.ticker import MaxNLocator from .analysis_helper_functions import switch_interactive_mode class CDRLength(ChainDomains): def __init__(self, path=None, antibody_objects=None, verbose=True, show_progressbar=True, n_threads=10): super().__init__(path=path, antibody_objects=antibody_objects, verbose=verbose, show_progressbar=show_progressbar, n_threads=n_threads) def plot_cdr(self, only_cdr3=True, save=False, plot_path='./', plot_name='CDR_length', plot_title=None, hist=True, ax=None, **kwargs): switch_interactive_mode(save=save) if ax is None: if only_cdr3: f, ax = plt.subplots(nrows=1, ncols=1) else: f, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 5)) ax = ax.ravel() if only_cdr3: if plot_title is None: ax.set_title('CDR3 Length', size=18) else: ax.set_title(plot_title, size=18) sns.distplot(self.cdr_lengths()[:, 2], hist=hist, ax=ax, **kwargs) ax.set_ylabel('Density', size=14) ax.set_xlabel('CDR Length', size=14) ax.xaxis.set_major_locator(MaxNLocator(integer=True)) else: if plot_title is None: plt.suptitle('CDR Length', size=20) else: plt.suptitle(plot_title, size=20) for i, cdr in enumerate(['CDR 1', 'CDR 2', 'CDR 3']): ax[i].set_title(cdr, size=16) sns.distplot(self.cdr_lengths()[:, i], hist=hist, ax=ax[i]) if i == 0: ax[i].set_ylabel('Density', size=16) if i == 1: ax[i].set_xlabel('CDR Length', size=16) ax[i].xaxis.set_major_locator(MaxNLocator(integer=True)) plt.tight_layout() plt.subplots_adjust(top=0.85) ipython_config = PythonConfig() if ipython_config.ipython_info == 'notebook' and save is False: plt.plot() else: plt.savefig(os.path.join(plot_path, plot_name), format='png') plt.close()
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/analysis/cdr_length.py
cdr_length.py
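A usage sketch for CDRLength, added for illustration with a placeholder input path:

from abpytools.analysis.cdr_length import CDRLength

cdr = CDRLength(path='heavy_chains.fasta')

# one density panel per CDR; only_cdr3=True would plot CDR3 alone
cdr.plot_cdr(only_cdr3=False, save=True, plot_path='.', plot_name='cdr_length.png')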
from .distance_metrics_ import cosine_distance_, hamming_distance_, levenshtein_distance_ from abpytools.utils.math_utils import Vector # from .analysis_helper_functions import init_score_matrix # from math import acos # from ..utils.math_utils import dot_product, magnitude def cosine_distance(u, v): """ returns the cosine distance between vectors u and v :param u: :param v: :return: """ if u == v: return 0 else: # pure python: acos(max(min(dot_product(u, v) / (magnitude(u) * magnitude(v)), 1), -1)) return cosine_distance_(u, v) def cosine_similarity(u, v): """ returns the cosine similarity between vectors u and v :param u: :param v: :return: """ return 1 - cosine_distance(u, v) def hamming_distance(seq1, seq2): """ returns the hamming distance between two sequences :param seq1: :param seq2: :return: """ if len(seq1) != len(seq2): raise ValueError("Sequences must be equal length, instead got {} and {}".format(len(seq1), len(seq2))) # pure python: # sum(aa1 != aa2 for aa1, aa2 in zip(seq1, seq2)) return hamming_distance_(seq1, seq2) def levenshtein_distance(seq1, seq2): """ :param seq1: :param seq2: :return: """ # pure python: # dist = init_score_matrix(seq_1=seq1, seq_2=seq2, indel=1) # # cols = len(dist[0]) # rows = len(dist) # # for col in range(1, cols): # for row in range(1, rows): # if seq2[row - 1] == seq1[col - 1]: # cost = 0 # else: # cost = 1 # dist[row][col] = min(dist[row - 1][col] + 1, # deletion # dist[row][col - 1] + 1, # insertion # dist[row - 1][col - 1] + cost) # substitution # # return dist[rows-1][cols-1] return levenshtein_distance_(seq1, seq2) def euclidean_distance(u, v): """ returns the euclidean distance :param u: :param v: :return: """ u = Vector(u) v = Vector(v) r = u - v return r.norm(2) def manhattan_distance(u, v): """ returns the Manhattan distance :param u: :param v: :return: """ u = Vector(u) v = Vector(v) r = u - v return r.norm(1) def norm(u, v, degree=2): u = Vector(u) v = Vector(v) r = u - v return r.norm(degree)
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/analysis/distance_metrics.py
distance_metrics.py
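The metrics above can be exercised directly on toy inputs. A sketch assuming abpytools was built with its compiled Cython backend (distance_metrics_):

from abpytools.analysis.distance_metrics import (cosine_distance, hamming_distance,
                                                 levenshtein_distance, euclidean_distance)

u = [1.0, 0.0, 1.0]
v = [0.0, 1.0, 1.0]
print(cosine_distance(u, v))                 # angle-based distance between feature vectors
print(euclidean_distance(u, v))              # L2 norm of u - v
print(hamming_distance('SEND', 'SAND'))      # equal-length sequences only
print(levenshtein_distance('SEND', 'AND'))   # edit distance, any lengths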
from abpytools.core.chain_collection import ChainCollection
from abpytools.core.cache import Cache
import numpy as np


class ChainDomains(ChainCollection):

    def __init__(self, antibody_objects=None, path=None, verbose=True, show_progressbar=True, n_threads=10):
        super().__init__(antibody_objects=antibody_objects)
        if antibody_objects:
            self.load()
        else:
            self.__init__(antibody_objects=ChainCollection.load_from_file(path=path, verbose=verbose,
                                                                          show_progressbar=show_progressbar,
                                                                          n_threads=n_threads))
        self._cache = Cache(max_cache_size=5)

    def cdr_lengths(self):
        """
        method to obtain cdr_lengths
        :return: m by n matrix with CDR lengths, where m is the number of antibodies
        in ChainCollection and n is three, corresponding to the three CDRs.
        """
        if 'cdr_lengths' not in self._cache:
            cdr_length_matrix = np.zeros((self.n_ab, 3), dtype=int)
            cdr_sequences = self.cdr_sequences()
            for m, antibody in enumerate(self.antibody_objects):
                for n, cdr in enumerate(['CDR1', 'CDR2', 'CDR3']):
                    cdr_length_matrix[m, n] = len(cdr_sequences[antibody.name][cdr])
            self._cache.update(key='cdr_lengths', data=cdr_length_matrix)

        return self._cache['cdr_lengths']

    def cdr_sequences(self):
        """
        method that returns sequences of each cdr
        :return: dictionary of dictionaries with keys 'CDR1', 'CDR2' and 'CDR3' containing
        a string with the respective amino acid sequence
        """
        if 'cdr_sequences' not in self._cache:
            cdr_sequences = dict()
            for antibody in self.antibody_objects:
                dict_i = dict()
                for cdr in ['CDR1', 'CDR2', 'CDR3']:
                    self.sequence_splitter_helper(antibody=antibody,
                                                  region=cdr,
                                                  index=0,
                                                  dict_i=dict_i)
                cdr_sequences[antibody.name] = dict_i
            self._cache.update(key='cdr_sequences', data=cdr_sequences)

        return self._cache['cdr_sequences']

    def framework_length(self):
        framework_length_matrix = np.zeros((self.n_ab, 4), dtype=int)
        fr_sequences = self.framework_sequences()

        for m, antibody in enumerate(self.antibody_objects):
            for n, framework in enumerate(['FR1', 'FR2', 'FR3', 'FR4']):
                framework_length_matrix[m, n] = len(fr_sequences[antibody.name][framework])

        return framework_length_matrix

    def framework_sequences(self):
        framework_sequences = dict()
        for antibody in self.antibody_objects:
            dict_i = dict()
            for framework in ['FR1', 'FR2', 'FR3', 'FR4']:
                self.sequence_splitter_helper(antibody=antibody,
                                              region=framework,
                                              index=1,
                                              dict_i=dict_i)
            framework_sequences[antibody.name] = dict_i
        return framework_sequences

    @staticmethod
    def sequence_splitter_helper(antibody, region, index, dict_i):
        seq_i = list()
        indices = antibody.ab_regions()[index][region]
        for i in indices:
            seq_i.append(antibody.sequence[i])
        dict_i[region] = ''.join(seq_i)
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/features/regions.py
regions.py
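A usage sketch for ChainDomains (illustrative; 'heavy_chains.fasta' is a placeholder path):

from abpytools.features.regions import ChainDomains

domains = ChainDomains(path='heavy_chains.fasta')

# (n_ab, 3) matrix of CDR1-3 lengths; results are cached after the first call
print(domains.cdr_lengths())
print(domains.cdr_sequences()[domains.names[0]]['CDR3'])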
from collections import Counter, defaultdict from itertools import product import re aa_order = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'] # this classification is based on the Shen paper and takes into account the aa side chain dipole and volume # for more information check http://www.pnas.org/content/104/11/4337/suppl/DC1 aa_group = {'A': '0', 'G': '0', 'V': '0', 'I': '1', 'L': '1', 'F': '1', 'P': '1', 'Y': '2', 'M': '2', 'T': '2', 'S': '2', 'H': '3', 'N': '3', 'Q': '3', 'W': '3', 'R': '4', 'K': '4', 'D': '5', 'E': '5', 'C': '6'} def chou_pseudo_aa_composition(*sequences): """ M.K. Gupta , R. Niyogi & M. Misra (2013) An alignment-free method to find similarity among protein sequences via the general form of Chou’s pseudo amino acid composition, SAR and QSAR in Environmental Research, 24:7, 597-609, DOI: 10.1080/1062936X.2013.773378 Args: *sequences: amino acid sequences Returns: list of Chou's pseudo amino acid composition for each sequence """ # first the aa count aa_count_dict = [aa_composition(seq) for seq in sequences] # distance to first aa_distance_to_first_dict = [distance_to_first(x) for x in sequences] aa_distribution_dict = [aa_distribution(seq, aa_c, aa_dist) for seq, aa_c, aa_dist in zip(sequences, aa_count_dict, aa_distance_to_first_dict)] # create lists with amino acids in the right order aa_count = [order_seq(aa_count_dict_i) for aa_count_dict_i in aa_count_dict] aa_distance_to_first = [order_seq(aa_distance_to_first_i) for aa_distance_to_first_i in aa_distance_to_first_dict] aa_dist = [order_seq(aa_distribution_dict_i) for aa_distribution_dict_i in aa_distribution_dict] return [x + y + z for x, y, z in zip(aa_count, aa_distance_to_first, aa_dist)] def aa_composition(seq): """ Number of amino acids in a given sequence. Args: seq (str): A string representing a sequence Returns: Counter with amino acid composition """ return Counter(seq) def aa_frequency(seq): """ Normalised amino acid composition. Args: seq (str): A string representing a sequence Returns: Dictionary with amino acid frequency """ aa_count = aa_composition(seq) total = sum(aa_count.values()) return {key: value/total for key, value in aa_count.items()} def distance_to_first(seq): """ Cumulative distance of each of the twenty amino acids to the first residue, Args: seq (str): A string representing a sequence Returns: Dictionary with cumulative """ return {x: sum([m.start() for m in re.finditer(x, seq)]) for x in aa_order} def aa_distribution(seq, aa_count, aa_distance_to_first): """ Amino acid distribution described in An alignment-free method to find similarity among protein sequences via the general form of Chou’s pseudo amino acid composition. Args: seq (str): amino acid sequence aa_count (dict): aminod acid count of sequence aa_distance_to_first (dict): distance to first for each amino acid to first position Returns: dict """ aa_dist_dict = defaultdict(int) for i, aa in enumerate(seq): aa_dist_dict[aa] += (i - (aa_distance_to_first[aa] / aa_count[aa])) ** 2 / aa_count[aa] return aa_dist_dict def order_seq(seq_dict): """ Orders dictionary to a list Args: seq_dict (dict): dictionary with amino acid keys Returns: A list with ordered amino acid """ return [seq_dict[aa] if aa in seq_dict else 0 for aa in aa_order] def triad_method(*sequences): """ Triad featurisation method described in Shen J. et al. (2006). Predicting protein–protein interactions based only on sequences information. PNAS, 104(11), pp: 4337-4341. 
Args: *sequences (list): sequence of amino acids Returns: list of lists with results of triad method """ d_matrix = [] for sequence in sequences: # start dictionary with all 343 triads/keys (7 classes ** 3) f_keys = [''.join(x) for x in product(['0', '1', '2', '3', '4', '5', '6'], ['0', '1', '2', '3', '4', '5', '6'], ['0', '1', '2', '3', '4', '5', '6'])] f_results = {x: 0 for x in f_keys} # classify aa in sequence v = [aa_group[aa] for aa in sequence] # get triads triads = [''.join(v[x:x + 3]) for x in range(len(v) - 2)] for triad in triads: f_results[triad] += 1 # normalise values f_max = max(f_results.values()) f_min = min(f_results.values()) d = [(f_results[key] - f_min) / f_max for key in f_keys] # append 343 dimensional vector to d_matrix d_matrix.append(d) # d_matrix has shape (len(sequences), 343) return d_matrix def side_chain_volume(sequences): pass def auto_covariance(sequences): pass
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/features/composition.py
composition.py
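The featurisation functions above are pure Python, so a toy example runs as-is (added for illustration; the two sequences are arbitrary):

from abpytools.features.composition import (aa_composition, aa_frequency,
                                            chou_pseudo_aa_composition, triad_method)

seqs = ['ARNDCEQGH', 'ARNDARND']
print(aa_composition(seqs[0]))
print(aa_frequency(seqs[0]))

# 60-dimensional descriptor per sequence:
# 20 counts + 20 distances-to-first + 20 distribution terms
chou = chou_pseudo_aa_composition(*seqs)
print(len(chou), len(chou[0]))

# 343-dimensional (7**3) normalised triad counts per sequence
triads = triad_method(*seqs)
print(len(triads[0]))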
from ..utils import DataLoader import itertools import pandas as pd import numpy as np available_regions = ['FR1', 'CDR1', 'FR2', 'CDR2', 'FR3', 'CDR3', 'FR4'] def numbering_table_sequences(region, numbering_scheme, chain): # next two conditionals are used to only extract the needed data with FR and CDR positions # it makes the loading of the data quicker when there are only CDRs or FRs if any([True if x.startswith('CDR') else False for x in region]): cdr_list = DataLoader(data_type='CDR_positions', data=[numbering_scheme, chain]) cdrs = cdr_list.get_data() else: cdrs = {} if any([True if x.startswith('FR') else False for x in region]): fr_list = DataLoader(data_type='Framework_positions', data=[numbering_scheme, chain]) frs = fr_list.get_data() else: frs = {} # pack it all up into a single dictionary whole_sequence_dict = {**cdrs, **frs} # get the sequence list in the correct order (since region has been sorted before) whole_sequence_list = [whole_sequence_dict[x] for x in region] # unpack whole_sequence_list into a single list whole_sequence = list(itertools.chain.from_iterable(whole_sequence_list)) return whole_sequence_dict, whole_sequence def numbering_table_region(region): # if 'all' is chosen then region becomes a list with all if region == 'all': region = ['FR1', 'CDR1', 'FR2', 'CDR2', 'FR3', 'CDR3', 'FR4'] # if region is a string (i.e. 'CDR1') it becomes a list and in the next block of code is checked if it # is a valid selection if isinstance(region, str): region = [region] # checks if the selected regions are all in the available_regions if not set(region).issubset(set(available_regions)): raise ValueError("The chosen region is not available" "Currently available regions: {}".format(available_regions)) # make sure the regions are in a logical order region.sort(key=lambda x: available_regions.index(x)) return region def numbering_table_multiindex(region, whole_sequence_dict): # [[(CDR1, L23), (CDR1, L24),..], ..., [(CDR3, L100), ...]] # here we get the list of lists from the line above pre_region_map = [[(region_i, numbering) for numbering in whole_sequence_dict[region_i]] for region_i in region] # which can be easily unpacked into a single list # region_map is a list of tuples that can be interpreted by pd.MultiIndex to form a two layer column system region_map = list(itertools.chain.from_iterable(pre_region_map)) multi_index = pd.MultiIndex.from_tuples(tuples=region_map, names=['Region', 'Numbering']) return multi_index def germline_identity_pd(heavy_identity, light_identity, internal_heavy, internal_light, names): regions = ['CDR1', 'CDR2', 'CDR3', 'FR1', 'FR2', 'FR3', 'Total'] columns = pd.MultiIndex.from_tuples([('Light', x) for x in regions] + [('Heavy', x) for x in regions] + [('Average', x) for x in regions], names=['Chain', 'Region']) df = pd.DataFrame(columns=columns, index=names) for column in columns: if column[0] == 'Light': df[column] = [light_identity[x][column[1]] if column[1] in light_identity[x] else np.NaN for x in internal_light] # df[column] = list(map(lambda x: light_identity[x][column[1]] if column[1] in light_identity[x] else np.NaN, # internal_light)) elif column[0] == 'Heavy': df[column] = [heavy_identity[x][column[1]] if column[1] in heavy_identity[x] else np.NaN for x in internal_heavy] # df[column] = list(map(lambda x: heavy_identity[x][column[1]] if column[1] in heavy_identity[x] else np.NaN, # internal_heavy)) else: df[column] = (df[('Light', column[1])] + df[('Heavy', column[1])]) / 2 return df def to_numbering_table(as_array, region, chain, 
heavy_chains_numbering_table, light_chains_numbering_table, names, **kwargs): if chain == 'both': if as_array: t_heavy = heavy_chains_numbering_table(as_array=True, region=region, **kwargs) t_light = light_chains_numbering_table(as_array=True, region=region, **kwargs) data = np.concatenate((t_light, t_heavy), axis=1) else: t_heavy = heavy_chains_numbering_table(as_array=False, region=region, **kwargs) t_light = light_chains_numbering_table(as_array=False, region=region, **kwargs) t_heavy.reset_index(drop=True, inplace=True) t_light.reset_index(drop=True, inplace=True) data = pd.concat([t_light, t_heavy], axis=1, keys=['Light', 'Heavy']) elif chain == 'heavy': data = heavy_chains_numbering_table(as_array=as_array, region=region, **kwargs) elif chain == 'light': data = light_chains_numbering_table(as_array=as_array, region=region, **kwargs) else: raise ValueError("Unknown chain.") if not as_array: data.index = names return data
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/helper_functions.py
helper_functions.py
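A small illustration of how numbering_table_region validates and orders region selections (added, not from the original source):

from abpytools.core.helper_functions import numbering_table_region

# 'all' expands to the seven regions in canonical order
print(numbering_table_region('all'))

# single strings are wrapped in a list, and selections are sorted into region order
print(numbering_table_region(['CDR3', 'FR1']))   # -> ['FR1', 'CDR3']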
import os from .flags import * AVAILABLE_FORMATS = [FORMAT_FLAGS.JSON, FORMAT_FLAGS.FASTA, FORMAT_FLAGS.PB2] class CollectionBase: """ CollectionBase is the abpytools base class to develop the collection APIs """ @classmethod def load_from_json(cls, path, n_threads=20, verbose=True, show_progressbar=True): raise NotImplementedError @classmethod def load_from_pb2(cls, path, n_threads=20, verbose=True, show_progressbar=True): raise NotImplementedError @classmethod def load_from_fasta(cls, path, numbering_scheme=NUMBERING_FLAGS.CHOTHIA, n_threads=20, verbose=True, show_progressbar=True): raise NotImplementedError @classmethod def load_from_file(cls, path, n_threads=20, verbose=True, show_progressbar=True, **kwargs): """ Args: path: n_threads: int to specify number of threads to use in loading process verbose: bool controls the level of verbose show_progressbar: bool whether to display the progressbar kwargs: Returns: """ # check if path to file is valid if not os.path.isfile(path): raise ValueError("File does not exist!") file_format = path.split('.')[-1] if file_format not in AVAILABLE_FORMATS: raise ValueError("Expected the file format to be json, pb2 or fasta.") if file_format == FORMAT_FLAGS.JSON: collection = cls.load_from_json(path, n_threads=n_threads, verbose=verbose, show_progressbar=show_progressbar) elif file_format == FORMAT_FLAGS.PB2 and BACKEND_FLAGS.HAS_PROTO: collection = cls.load_from_pb2(path, n_threads=n_threads, verbose=verbose, show_progressbar=show_progressbar) elif file_format == FORMAT_FLAGS.PB2 and not BACKEND_FLAGS.HAS_PROTO: raise ValueError("protobuf has to be enabled to serialise objects with protobuf") elif file_format == FORMAT_FLAGS.FASTA: collection = cls.load_from_fasta(path, n_threads=n_threads, verbose=verbose, show_progressbar=show_progressbar, **kwargs) else: raise NotImplementedError return collection def save_to_json(self, path, update=True): raise NotImplementedError def save_to_pb2(self, path, update=True): raise NotImplementedError def save_to_fasta(self, path, update=True): raise NotImplementedError def save(self, file_format, path, update=True): """ Args: file_format: path: update: Returns: """ # check if path is for a new or existing file if os.path.isfile(path) and update: # read old data and append to it update = True else: # overwrite to path update = False if file_format not in AVAILABLE_FORMATS: raise ValueError("Expected the file format to be json, pb2 or fasta.") if file_format == FORMAT_FLAGS.JSON: collection = self.save_to_json(path, update=update) elif file_format == FORMAT_FLAGS.PB2 and BACKEND_FLAGS.HAS_PROTO: collection = self.save_to_pb2(path, update=update) elif file_format == FORMAT_FLAGS.PB2 and not BACKEND_FLAGS.HAS_PROTO: raise ValueError("protobuf has to be enabled to serialise objects with protobuf") elif file_format == FORMAT_FLAGS.FASTA: collection = self.save_to_fasta(path, update=update) else: raise NotImplementedError return collection
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/base.py
base.py
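A sketch of the format dispatch that CollectionBase provides, using ChainCollection as the concrete class (illustrative; 'chains.fasta' and 'chains_backup' are placeholder paths):

from abpytools import ChainCollection
from abpytools.core.flags import FORMAT_FLAGS

# load_from_file dispatches on the file extension (json, pb2 or fasta)
chains = ChainCollection.load_from_file(path='chains.fasta')

# save dispatches on file_format; save_to_json appends the '.json' extension
chains.save(file_format=FORMAT_FLAGS.JSON, path='chains_backup')
restored = ChainCollection.load_from_json('chains_backup.json')
print(restored.n_ab)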
from .chain import Chain import numpy as np import logging from abpytools.utils import PythonConfig, Download import json import os import pandas as pd from .helper_functions import numbering_table_sequences, numbering_table_region, numbering_table_multiindex from operator import itemgetter from urllib import parse from math import ceil from .base import CollectionBase from ..features.composition import * from ..analysis.distance_metrics import * from ..core.cache import Cache from multiprocessing import Manager, Process from inspect import signature from .utils import (json_ChainCollection_formatter, pb2_ChainCollection_formatter, pb2_ChainCollection_parser, fasta_ChainCollection_parser, json_ChainCollection_parser) from .flags import * # setting up debugging messages logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG) ipython_config = PythonConfig() if ipython_config.ipython_info == 'notebook': from tqdm import tqdm_notebook as tqdm # pragma: no cover else: from tqdm import tqdm if BACKEND_FLAGS.HAS_PROTO: from abpytools.core.formats import ChainCollectionProto class ChainCollection(CollectionBase): """ Object containing Chain objects and to perform analysis on the ensemble. """ def __init__(self, antibody_objects=None, load=True, **kwargs): """ Args: antibody_objects: load: **kwargs: """ if antibody_objects is None: self.antibody_objects = [] else: if isinstance(antibody_objects, ChainCollection): antibody_objects = antibody_objects.antibody_objects elif not isinstance(antibody_objects, list): raise ValueError("Expected a list, instead got object of type {}".format(type(antibody_objects))) elif not all(isinstance(obj, Chain) for obj in antibody_objects) and len(antibody_objects) > 0: raise ValueError("Expected a list containing objects of type Chain") self.antibody_objects = antibody_objects if len(set(x.numbering_scheme for x in antibody_objects)) == 1: self._numbering_scheme = antibody_objects[0].numbering_scheme else: raise ValueError("ChainCollection only support Chain objects with the same numbering scheme.") if len(set(x.chain for x in antibody_objects)) == 1: self._chain = antibody_objects[0].chain elif len(set(x.chain for x in antibody_objects)) == 0: self._chain = '' else: raise ValueError("ChainCollection only support Chain objects with the same chain type.") if load: self.load(**kwargs) def load(self, show_progressbar=True, n_threads=4, verbose=True): self.antibody_objects, self._chain = load_from_antibody_object( antibody_objects=self.antibody_objects, show_progressbar=show_progressbar, n_threads=n_threads, verbose=verbose) @classmethod def load_from_fasta(cls, path, numbering_scheme=NUMBERING_FLAGS.CHOTHIA, n_threads=20, verbose=True, show_progressbar=True): if not os.path.isfile(path): raise ValueError("File does not exist!") with open(path, 'r') as f: antibody_objects = fasta_ChainCollection_parser(f, numbering_scheme=numbering_scheme) chain_collection = cls(antibody_objects=antibody_objects, load=True, n_threads=n_threads, verbose=verbose, show_progressbar=show_progressbar) return chain_collection @classmethod def load_from_pb2(cls, path, n_threads=20, verbose=True, show_progressbar=True): with open(path, 'rb') as f: proto_parser = ChainCollectionProto() proto_parser.ParseFromString(f.read()) antibody_objects = pb2_ChainCollection_parser(proto_parser) chain_collection = cls(antibody_objects=antibody_objects, load=True, n_threads=n_threads, verbose=verbose, show_progressbar=show_progressbar) return chain_collection @classmethod def 
load_from_json(cls, path, n_threads=20, verbose=True, show_progressbar=True): with open(path, 'r') as f: data = json.load(f) antibody_objects = json_ChainCollection_parser(data) chain_collection = cls(antibody_objects=antibody_objects, load=True, n_threads=n_threads, verbose=verbose, show_progressbar=show_progressbar) return chain_collection def save_to_json(self, path, update=True): with open(os.path.join(path + '.json'), 'w') as f: data = json_ChainCollection_formatter(self.antibody_objects) json.dump(data, f, indent=2) def save_to_pb2(self, path, update=True): proto_parser = ChainCollectionProto() try: with open(os.path.join(path + '.pb2'), 'rb') as f: proto_parser.ParseFromString(f.read()) except IOError: # print("Creating new file") pass pb2_ChainCollection_formatter(self.antibody_objects, proto_parser) with open(os.path.join(path + '.pb2'), 'wb') as f: f.write(proto_parser.SerializeToString()) def save_to_fasta(self, path, update=True): with open(os.path.join(path + '.fasta'), 'w') as f: f.writelines(make_fasta(self.names, self.sequences)) def molecular_weights(self, monoisotopic=False): """ :param monoisotopic: bool whether to use monoisotopic values :return: list """ return [x.ab_molecular_weight(monoisotopic=monoisotopic) for x in self.antibody_objects] def extinction_coefficients(self, extinction_coefficient_database='Standard', reduced=False): """ :param extinction_coefficient_database: string with the name of the database to use :param reduced: bool whether to consider the cysteines to be reduced :return: list """ return [x.ab_ec(extinction_coefficient_database=extinction_coefficient_database, reduced=reduced) for x in self.antibody_objects] def hydrophobicity_matrix(self): if self._chain == CHAIN_FLAGS.HEAVY_CHAIN: num_columns = 158 else: num_columns = 138 abs_hydrophobicity_matrix = np.zeros((len(self.antibody_objects), num_columns)) for row in range(abs_hydrophobicity_matrix.shape[0]): abs_hydrophobicity_matrix[row] = self.antibody_objects[row].hydrophobicity_matrix return abs_hydrophobicity_matrix def get_object(self, name=''): """ :param name: str :return: """ if name in self.names: index = self.names.index(name) return self[index] else: raise ValueError('Could not find sequence with specified name') def ab_region_index(self): """ method to determine index of amino acids in CDR regions :return: dictionary with names as keys and each value is a dictionary with keys CDR and FR 'CDR' entry contains dictionaries with CDR1, CDR2 and CDR3 regions 'FR' entry contains dictionaries with FR1, FR2, FR3 and FR4 regions """ return {x.name: {'CDR': x.ab_regions()[0], 'FR': x.ab_regions()[1]} for x in self.antibody_objects} def numbering_table(self, as_array=False, region='all'): region = numbering_table_region(region) table = np.row_stack( [x.ab_numbering_table(as_array=True, region=region) for x in self.antibody_objects]) if as_array: return table else: # return the data as a pandas.DataFrame -> it's slower but looks nicer and makes it easier to get # the data of interest whole_sequence_dict, whole_sequence = numbering_table_sequences(region, self._numbering_scheme, self._chain) multi_index = numbering_table_multiindex(region=region, whole_sequence_dict=whole_sequence_dict) # create the DataFrame and assign the columns and index names data = pd.DataFrame(data=table) data.columns = multi_index data.index = self.names return data def igblast_server_query(self, chunk_size=50, show_progressbar=True, **kwargs): """ :param show_progressbar: :param chunk_size: :param kwargs: keyword arguments 
to pass to igblast_options :return: """ # check if query is larger than 50 sequences # if so split into several queries query_list = self._split_to_chunks(chunk_size=chunk_size) n_chunks = ceil(len(self) / chunk_size) if show_progressbar: for query in tqdm(query_list, total=n_chunks): self._igblast_server_query(query, **kwargs) else: for query in query_list: self._igblast_server_query(query, **kwargs) def _igblast_server_query(self, query, **kwargs): # prepare raw data fasta_query = make_fasta(names=query.names, sequences=query.sequences) # get url with igblast options url = igblast_options(sequences=fasta_query, **kwargs) # send and download query q = Download(url, verbose=False) try: q.download() except ValueError: # pragma: no cover raise ValueError("Check the internet connection.") # pragma: no cover igblast_result = q.html self._parse_igblast_query(igblast_result, query.names) def igblast_local_query(self, file_path): # load in file with open(file_path, 'r') as f: igblast_result = f.readlines() self._parse_igblast_query(igblast_result, self.names) def append(self, antibody_obj): self.antibody_objects += antibody_obj def pop(self, index=-1): if index >= len(self): raise ValueError("The given index is outside the range of the object.") element_to_pop = self[index] self._destroy(index=index) return element_to_pop def _destroy(self, index): del self.antibody_objects[index] # def filter(self): # # # TODO: complete method # pass # def set_numbering_scheme(self, numbering_scheme, realign=True): if realign: try: self._numbering_scheme = numbering_scheme self.antibody_objects, self._chain = load_from_antibody_object(self.antibody_objects) except ValueError: print("Could not realign sequences, nothing has been changed.") else: self._numbering_scheme = numbering_scheme @property def names(self): return [x.name for x in self.antibody_objects] @property def sequences(self): return [x.sequence for x in self.antibody_objects] @property def aligned_sequences(self): return [x.aligned_sequence for x in self.antibody_objects] @property def n_ab(self): return len(self.sequences) @property def chain(self): if self._chain == '': chains = set([x.chain for x in self.antibody_objects]) if len(chains) == 1: self._chain = next(iter(chains)) return self._chain else: raise ValueError('Different types of chains found in collection!') else: return self._chain @property def numbering_scheme(self): return self._numbering_scheme @property def charge(self): return np.array([x.ab_charge() for x in self.antibody_objects]) @property def total_charge(self): return {x.name: x.ab_total_charge() for x in self.antibody_objects} @property def germline_identity(self): return {x.name: x.germline_identity for x in self.antibody_objects} @property def germline(self): return {x.name: x.germline for x in self.antibody_objects} def _string_summary_basic(self): return "abpytools.ChainCollection Chain type: {}, Number of sequences: {}".format(self._chain, len(self.antibody_objects)) def __repr__(self): return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self)) def __len__(self): return len(self.antibody_objects) def __getitem__(self, indices): if isinstance(indices, int): return self.antibody_objects[indices] else: return ChainCollection(antibody_objects=list(itemgetter(*indices)(self.antibody_objects))) def __add__(self, other): if isinstance(other, ChainCollection): if self.numbering_scheme != other.numbering_scheme: raise ValueError("Concatenation requires ChainCollection " "objects to use the same numbering scheme.") else:
new_object_list = self.antibody_objects + other.antibody_objects elif isinstance(other, Chain): if self.numbering_scheme != other.numbering_scheme: raise ValueError("Concatenation requires Chain object to use " "the same numbering scheme as ChainCollection.") else: new_object_list = self.antibody_objects + [other] else: raise ValueError("Concatenation requires other to be of type " "ChainCollection, got {} instead".format(type(other))) return ChainCollection(antibody_objects=new_object_list, load=False) def _split_to_chunks(self, chunk_size=50): """ Helper function to split ChainCollection into size chunk_size and returns generator :param chunk_size: int, size of each chunk :return: generator to iterate of each chunk of size chunk_size """ if self.n_ab > chunk_size: for x in range(0, self.n_ab, chunk_size): yield self[range(x, min(x + chunk_size, self.n_ab))] else: yield self def _parse_igblast_query(self, igblast_result, names): igblast_result_dict = load_igblast_query(igblast_result, names) # unpack results for name in names: obj_i = self.get_object(name=name) obj_i.germline = igblast_result_dict[name][1] obj_i.germline_identity = igblast_result_dict[name][0] def loading_status(self): return [x.status for x in self.antibody_objects] def composition(self, method='count'): """ Amino acid composition of each sequence. Each resulting list is organised alphabetically (see composition.py) :param method: :return: """ if method == 'count': return [order_seq(aa_composition(seq)) for seq in self.sequences] elif method == 'freq': return [order_seq(aa_frequency(seq)) for seq in self.sequences] elif method == 'chou': return chou_pseudo_aa_composition(self.sequences) elif method == 'triad': return triad_method(self.sequences) elif method == 'hydrophobicity': return self.hydrophobicity_matrix() elif method == 'volume': return side_chain_volume(self.sequences) else: raise ValueError("Unknown method") def distance_matrix(self, feature=None, metric='cosine_similarity', multiprocessing=False): """ Returns the distance matrix using a given feature and distance metric :param feature: string with the name of the feature to use :param metric: string with the name of the metric to use :param multiprocessing: bool to turn multiprocessing on/off (True/False) :return: list of lists with distances between all sequences of len(data) with each list of len(data) when i==j M_i,j = 0 """ if feature is None: transformed_data = self.sequences elif isinstance(feature, str): # in this case the features are calculated using a predefined featurisation method (see self.composition) transformed_data = self.composition(method=feature) elif isinstance(feature, list): # a user defined list with vectors if len(feature) != self.n_ab: raise ValueError("Expected a list of size {}, instead got {}.".format(self.n_ab, len(feature))) else: transformed_data = feature else: raise TypeError("Unexpected input for feature argument.") if metric == 'cosine_similarity': distances = self._run_distance_matrix(transformed_data, cosine_similarity, multiprocessing=multiprocessing) elif metric == 'cosine_distance': distances = self._run_distance_matrix(transformed_data, cosine_distance, multiprocessing=multiprocessing) elif metric == 'hamming_distance': # be careful hamming distance only works when all sequences have the same length distances = self._run_distance_matrix(transformed_data, hamming_distance, multiprocessing=multiprocessing) elif metric == 'levenshtein_distance': distances = self._run_distance_matrix(transformed_data, levenshtein_distance, 
multiprocessing=multiprocessing) elif metric == 'euclidean_distance': distances = self._run_distance_matrix(transformed_data, euclidean_distance, multiprocessing=multiprocessing) elif metric == 'manhattan_distance': distances = self._run_distance_matrix(transformed_data, manhattan_distance, multiprocessing=multiprocessing) elif callable(metric): # user defined metric function user_function_signature = signature(metric) # number of params should be two, can take args with defaults though default_params = sum('=' in str(x) for x in user_function_signature.parameters.values()) if len(user_function_signature.parameters) - default_params > 2: raise ValueError("Expected a function with two parameters") else: distances = self._run_distance_matrix(transformed_data, metric, multiprocessing=multiprocessing) else: raise ValueError("Unknown distance metric.") return distances def _run_distance_matrix(self, data, metric, multiprocessing=False): """ Helper function to set up the calculation of each entry in the distance matrix :param data: list with all sequences :param metric: function that takes two strings and calculates distance :param multiprocessing: bool to turn multiprocessing on/off (True/False) :return: list of lists with distances between all sequences of len(data) with each list of len(data) when i==j M_i,j = 0 """ if multiprocessing: with Manager() as manager: cache = manager.dict() matrix = manager.dict() jobs = [Process(target=self._distance_matrix, args=(data, i, metric, cache, matrix)) for i in range(len(data))] for j in jobs: j.start() for j in jobs: j.join() # order the data return [matrix[x] for x in range(len(data))] else: cache = Cache(max_cache_size=(len(data) * (len(data) - 1)) / 2) matrix = Cache(max_cache_size=len(data)) for i in range(len(data)): cache.update(i, self._distance_matrix(data, i, metric, cache, matrix)) return [matrix[x] for x in range(len(data))] @staticmethod def _distance_matrix(data, i, metric, cache, matrix): """ Function to calculate the distances from the ith sequence to the remaining entries in the same row :param data: list with all sequences :param i: int that indicates the matrix row being processed :param metric: function that takes two strings and calculates distance :param cache: either a Manager or Cache object to cache results :param matrix: either a Manager or Cache object to store results in a matrix :return: None """ row = [] seq_1 = data[i] for j, seq_2 in enumerate(data): if i == j: row.append(0) continue keys = ('{}-{}'.format(i, j), '{}-{}'.format(j, i)) if keys[0] not in cache and keys[1] not in cache: cache[keys[0]] = metric(seq_1, seq_2) if keys[0] in cache: row.append(cache[keys[0]]) elif keys[1] in cache: row.append(cache[keys[1]]) else: raise ValueError("Bug in row {} and column {}".format(i, j)) matrix[i] = row def load_antibody_object(antibody_object): antibody_object.load() return antibody_object def load_from_antibody_object(antibody_objects, show_progressbar=True, n_threads=20, verbose=True): """ Args: antibody_objects (list): show_progressbar (bool): n_threads (int): verbose (bool): Returns: """ if verbose: print("Loading in antibody objects") from queue import Queue import threading q = Queue() for i in range(n_threads): t = threading.Thread(target=worker, args=(q,)) t.daemon = True t.start() if show_progressbar: for antibody_object in tqdm(antibody_objects): q.put(antibody_object) else: for antibody_object in antibody_objects: q.put(antibody_object) q.join() # if show_progressbar: # aprun =
parallelexecutor(use_bar='tqdm', n_jobs=n_jobs, timeout=timeout) # else: # aprun = parallelexecutor(use_bar='None', n_jobs=n_jobs, timeout=timeout) # # # load in objects in parallel # antibody_objects = aprun(total=len(antibody_objects))( # delayed(load_antibody_object)(obj) for obj in antibody_objects) status = [x.status for x in antibody_objects] failed = sum([1 if x == 'Not Loaded' or x == 'Failed' else 0 for x in status]) # remove objects that did not load while 'Not Loaded' in status: i = status.index('Not Loaded') del antibody_objects[i], status[i] while 'Failed' in status: i = status.index('Failed') del antibody_objects[i], status[i] if verbose: print("Failed to load {} objects in list".format(failed)) loaded_obj_chains = [x.chain for x in antibody_objects if x.status == 'Loaded'] if len(set(loaded_obj_chains)) == 1: chain = loaded_obj_chains[0] else: raise ValueError("All sequences must be of the same chain type: Light or Heavy", set([x.chain for x in loaded_obj_chains])) n_ab = len(loaded_obj_chains) if n_ab == 0: raise ValueError("Could not find any heavy or light chains in provided file or list of objects") return antibody_objects, chain def load_igblast_query(igblast_result, names): """ :param names: :param igblast_result: :return: """ try: from bs4 import BeautifulSoup except ImportError: raise ImportError("Please install bs4 to parse the IGBLAST html file:" "pip install beautifulsoup4") # instantiate BeautifulSoup object to make life easier with the html text! if isinstance(igblast_result, list): soup = BeautifulSoup(''.join(igblast_result), "lxml") else: soup = BeautifulSoup(igblast_result, "lxml") # get the results found in <div id="content"> and return the text as a string results = soup.find(attrs={'id': "content"}).text # get query names query = re.compile('Query: (.*)') query_ids = query.findall(results) # make sure that all the query names in query are in self.names if not set(names).issubset(set(query_ids)): raise ValueError('Make sure that you gave the same names in ChainCollection as you gave' 'in the query submitted to IGBLAST') # regular expression to get tabular data from each region all_queries = re.compile('(Query: .*?)\n\n\n\n', re.DOTALL) # parse the results with regex and get a list with each query data parsed_results = all_queries.findall(results) # regex to get the FR and CDR information for each string in parsed results region_finder = re.compile('^([CDR\d|FR\d|Total].*)', re.MULTILINE) result_dict = {} # iterate over each string in parsed result which contains the result for individual queries for query_result in parsed_results: # get query name and get the relevant object query_i = query.findall(query_result)[0] # check if the query being parsed is part of the object # (not all queries have to be part of the object, but the object names must be a subset of the queries) if query_i not in set(names): continue # list with CDR and FR info for query result region_info = region_finder.findall(query_result) # get the data from region info with dict comprehension germline_identity = {x.split()[0].split('-')[0]: float(x.split()[-1]) for x in region_info} # get the top germline assignment v_line_assignment = re.compile('V\s{}\t.*'.format(query_i)) # the top germline assignment is at the top (index 0) germline_result = v_line_assignment.findall(results)[0].split() # store the germline assignment and the bit score in a tuple as the germline attribute of Chain germline = (germline_result[2], float(germline_result[-2])) result_dict[query_i] = (germline_identity, 
germline) return result_dict def worker(q): while True: item = q.get() load_antibody_object(item) q.task_done() def make_fasta(names, sequences): file_string = '' for name, sequence in zip(names, sequences): file_string += '>{}\n'.format(name) file_string += '{}\n'.format(sequence) return file_string def igblast_options(sequences, domain='imgt', germline_db_V='IG_DB/imgt.Homo_sapiens.V.f.orf.p', germline_db_D='IG_DB/imgt.Homo_sapiens.D.f.orf', germline_db_J='IG_DB/imgt.Homo_sapiens.J.f.orf', num_alignments_V=1, num_alignments_D=1, num_alignments_J=1): values = {"queryseq": sequences, "germline_db_V": germline_db_V, "germline_db_D": germline_db_D, "germline_db_J": germline_db_J, "num_alignments_V": str(num_alignments_V), "num_alignments_D": str(num_alignments_D), "num_alignments_J": str(num_alignments_J), "outfmt": "7", "domain": domain, "program": "blastp"} url = "http://www.ncbi.nlm.nih.gov/igblast/igblast.cgi?" url += parse.urlencode(values) return url
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/chain_collection.py
chain_collection.py
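A minimal usage sketch for the ChainCollection API above. The sequences and names are illustrative placeholders, and the load=False constructor argument is assumed to behave as in __add__ (skipping the numbering web query), so everything here operates on the raw sequences alone:

from abpytools.core.chain import Chain
from abpytools.core.chain_collection import ChainCollection

# placeholder sequences -- swap in full antibody variable-domain sequences
chains = [Chain(sequence='EVQLVESGGGLVQPGG', name='ab1'),
          Chain(sequence='EVQLVESGGGLVKPGG', name='ab2')]
collection = ChainCollection(antibody_objects=chains, load=False)

# pairwise Levenshtein distances computed directly on the raw sequences
dist = collection.distance_matrix(feature=None, metric='levenshtein_distance')

# alphabetically ordered amino acid counts, compared with cosine similarity
sim = collection.distance_matrix(feature='count', metric='cosine_similarity')

print(collection.n_ab, collection.names, dist)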
from .chain_collection import Chain, ChainCollection import numpy as np from .chain import calculate_charge from abpytools.utils import DataLoader from .helper_functions import germline_identity_pd, to_numbering_table class Fab: def __init__(self, heavy_chain=None, light_chain=None, load=True, name=None): """ Args: heavy_chain: light_chain: load: name: """ # As a convention the order will always be light chain (index 0) and then heavy chain (index 1) # check if it's an Chain class if heavy_chain is None and light_chain is None: raise ValueError('heavy_chain and light_chain must be provided') if isinstance(heavy_chain, Chain): self._heavy_chain = heavy_chain else: raise ValueError('heavy_chain must be a Chain object, but got {} instead'.format(type(heavy_chain))) if isinstance(light_chain, Chain): self._light_chain = light_chain else: raise ValueError('light_chain must be a Chain object, but got {} instead'.format(type(light_chain))) if self._heavy_chain.chain != 'heavy': raise ValueError("heavy_chain is not a heavy chain, it is {}".format(self._heavy_chain.chain)) if self._light_chain.chain != 'light': raise ValueError("light_chain is not a light chain, it is {}".format(self._light_chain.chain)) self._pair_sequence = self[0].sequence + self[1].sequence if isinstance(name, str): self._name = name elif name is None: self._name = 'ID1' # keep the name of the heavy and light chains internally to keep everything in the right order self._internal_heavy_name = self[1].name self._internal_light_name = self[0].name def load(self): self._heavy_chain.load() self._light_chain.load() def molecular_weight(self, monoisotopic=False): return self[0].ab_molecular_weight(monoisotopic=monoisotopic) +\ self[1].ab_molecular_weight(monoisotopic=monoisotopic) def extinction_coefficient(self, reduced=False, normalise=False, **kwargs): light_ec = self[0].ab_ec(reduced=reduced, **kwargs) heavy_ec = self[1].ab_ec(reduced=reduced, **kwargs) if normalise: return (heavy_ec + light_ec) / (self.molecular_weight(**kwargs)) else: return heavy_ec + light_ec def hydrophobicity_matrix(self, **kwargs): return np.concatenate((self[0].ab_hydrophobicity_matrix(**kwargs), self[1].ab_hydrophobicity_matrix(**kwargs))) def charge(self, **kwargs): return np.concatenate((self[0].ab_charge(**kwargs), self[1].ab_charge(**kwargs))) def total_charge(self, ph=7.4, pka_database='Wikipedia'): available_pi_databases = ["EMBOSS", "DTASetect", "Solomon", "Sillero", "Rodwell", "Wikipedia", "Lehninger", "Grimsley"] assert pka_database in available_pi_databases, \ "Selected pI database {} not available. 
Available databases: {}".format(pka_database, ', '.join(available_pi_databases)) data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pka_database]) pka_data = data_loader.get_data() return calculate_charge(sequence=self.sequence, ph=ph, pka_values=pka_data) # def load_igblast_query(self, file_path, chain): # # if chain.lower() == 'light': # self._light_chains.load_igblast_query(file_path=file_path) # elif chain.lower() == 'heavy': # self._heavy_chains.load_igblast_query(file_path=file_path) # else: # raise ValueError('Specify if the data being loaded is for the heavy or light chain') def numbering_table(self, as_array=False, region='all', chain='both'): return to_numbering_table(as_array=as_array, region=region, chain=chain, heavy_chains_numbering_table=self._heavy_chain.ab_numbering_table, light_chains_numbering_table=self._light_chain.ab_numbering_table, names=[self.name]) @property def name(self): return self._name @property def sequence(self): return self._pair_sequence @property def aligned_sequence(self): return self[0].aligned_sequence + self[1].aligned_sequence @property def germline_identity(self): return self._germline_identity() # @property # def germline(self): # # heavy_germline = self._heavy_chains.germline # light_germline = self._light_chains.germline # # return {name_i: {"Heavy": heavy_germline[heavy_i], # "Light": light_germline[light_i]} for name_i, heavy_i, # light_i in zip(self._names, self._internal_heavy_name, # self._internal_light_name)} def _string_summary_basic(self): return "abpytools.Fab Name: {} Sequence length: {}".format(self.name, len(self.sequence)) def __len__(self): return len(self.sequence) def __repr__(self): return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self)) def __getitem__(self, index): if index == 0: # get light chain return self._light_chain elif index == 1: return self._heavy_chain else: raise ValueError("Can only slice object with either 0 (light chain) or 1 (heavy chain)") def _germline_identity(self): # empty dictionaries return false if bool(self[1].germline_identity) is False: # this means there is no information about the germline, # by default it will run a web query # this is a very lazy fix to do a web query using a Chain object... ChainCollection(antibody_objects=[self[1]]).igblast_server_query() if bool(self[0].germline_identity) is False: ChainCollection(antibody_objects=[self[0]]).igblast_server_query() return germline_identity_pd({self._internal_heavy_name: self[1].germline_identity}, {self._internal_light_name: self[0].germline_identity}, [self._internal_heavy_name], [self._internal_light_name], [self._name])
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/fab.py
fab.py
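A sketch of pairing two chains into a Fab, following the 0=light / 1=heavy indexing convention above. The sequences are placeholders; Chain.load_from_string queries the abnum server to number each chain and assign its type, so network access is assumed:

from abpytools.core.chain import Chain
from abpytools.core.fab import Fab

VH_SEQ = 'EVQLVE...'  # placeholder: supply a complete heavy-chain variable sequence
VL_SEQ = 'DIQMTQ...'  # placeholder: supply a complete light-chain variable sequence

heavy = Chain.load_from_string(sequence=VH_SEQ, name='ab1_heavy')
light = Chain.load_from_string(sequence=VL_SEQ, name='ab1_light')

fab = Fab(heavy_chain=heavy, light_chain=light, name='ab1')
mw = fab.molecular_weight()               # light-chain MW + heavy-chain MW
ec = fab.extinction_coefficient()         # summed extinction coefficients
light_chain, heavy_chain = fab[0], fab[1]  # indexing: 0 = light, 1 = heavy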
import re import numpy as np from ..utils import DataLoader, Download, NumberingException import pandas as pd from .helper_functions import numbering_table_sequences, numbering_table_region, numbering_table_multiindex from . import Cache from .flags import * class Chain: """The Chain object represents a single chain variable fragment (scFv) antibody. A scFv can be part of either the heavy or light chain of an antibody. The nature of the chain is determined by querying the Abnum server with the sequence, and is implemented with the Chain.ab_numbering() method. Attributes: numbering (list): the name of each position occupied by amino acids in sequence mw (float): the cached molecular weight pI (float): the cached isoelectric point of the sequence cdr (tuple): tuple with two dictionaries for CDR and FR with the index of the amino acids in each region germline_identity (dict): """ def __init__(self, sequence, name='Chain1', numbering_scheme=NUMBERING_FLAGS.CHOTHIA): """ The default Chain object constructor, which requires a string representing a scFv sequence. Args: sequence (str): Amino acid sequence name (str): Name of sequence numbering_scheme (str): numbering scheme name to perform alignment Examples: Instantiate a Chain object with the default constructor >>> from abpytools.core import Chain >>> from abpytools.core.flags import * >>> chain = Chain(sequence='MYSEQUENCE', name='my_seq', numbering_scheme=NUMBERING_FLAGS.CHOTHIA) """ self._raw_sequence = sequence.upper() self._sequence = self._raw_sequence.replace('-', '') self._aligned_sequence = None self._name = name self._chain = None self.numbering = None self.hydrophobicity_matrix = None self.mw = None self.pI = None self.cdr = None self._numbering_scheme = numbering_scheme self._loading_status = NUMBERING_FLAGS.NOT_LOADED self.germline_identity = dict() self.germline = tuple() self._cache = Cache(max_cache_size=10) @classmethod def load_from_string(cls, sequence, name='Chain1', numbering_scheme=NUMBERING_FLAGS.CHOTHIA): """ Returns an instantiated Chain object from a sequence Args: sequence: name: numbering_scheme: Returns: """ new_chain = cls(sequence=sequence, name=name, numbering_scheme=numbering_scheme) new_chain.load() return new_chain def load(self): """ Generates all the data: - Chain Numbering - Hydrophobicity matrix - Molecular weight - pI All the data is then stored in its respective attributes :return: """ if self._loading_status in [NUMBERING_FLAGS.FAILED, NUMBERING_FLAGS.NOT_LOADED]: try: self.numbering = self.ab_numbering() self._loading_status = NUMBERING_FLAGS.LOADED self.load() except ValueError: self._loading_status = NUMBERING_FLAGS.FAILED except NumberingException: self._loading_status = NUMBERING_FLAGS.UNNUMBERED elif self._loading_status == NUMBERING_FLAGS.LOADED: self.hydrophobicity_matrix = self.ab_hydrophobicity_matrix() self.mw = self.ab_molecular_weight() self.pI = self.ab_pi() self.cdr = self.ab_regions() else: # this should never happen...
raise ValueError("Unknown loading status") # pragma: no cover @staticmethod def determine_chain_type(numbering): if numbering[0][0] == 'H': chain = CHAIN_FLAGS.HEAVY_CHAIN elif numbering[0][0] == 'L': chain = CHAIN_FLAGS.LIGHT_CHAIN else: # couldn't determine chain type chain = CHAIN_FLAGS.UNKNOWN_CHAIN # pragma: no cover return chain def ab_numbering(self, server=OPTION_FLAGS.ABYSIS, **kwargs): """ Return list Returns: list: """ # store the amino positions/numbering in a list -> len(numbering) == len(self._sequence) numbering = get_ab_numbering(self._sequence, server, self._numbering_scheme, **kwargs) self._chain = self.determine_chain_type(numbering) return numbering def ab_numbering_table(self, as_array=False, replacement='-', region='all'): """ :param region: :param as_array: if True returns numpy.array object, if False returns a pandas.DataFrame :param replacement: value to replace empty positions :return: """ region = numbering_table_region(region=region) # if the object has not been loaded successfully yet need to try and get the numbering scheme using # ab_numbering method if self._loading_status in [NUMBERING_FLAGS.NOT_LOADED, NUMBERING_FLAGS.FAILED]: self.numbering = self.ab_numbering() whole_sequence_dict, whole_sequence = numbering_table_sequences(region=region, numbering_scheme=self._numbering_scheme, chain=self._chain) # now that all the prep has been done we can extract the position from each amino acid # according to the numbering scheme data = np.empty((len(whole_sequence)), dtype=str) for i, position in enumerate(whole_sequence): if position in self.numbering: data[i] = self._sequence[self.numbering.index(position)] else: # if there is no amino acid in the sequence that corresponds to position i we just replace it by # the replacement value, which is by default '-' data[i] = replacement self._aligned_sequence = data if as_array: # return the data as numpy.array -> much faster than creating a pandas.DataFrame return data.reshape(1, -1) else: # return the data as a pandas.DataFrame -> it's slower but looks nicer and makes it easier to get # the data of interest multi_index = numbering_table_multiindex(region=region, whole_sequence_dict=whole_sequence_dict) # create the DataFrame and assign the columns and index names data = pd.DataFrame(data=data).T data.columns = multi_index data.index = [self._name] return data def ab_hydrophobicity_matrix(self, hydrophobicity_scores=HYDROPHOBICITY_FLAGS.EW): # check if all the required parameters are in order if isinstance(hydrophobicity_scores, str): if hydrophobicity_scores not in OPTION_FLAGS.AVAILABLE_HYDROPHOBITY_SCORES: raise ValueError("Chosen hydrophobicity scores ({}) not available. 
\ Available hydrophobicity scores: {}".format( hydrophobicity_scores, ' ,'.join(OPTION_FLAGS.AVAILABLE_HYDROPHOBITY_SCORES) )) if self._loading_status == NUMBERING_FLAGS.NOT_LOADED: self.numbering = self.ab_numbering() if self._chain == 'NA': raise ValueError("Could not determine chain type") data_loader = DataLoader(data_type='NumberingSchemes', data=[self._numbering_scheme, self._chain]) whole_sequence_dict = data_loader.get_data() # whole_sequence is a list with all the amino acid positions in the selected numbering scheme whole_sequence = whole_sequence_dict # get the dictionary with the hydrophobicity scores data_loader = DataLoader(data_type='AminoAcidProperties', data=['hydrophobicity', hydrophobicity_scores + 'Hydrophobicity']) aa_hydrophobicity_scores = data_loader.get_data() return calculate_hydrophobicity_matrix(whole_sequence=whole_sequence, numbering=self.numbering, aa_hydrophobicity_scores=aa_hydrophobicity_scores, sequence=self._sequence) def ab_regions(self): """ method to determine Chain regions (CDR and Framework) of each amino acid in sequence :return: """ if 'cdrs' not in self._cache: if self._loading_status == NUMBERING_FLAGS.NOT_LOADED: self.numbering = self.ab_numbering() if self.numbering == 'NA': raise ValueError("Cannot return CDR positions without the antibody numbering information") data_loader = DataLoader(data_type='CDR_positions', data=[self._numbering_scheme, self._chain]) cdr_positions = data_loader.get_data() data_loader = DataLoader(data_type='Framework_positions', data=[self._numbering_scheme, self._chain]) framework_position = data_loader.get_data() cdrs = calculate_cdr(numbering=self.numbering, cdr_positions=cdr_positions, framework_positions=framework_position) self._cache.update('cdrs', cdrs) return self._cache['cdrs'] def ab_molecular_weight(self, monoisotopic=False): if monoisotopic: data_loader = DataLoader(data_type='AminoAcidProperties', data=['MolecularWeight', 'monoisotopic']) else: data_loader = DataLoader(data_type='AminoAcidProperties', data=['MolecularWeight', 'average']) mw_dict = data_loader.get_data() return calculate_mw(self._sequence, mw_dict) def ab_pi(self, pi_database='Wikipedia'): assert pi_database in OPTION_FLAGS.AVAILABLE_PI_VALUES, \ "Selected pI database {} not available. 
" \ "Available databases: {}".format(pi_database, ' ,'.join( OPTION_FLAGS.AVAILABLE_PI_VALUES)) data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pi_database]) pi_data = data_loader.get_data() return calculate_pi(sequence=self._sequence, pi_data=pi_data) def ab_ec(self, extinction_coefficient_database='Standard', reduced=False, normalise=False, **kwargs): if reduced: extinction_coefficient_database += '_reduced' data_loader = DataLoader(data_type='AminoAcidProperties', data=['ExtinctionCoefficient', extinction_coefficient_database]) ec_data = data_loader.get_data() if normalise: return calculate_ec(sequence=self._sequence, ec_data=ec_data) / self.ab_molecular_weight(**kwargs) else: return calculate_ec(sequence=self._sequence, ec_data=ec_data) def ab_format(self): return {"name": self._name, "sequence": self._sequence, "numbering": self.numbering, "chain": self._chain, "MW": self.mw, "CDR": self.cdr, "numbering_scheme": self._numbering_scheme, "pI": self.pI} def ab_charge(self, align=True, ph=7.4, pka_database='Wikipedia'): """ Method to calculate the charges for each amino acid of antibody :param pka_database: :param ph: :param align: if set to True an alignment will be performed, if it hasn't been done already using the ab_numbering method :return: array with amino acid charges """ assert pka_database in OPTION_FLAGS.AVAILABLE_PI_VALUES, \ "Selected pI database {} not available. " \ "Available databases: {}".format(pka_database, ' ,'.join(OPTION_FLAGS.AVAILABLE_PI_VALUES)) data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pka_database]) pka_data = data_loader.get_data() if align: # get the first (and only) row sequence = self.ab_numbering_table(as_array=True)[0] else: sequence = list(self.sequence) return np.array([amino_acid_charge(x, ph, pka_data) for x in sequence]) def ab_total_charge(self, ph=7.4, pka_database=PI_FLAGS.WIKIPEDIA): assert pka_database in OPTION_FLAGS.AVAILABLE_PI_VALUES, \ "Selected pI database {} not available. 
" \ "Available databases: {}".format(pka_database, ' ,'.join(OPTION_FLAGS.AVAILABLE_PI_VALUES)) data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pka_database]) pka_data = data_loader.get_data() return calculate_charge(sequence=self._sequence, ph=ph, pka_values=pka_data) @property def chain(self): return self._chain @property def name(self): return self._name def set_name(self, name): self._name = name @property def sequence(self): return self._sequence @property def aligned_sequence(self): if self._aligned_sequence is None: _ = self.ab_numbering_table(as_array=True, replacement='-') return self._aligned_sequence.tolist() @property def status(self): return self._loading_status @property def numbering_scheme(self): return self._numbering_scheme def _string_summary_basic(self): return "abpytools.Chain Name: {}, Chain type: {}, Sequence length: {}, Status: {}".format( self._name, self._chain, len(self._sequence), self._loading_status) # pragma: no cover def __repr__(self): return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self)) # pragma: no cover def __len__(self): return len(self.sequence) def get_ab_numbering(sequence, server, numbering_scheme, timeout=30): """ :rtype: list """ # check which server to use to get numbering if server == OPTION_FLAGS.ABYSIS: # find out which numbering scheme to use if numbering_scheme == NUMBERING_FLAGS.CHOTHIA: scheme = '-c' elif numbering_scheme in (NUMBERING_FLAGS.CHOTHIA_EXT, NUMBERING_FLAGS.MARTIN): scheme = '-a' elif numbering_scheme == NUMBERING_FLAGS.KABAT: scheme = '-k' else: raise ValueError("{} numbering scheme is unknown.".format(numbering_scheme.capitalize())) # prepare the url string to query server url = f"http://www.bioinf.org.uk/cgi-bin/abnum/abnum.pl?plain=1&aaseq={sequence}&scheme={scheme}" # use the Download class from utils to get output numbering_table = Download(url, verbose=False, timeout=timeout) try: numbering_table.download() except ValueError: raise ValueError("Check the internet connection.") # check whether the server returned an error if numbering_table.html.replace("\n", '') == 'Warning: Unable to number sequence' or len( numbering_table.html.replace("\n", '')) == 0: raise NumberingException("Unable to number sequence") # parse the results parsed_numbering_table = re.findall("[\S| ]+", numbering_table.html) # get the numbering from the parsed table numbering = [x[:-2] for x in parsed_numbering_table if x[-1] != '-'] # TODO: add more server options else: numbering = [''] return numbering def calculate_hydrophobicity_matrix(whole_sequence, numbering, aa_hydrophobicity_scores, sequence): # instantiate numpy array (whole sequence includes all the amino acid positions of the VH/VL, even the ones # that aren't occupied -> these will be filled with zeros # hydrophobicity_matrix = np.zeros(len(whole_sequence)) # # # iterate through each position # for i, position in enumerate(whole_sequence): # # if position in numbering: # position_in_data = numbering.index(position) # hydrophobicity_matrix[i] = aa_hydrophobicity_scores[sequence[position_in_data]] # # return hydrophobicity_matrix # same thing as above but in a comprehension list return np.array([aa_hydrophobicity_scores[sequence[numbering.index(x)]] if x in numbering else 0 for x in whole_sequence]) def calculate_mw(sequence, mw_data): return sum(mw_data[x] for x in sequence) - (len(sequence) - 1) * mw_data['water'] def calculate_ec(sequence, ec_data): # ϵ280 = nW x 5,500 + nY x 1,490 + nC x 125 n_W = sequence.count('W') n_Y = sequence.count('Y') n_C = 
sequence.count('C') return n_W * ec_data['W'] + n_Y * ec_data['Y'] + n_C * ec_data['C'] def calculate_pi(sequence, pi_data): # algorithm implemented from http://isoelectric.ovh.org/files/practise-isoelectric-point.html # count number of D, E, C, Y, H, K, R d_count = sequence.count('D') e_count = sequence.count('E') c_count = sequence.count('C') y_count = sequence.count('Y') h_count = sequence.count('H') k_count = sequence.count('K') r_count = sequence.count('R') # initiate value of pH and nq (any number above 0) nq = 10 ph = 0 # define precision delta = 0.01 while nq > 0: if ph >= 14: raise Exception("Could not calculate pI (pH reached above 14)") # qn1, qn2, qn3, qn4, qn5, qp1, qp2, qp3, qp4 qn1 = -1 / (1 + 10 ** (pi_data['COOH'] - ph)) # C-terminus charge qn2 = - d_count / (1 + 10 ** (pi_data['D'] - ph)) # D charge qn3 = - e_count / (1 + 10 ** (pi_data['E'] - ph)) # E charge qn4 = - c_count / (1 + 10 ** (pi_data['C'] - ph)) # C charge qn5 = - y_count / (1 + 10 ** (pi_data['Y'] - ph)) # Y charge qp1 = h_count / (1 + 10 ** (ph - pi_data['H'])) # H charge qp2 = 1 / (1 + 10 ** (ph - pi_data['NH2'])) # N-terminus charge qp3 = k_count / (1 + 10 ** (ph - pi_data['K'])) # K charge qp4 = r_count / (1 + 10 ** (ph - pi_data['R'])) # R charge nq = qn1 + qn2 + qn3 + qn4 + qn5 + qp1 + qp2 + qp3 + qp4 # update pH ph += delta return ph def calculate_cdr(numbering, cdr_positions, framework_positions): """ :param numbering: :param cdr_positions: :param framework_positions: :return: """ cdrs = {'CDR1': list(), 'CDR2': list(), 'CDR3': list()} frameworks = {'FR1': list(), 'FR2': list(), 'FR3': list(), 'FR4': list()} for cdr in cdrs.keys(): cdr_positions_i = cdr_positions[cdr] for i, position in enumerate(numbering): if position in cdr_positions_i: cdrs[cdr].append(i) for framework in frameworks.keys(): framework_position_i = framework_positions[framework] for i, position in enumerate(numbering): if position in framework_position_i: frameworks[framework].append(i) return cdrs, frameworks def amino_acid_charge(amino_acid, ph, pka_values): if amino_acid in ['D', 'E', 'C', 'Y']: return -1 / (1 + 10 ** (pka_values[amino_acid] - ph)) elif amino_acid in ['K', 'R', 'H']: return 1 / (1 + 10 ** (ph - pka_values[amino_acid])) else: return 0 def calculate_charge(sequence, ph, pka_values): # This calculation would make more sense but is slower (~1.5-2x) # cooh = -1 / (1 + 10 ** (pka_values['COOH'] - ph)) # nh2 = 1 / (1 + 10 ** (ph - pka_values['NH2'])) # # return sum([amino_acid_charge(x, ph, pka_values) for x in list(sequence)]) + cooh + nh2 # Faster implementation # count number of D, E, C, Y, H, K, R d_count = sequence.count('D') e_count = sequence.count('E') c_count = sequence.count('C') y_count = sequence.count('Y') h_count = sequence.count('H') k_count = sequence.count('K') r_count = sequence.count('R') # qn1, qn2, qn3, qn4, qn5, qp1, qp2, qp3, qp4 qn1 = -1 / (1 + 10 ** (pka_values['COOH'] - ph)) # C-terminus charge qn2 = - d_count / (1 + 10 ** (pka_values['D'] - ph)) # D charge qn3 = - e_count / (1 + 10 ** (pka_values['E'] - ph)) # E charge qn4 = - c_count / (1 + 10 ** (pka_values['C'] - ph)) # C charge qn5 = - y_count / (1 + 10 ** (pka_values['Y'] - ph)) # Y charge qp1 = h_count / (1 + 10 ** (ph - pka_values['H'])) # H charge qp2 = 1 / (1 + 10 ** (ph - pka_values['NH2'])) # N-terminus charge qp3 = k_count / (1 + 10 ** (ph - pka_values['K'])) # K charge qp4 = r_count / (1 + 10 ** (ph - pka_values['R'])) # R charge nq = qn1 + qn2 + qn3 + qn4 + qn5 + qp1 + qp2 + qp3 + qp4 return nq
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/chain.py
chain.py
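A short sketch of the single-chain workflow implemented above (placeholder sequence; Chain.load() performs the abnum web query before filling in the derived attributes):

from abpytools.core.chain import Chain
from abpytools.core.flags import NUMBERING_FLAGS

chain = Chain(sequence='EVQLVE...',  # placeholder amino acid sequence
              name='my_vh', numbering_scheme=NUMBERING_FLAGS.CHOTHIA)
chain.load()  # numbering first, then hydrophobicity matrix, MW, pI and CDRs

if chain.status == NUMBERING_FLAGS.LOADED:
    cdrs, frameworks = chain.ab_regions()  # sequence indices of CDR1-3 / FR1-4
    charges = chain.ab_charge(align=True)  # per-position charges at pH 7.4
    table = chain.ab_numbering_table()     # one-row pandas DataFrame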
from abpytools.core.formats.utils import get_protobuf_numbering_scheme, get_numbering_scheme_from_protobuf from abpytools.core.chain import Chain def _name_wrapper(func): """ Internal usage only counter decorator to format sequence names. Args: func: Returns: """ def helper(*args): helper.idi += 1 name = func(*args) if isinstance(name, str): name = name.replace("%%IDI%%", str(helper.idi)) else: names = list() for name_i in name: names.append(name_i.replace("%%IDI%%", str(helper.idi))) name = names return name helper.idi = 0 return helper @_name_wrapper def _chain_name_getter(antibody_object): """ Internal function that sets the name of Chain objects. This is required because it is possible to work with a ChainCollection where a Chain does not have a name. However, a name is required internally to store data on disk. Args: antibody_object (Chain): Returns: """ # if antibody does not have name, generate name: # ID_chain_idi, where chain is heavy/light, idi is i = [1,..,N] if len(antibody_object.name) > 0: name = antibody_object.name else: name = f"ID_{antibody_object.chain}_%%IDI%%" return name @_name_wrapper def _fab_name_getter(name_i, light_chain, heavy_chain): """ Internal function that sets the name of Fab objects when they are stored on disk. Args: name_i (str): light_chain (Chain): heavy_chain (Chain): Returns: """ # if antibody does not have name, generate name: # ID_chain_idi, where chain is heavy/light, idi is i = [1,..,N] if len(name_i) > 0: names = name_i else: names = [] for chain in [light_chain, heavy_chain]: if len(chain.name) > 0: names.append(chain.name) else: names.append(f"ID_{chain.chain}_%%IDI%%") return names ############################################################################## # SAVERS ############################################################################## def json_FabCollection_formatter(fab_object): """ Internal function to serialise FabCollection objects in JSON format Args: fab_object (FabCollection): Returns: """ light_antibody_json = json_ChainCollection_formatter(fab_object._light_chains) heavy_antibody_json = json_ChainCollection_formatter(fab_object._heavy_chains) fab_data = dict() ordered_names = [] for light_antibody, heavy_antibody in zip( light_antibody_json['ordered_names'], heavy_antibody_json['ordered_names']): name = f"{light_antibody_json[light_antibody]['name']}-" \ f"{heavy_antibody_json[heavy_antibody]['name']}" fab_data[name] = [light_antibody_json[light_antibody], heavy_antibody_json[heavy_antibody]] ordered_names.append(name) fab_data['ordered_names'] = ordered_names return fab_data def json_ChainCollection_formatter(chain_objects): """ Internal function to serialise ChainCollection objects in JSON format Args: chain_objects (ChainCollection): Returns: """ data = dict() ordered_names = [] # reset wrapper counter _chain_name_getter.idi = 0 for antibody in chain_objects: name = _chain_name_getter(antibody) ordered_names.append(name) antibody_dict = antibody.ab_format() # use name generated from _chain_name_getter instead antibody_dict['name'] = name data[name] = antibody_dict data['ordered_names'] = ordered_names return data def pb2_ChainCollection_formatter(chain_objects, proto_parser, reset_status=True): """ Internal function to serialise a ChainCollection object to .pb2 format according to definitition in 'format/chain.proto'. 
Args: chain_objects (ChainCollection): proto_parser (ChainCollectionProto): reset_status (bool): Returns: """ # reset wrapper counter if reset_status: _chain_name_getter.idi = 0 for chain_object in chain_objects: pb2_add_chain(chain_object, proto_parser) def pb2_FabCollection_formatter(fab_object, proto_parser, reset_status=True): """ Internal function to serialise a FabCollection object to .pb2 format according to definitition in 'format/fab.proto'. Args: fab_object (FabCollection): proto_parser (FabCollectionProto): reset_status (bool): Returns: """ # reset wrapper counter if reset_status: _chain_name_getter.idi = 0 for name_i, light_chain_i, heavy_chain_i in zip(fab_object.names, fab_object._light_chains, fab_object._heavy_chains): proto_fab = proto_parser.fabs.add() name_i = _fab_name_getter(name_i, light_chain_i, heavy_chain_i) proto_fab.name = name_i add_Chain_to_protobuf(light_chain_i, proto_fab.light_chain) add_Chain_to_protobuf(heavy_chain_i, proto_fab.heavy_chain) def pb2_add_chain(chain_object, proto_parser): """ Populates a protobuf ProtoChain message from Chain object and adds it to ChainCollectionProto Args: chain_object (Chain): proto_parser (ChainCollectionProto): Returns: """ proto_antibody = proto_parser.chains.add() add_Chain_to_protobuf(chain_object, proto_antibody) def add_Chain_to_protobuf(antibody_obj, proto_obj): """ Helper function to populate a ProtoChain message. Args: antibody_obj (Chain): proto_obj (ChainProto): Returns: """ proto_obj.name = _chain_name_getter(antibody_obj) proto_obj.sequence = antibody_obj.sequence proto_obj.numbering_scheme = get_protobuf_numbering_scheme(antibody_obj.numbering_scheme) proto_obj.numbering.extend(antibody_obj.numbering) proto_obj.chain_type = antibody_obj.chain proto_obj.MW = antibody_obj.mw for x in {**antibody_obj.ab_regions()[0], **antibody_obj.ab_regions()[1]}.items(): proto_obj_cdr = proto_obj.region.add() proto_obj_cdr.region_name = x[0] proto_obj_cdr.region_positions.extend(x[1]) proto_obj.pI = antibody_obj.pI ############################################################################## # PARSERS ############################################################################## def fasta_ChainCollection_parser(raw_fasta, numbering_scheme): names = list() sequences = list() antibody_objects = list() for line in raw_fasta: if line.startswith(">"): names.append(line.replace("\n", "")[1:]) # if line is empty skip line elif line.isspace(): pass else: sequences.append(line.replace("\n", "")) if len(names) != len(sequences): raise ValueError("Error reading file: make sure it is FASTA format") for name, sequence in zip(names, sequences): antibody_objects.append(Chain(name=name, sequence=sequence, numbering_scheme=numbering_scheme)) return antibody_objects def pb2_FabCollection_parser(proto_parser): from abpytools.core.fab import Fab fab_objects = list() for fab_i in proto_parser.fabs: light_chain = pb2_Chain_parser(fab_i.light_chain) heavy_chain = pb2_Chain_parser(fab_i.heavy_chain) fab = Fab(light_chain=light_chain, heavy_chain=heavy_chain, name=fab_i.name) fab_objects.append(fab) return fab_objects def pb2_ChainCollection_parser(proto_parser): antibody_objects = list() names = list() for chain_i in proto_parser.chains: antibody_i = pb2_Chain_parser(chain_i) antibody_i._loading_status = 'Loaded' names.append(chain_i.name) antibody_objects.append(antibody_i) return antibody_objects def pb2_Chain_parser(proto_chain): """ Populate Chain object from protobuf file Args: proto_chain (ChainProto): Returns: """ chain_obj = 
Chain(name=proto_chain.name, sequence=proto_chain.sequence) if proto_chain.numbering_scheme: chain_obj._numbering_scheme = get_numbering_scheme_from_protobuf(proto_chain.numbering_scheme) if proto_chain.numbering: # convert google.protobuf.pyext._message.RepeatedScalarContainer to list chain_obj.numbering = list(proto_chain.numbering) else: chain_obj.numbering = chain_obj.ab_numbering() if proto_chain.chain_type: chain_obj._chain = proto_chain.chain_type else: chain_obj._chain = Chain.determine_chain_type(proto_chain.numbering) if proto_chain.MW: chain_obj.mw = proto_chain.MW else: chain_obj.mw = chain_obj.ab_molecular_weight() if proto_chain.region: regions = dict() for region in proto_chain.region: regions[region.region_name] = region.region_positions chain_obj.cdr = regions else: chain_obj.cdr = chain_obj.ab_regions() if proto_chain.pI: chain_obj.pI = proto_chain.pI else: chain_obj.pI = chain_obj.ab_pi() return chain_obj def json_FabCollection_parser(raw_data): from abpytools.core.fab import Fab fab_objects = list() ordered_names = raw_data.pop('ordered_names') for key_i in ordered_names: light_chain_i = json_Chain_parser(raw_data[key_i][0], raw_data[key_i][0]["name"]) heavy_chain_i = json_Chain_parser(raw_data[key_i][1], raw_data[key_i][1]["name"]) fab_i = Fab(light_chain=light_chain_i, heavy_chain=heavy_chain_i, name=key_i) fab_objects.append(fab_i) return fab_objects def json_ChainCollection_parser(raw_data): antibody_objects = list() ordered_names = raw_data.pop('ordered_names') for key_i in ordered_names: chain_i = json_Chain_parser(raw_data[key_i], key_i) antibody_objects.append(chain_i) return antibody_objects def json_Chain_parser(antibody_dict, name): antibody_i = Chain(name=name, sequence=antibody_dict['sequence']) antibody_i.numbering = antibody_dict['numbering'] antibody_i._chain = antibody_dict['chain'] antibody_i.mw = antibody_dict['MW'] antibody_i.cdr = antibody_dict["CDR"] antibody_i._numbering_scheme = antibody_dict["numbering_scheme"] antibody_i.pI = antibody_dict["pI"] antibody_i._loading_status = 'Loaded' return antibody_i
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/utils.py
utils.py
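A round-trip sketch for the JSON formatter/parser pair defined above. It assumes `collection` is an already-loaded ChainCollection; ChainCollection.save_to_json and load_from_json wrap essentially this logic:

import json
from abpytools.core.utils import json_ChainCollection_formatter, json_ChainCollection_parser

# serialise: names are regenerated by _chain_name_getter and stored together
# with an 'ordered_names' list that preserves the collection order
data = json_ChainCollection_formatter(collection.antibody_objects)
with open('chains.json', 'w') as f:
    json.dump(data, f, indent=2)

# parse back into a plain list of Chain objects
with open('chains.json', 'r') as f:
    chain_objects = json_ChainCollection_parser(json.load(f))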
from .chain_collection import ChainCollection import numpy as np import pandas as pd from .chain import calculate_charge from abpytools.utils import DataLoader from operator import itemgetter from .fab import Fab from .helper_functions import germline_identity_pd, to_numbering_table from .base import CollectionBase import os import json from .utils import (json_FabCollection_formatter, pb2_FabCollection_formatter, pb2_FabCollection_parser, json_FabCollection_parser) from .flags import * if BACKEND_FLAGS.HAS_PROTO: from abpytools.core.formats import FabCollectionProto class FabCollection(CollectionBase): def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None): """ Fab object container that handles combinations of light/heavy Chain pairs. Args: fab (list): heavy_chains (ChainCollection): light_chains (ChainCollection): names (list): """ # check if it's a Chain object if heavy_chains is None and light_chains is None and fab is None: raise ValueError('Provide a list of Chain objects or an ChainCollection object') # check if fab object is a list and if all object are abpytools.Fab objects if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab): self._fab = fab self._light_chains = ChainCollection([x[0] for x in self._fab]) self._heavy_chains = ChainCollection([x[1] for x in self._fab]) if fab is None and (heavy_chains is not None and light_chains is not None): if isinstance(heavy_chains, list): self._heavy_chains = ChainCollection(antibody_objects=heavy_chains) elif isinstance(heavy_chains, ChainCollection): self._heavy_chains = heavy_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if isinstance(light_chains, list): self._light_chains = ChainCollection(antibody_objects=light_chains) elif isinstance(light_chains, ChainCollection): self._light_chains = light_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if len(self._light_chains.loading_status()) == 0: self._light_chains.load() if len(self._heavy_chains.loading_status()) == 0: self._heavy_chains.load() if self._light_chains.n_ab != self._heavy_chains.n_ab: raise ValueError('Number of heavy chains must be the same of light chains') if isinstance(names, list) and all(isinstance(name, str) for name in names): if len(names) == self._heavy_chains.n_ab: self._names = names else: raise ValueError( 'Length of name list must be the same as length of heavy_chains/light chains lists') elif names is None: self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names, self._light_chains.names)] else: raise ValueError("Names expected a list of strings, instead got {}".format(type(names))) self._n_ab = self._light_chains.n_ab self._pair_sequences = [heavy + light for light, heavy in zip(self._heavy_chains.sequences, self._light_chains.sequences)] # keep the name of the heavy and light chains internally to keep everything in the right order self._internal_heavy_name = self._heavy_chains.names self._internal_light_name = self._light_chains.names # even though it makes more sense to draw all these values from the base Fab objects this is much slower # whenever self._n_ab > 1 it makes more sense to use the self._heavy_chain and self._light_chain containers # in all the methods # in essence the abpytools.Fab object is just a representative building block that could in future just # cache data and would then represent a speed up in the calculations def molecular_weights(self, monoisotopic=False): 
        return [heavy + light for heavy, light in
                zip(self._heavy_chains.molecular_weights(monoisotopic=monoisotopic),
                    self._light_chains.molecular_weights(monoisotopic=monoisotopic))]

    def extinction_coefficients(self, extinction_coefficient_database='Standard',
                                reduced=False, normalise=False, **kwargs):
        heavy_ec = self._heavy_chains.extinction_coefficients(
            extinction_coefficient_database=extinction_coefficient_database,
            reduced=reduced)
        light_ec = self._light_chains.extinction_coefficients(
            extinction_coefficient_database=extinction_coefficient_database,
            reduced=reduced)
        if normalise:
            return [(heavy + light) / mw for heavy, light, mw in
                    zip(heavy_ec, light_ec, self.molecular_weights(**kwargs))]
        else:
            return [heavy + light for heavy, light in zip(heavy_ec, light_ec)]

    def hydrophobicity_matrix(self):
        return np.column_stack((self._heavy_chains.hydrophobicity_matrix(),
                                self._light_chains.hydrophobicity_matrix()))

    def charge(self):
        return np.column_stack((self._heavy_chains.charge, self._light_chains.charge))

    def total_charge(self, ph=7.4, pka_database='Wikipedia'):
        available_pi_databases = ["EMBOSS", "DTASetect", "Solomon", "Sillero", "Rodwell",
                                  "Wikipedia", "Lehninger", "Grimsley"]
        assert pka_database in available_pi_databases, \
            "Selected pI database {} not available. Available databases: {}".format(
                pka_database, ', '.join(available_pi_databases))
        data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pka_database])
        pka_data = data_loader.get_data()
        return [calculate_charge(sequence=seq, ph=ph, pka_values=pka_data)
                for seq in self.sequences]

    def igblast_local_query(self, file_path, chain):
        if chain.lower() == 'light':
            self._light_chains.igblast_local_query(file_path=file_path)
        elif chain.lower() == 'heavy':
            self._heavy_chains.igblast_local_query(file_path=file_path)
        else:
            raise ValueError('Specify if the data being loaded is for the heavy or light chain')

    def igblast_server_query(self, **kwargs):
        self._light_chains.igblast_server_query(**kwargs)
        self._heavy_chains.igblast_server_query(**kwargs)

    def numbering_table(self, as_array=False, region='all', chain='both', **kwargs):
        return to_numbering_table(as_array=as_array, region=region, chain=chain,
                                  heavy_chains_numbering_table=self._heavy_chains.numbering_table,
                                  light_chains_numbering_table=self._light_chains.numbering_table,
                                  names=self.names, **kwargs)

    def _germline_pd(self):
        # all() is False when at least one stored germline identity value is falsy,
        # i.e. when germline information is missing for some chain
        if all([x for x in self._light_chains.germline_identity.values()]) is False:
            # this means there is no information about the germline,
            # by default it will run a web query
            self._light_chains.igblast_server_query()
        if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
            self._heavy_chains.igblast_server_query()
        heavy_chain_germlines = self._heavy_chains.germline
        light_chain_germlines = self._light_chains.germline
        data = np.array([[heavy_chain_germlines[x][0] for x in self._internal_heavy_name],
                         [heavy_chain_germlines[x][1] for x in self._internal_heavy_name],
                         [light_chain_germlines[x][0] for x in self._internal_light_name],
                         [light_chain_germlines[x][1] for x in self._internal_light_name]]).T
        df = pd.DataFrame(data=data,
                          columns=pd.MultiIndex.from_tuples([('Heavy', 'Assignment'),
                                                             ('Heavy', 'Score'),
                                                             ('Light', 'Assignment'),
                                                             ('Light', 'Score')]),
                          index=self.names)
        df.loc[:, (slice(None), 'Score')] = df.loc[:, (slice(None), 'Score')].apply(pd.to_numeric)
        return df

    def save_to_json(self, path, update=True):
        with open(os.path.join(path + '.json'), 'w') as f:
            fab_data = json_FabCollection_formatter(self)
            json.dump(fab_data, f, indent=2)

    def save_to_pb2(self, path, update=True):
        proto_parser = FabCollectionProto()
        try:
            with open(os.path.join(path + '.pb2'), 'rb') as f:
                proto_parser.ParseFromString(f.read())
        except IOError:
            # Creating new file
            pass
        pb2_FabCollection_formatter(self, proto_parser)
        with open(os.path.join(path + '.pb2'), 'wb') as f:
            f.write(proto_parser.SerializeToString())

    def save_to_fasta(self, path, update=True):
        raise NotImplementedError

    @classmethod
    def load_from_json(cls, path, n_threads=20, verbose=True, show_progressbar=True):
        with open(path, 'r') as f:
            data = json.load(f)
        fab_objects = json_FabCollection_parser(data)
        fab_collection = cls(fab=fab_objects)
        return fab_collection

    @classmethod
    def load_from_pb2(cls, path, n_threads=20, verbose=True, show_progressbar=True):
        with open(path, 'rb') as f:
            proto_parser = FabCollectionProto()
            proto_parser.ParseFromString(f.read())
        fab_objects = pb2_FabCollection_parser(proto_parser)
        fab_collection = cls(fab=fab_objects)
        return fab_collection

    @classmethod
    def load_from_fasta(cls, path, numbering_scheme=NUMBERING_FLAGS.CHOTHIA,
                        n_threads=20, verbose=True, show_progressbar=True):
        raise NotImplementedError

    def _get_names_iter(self, chain='both'):
        if chain == 'both':
            for light_chain, heavy_chain in zip(self._light_chains, self._heavy_chains):
                yield f"{light_chain.name}-{heavy_chain.name}"
        elif chain == 'light':
            for light_chain in self._light_chains:
                yield light_chain.name
        elif chain == 'heavy':
            for heavy_chain in self._heavy_chains:
                yield heavy_chain.name
        else:
            raise ValueError(f"Unknown chain type ({chain}), available options are: "
                             f"both, light or heavy.")

    @property
    def regions(self):
        heavy_regions = self._heavy_chains.ab_region_index()
        light_regions = self._light_chains.ab_region_index()
        return {name: {CHAIN_FLAGS.HEAVY_CHAIN: heavy_regions[heavy],
                       CHAIN_FLAGS.LIGHT_CHAIN: light_regions[light]}
                for name, heavy, light in zip(self.names, self._internal_heavy_name,
                                              self._internal_light_name)}

    @property
    def names(self):
        return self._names

    @property
    def sequences(self):
        return self._pair_sequences

    @property
    def aligned_sequences(self):
        return [heavy + light for heavy, light in
                zip(self._heavy_chains.aligned_sequences,
                    self._light_chains.aligned_sequences)]

    @property
    def n_ab(self):
        return self._n_ab

    @property
    def germline_identity(self):
        return self._germline_identity()

    @property
    def germline(self):
        return self._germline_pd()

    def _string_summary_basic(self):
        return "abpytools.FabCollection Number of sequences: {}".format(self._n_ab)

    def __len__(self):
        return self._n_ab

    def __repr__(self):
        return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))

    def __getitem__(self, indices):
        if isinstance(indices, int):
            return Fab(heavy_chain=self._heavy_chains[indices],
                       light_chain=self._light_chains[indices],
                       name=self.names[indices], load=False)
        else:
            return FabCollection(heavy_chains=list(itemgetter(*indices)(self._heavy_chains)),
                                 light_chains=list(itemgetter(*indices)(self._light_chains)),
                                 names=list(itemgetter(*indices)(self._names)))

    def _germline_identity(self):
        # all() is False when at least one stored germline identity value is falsy,
        # i.e. when germline information is missing for some chain
        if all([x for x in self._light_chains.germline_identity.values()]) is False:
            # this means there is no information about the germline,
            # by default it will run a web query
            self._light_chains.igblast_server_query()
        if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
            self._heavy_chains.igblast_server_query()
        return germline_identity_pd(self._heavy_chains.germline_identity,
                                    self._light_chains.germline_identity,
                                    self._internal_heavy_name,
                                    self._internal_light_name,
                                    self._names)

    def get_object(self, name):
        """

        :param name: str
        :return:
        """
        if name in self.names:
            index = self.names.index(name)
            return self[index]
        else:
            raise ValueError('Could not find sequence with specified name')
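A brief usage sketch for the FabCollection class above (the import path follows this package's layout; the JSON file name is hypothetical):

    from abpytools.core.fab_collection import FabCollection

    # Load a collection previously written with save_to_json()
    fabs = FabCollection.load_from_json('my_fabs.json')

    print(len(fabs), fabs.names)            # number of Fab pairs and their names
    print(fabs.extinction_coefficients())   # heavy EC + light EC for each pair
    first_fab = fabs[0]                     # integer indexing returns a single Fab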
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/fab_collection.py
fab_collection.py
import json from abpytools.home import Home class DataLoader: def __init__(self, data=[], data_type='', amino_acid_property=[]): """ :param data: list with the information required to access data in json file :param data_type: str must be one of the following: 'CDR_positions', 'NumberingSchemes' or 'AminoAcidProperties' :param amino_acid_property: temporary parameter :param misc: temporary parameter """ self.amino_acid_property = amino_acid_property self.directory_name = Home().homedir self.data = data self.data_types = ['CDR_positions', 'NumberingSchemes', 'AminoAcidProperties', 'Framework_positions'] if data_type not in self.data_types: raise ValueError("{} is not a valid data type. Available data types: {}".format( data_type, ', '.join(self.data_types))) else: self.data_type = data_type # checks values when object is instantiated if self.data_type == 'CDR_positions' or self.data_type == 'NumberingSchemes' \ or self.data_type == 'Framework_positions': if len(self.data) != 2: raise ValueError("Expected 2, instead of {} values.".format(len(self.data))) if self.data[0] not in ['chothia', 'kabat', 'chothia_ext']: raise ValueError("Got {}, but only {} is available".format(self.data[0], 'chothia')) if self.data[1] not in ["light", "heavy"]: raise ValueError("Got {}, but only light and heavy are available".format(self.data[1])) else: if len(self.data) != 2: raise ValueError("Expected 2, instead of {} values.".format(len(self.data))) if self.data[0] not in ["hydrophobicity", "pI", "MolecularWeight", "ExtinctionCoefficient"]: raise ValueError("Got {}, but only {} are available".format(self.data[0], """hydrophobicity, pI, ExtinctionCoefficient and MolecularWeight""")) if self.data[1] not in ["kdHydrophobicity", "wwHydrophobicity", "hhHydrophobicity", "mfHydrophobicity", "ewHydrophobicity", "EMBOSS", "DTASetect", "Solomon", "Sillero", "Rodwell", "Wikipedia", "Lehninger", "Grimsley", "average", "monoisotopic", "Standard", "Standard_reduced"]: raise ValueError("Got {}, but only {} are available".format(self.data[1], """ kdHydrophobicity ,wwHydrophobicity, hhHydrophobicity, mfHydrophobicity, ewHydrophobicity, EMBOSS, DTASetect, Solomon, Sillero, Rodwell, Wikipedia, Lehninger, Grimsley, average, monoisotopic, Standard and Standard_reduced """ )) def get_data(self): if self.data_type == 'CDR_positions': with open('{}/data/CDR_positions.json'.format(self.directory_name), 'r') as f: # need to access numbering scheme and chain type data = json.load(f)[self.data[0]][self.data[1]] elif self.data_type == 'Framework_positions': with open('{}/data/Framework_positions.json'.format(self.directory_name), 'r') as f: # need to access numbering scheme and chain type data = json.load(f)[self.data[0]][self.data[1]] elif self.data_type == 'NumberingSchemes': with open('{}/data/NumberingSchemes.json'.format(self.directory_name), 'r') as f: # need to access numbering scheme and chain type data = json.load(f)[self.data[0]][self.data[1]] else: with open('{}/data/AminoAcidProperties.json'.format(self.directory_name), 'r') as f: data = json.load(f)[self.data[0]][self.data[1]] return data
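A short usage sketch for DataLoader, using two of the data_type/data combinations validated above (the JSON files are read from under Home().homedir, as coded):

    from abpytools.utils.data_loader import DataLoader

    # CDR positions for chothia-numbered heavy chains
    cdr_positions = DataLoader(data_type='CDR_positions',
                               data=['chothia', 'heavy']).get_data()

    # pKa values from the EMBOSS set, as used by FabCollection.total_charge()
    pka_values = DataLoader(data_type='AminoAcidProperties',
                            data=['pI', 'EMBOSS']).get_data()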
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/utils/data_loader.py
data_loader.py
from urllib import request, error class Download: def __init__(self, url='', verbose=False, timeout=5): self.url = url self.verbose = verbose self.html = '' self.error = False self.timeout = timeout def download(self, user_agent='wswp', num_retries=2): # self.html, self.error = download(self.url, self.verbose, user_agent=user_agent, num_retries=num_retries, # timeout=self.timeout) try: self.html = download(self.url, self.verbose, user_agent=user_agent, num_retries=num_retries, timeout=self.timeout) except IOError: raise ValueError("Could not download requested page.") def download(url, verbose, user_agent='wswp', num_retries=2, decoding_format='utf-8', timeout=5): """ Function to download contents from a given url Input: url: str string with the url to download from user_agent: str Default 'wswp' num_retries: int Number of times to retry downloading if there is an error verbose: bool Print out url and errors decoding: "utf-8" Output: returns: str string with contents of given url """ # html_error = False if verbose: print('Downloading:', url) headers = {'User-agent': user_agent} request_obj = request.Request(url, headers=headers) try: with request.urlopen(request_obj, timeout=timeout) as response: html = response.read() except error.URLError as e: if verbose: print('Download error:', e.reason) # html = None # if num_retries > 0: # if hasattr(e, 'code') and 500 <= e.code < 600: # # retry 5XX HTTP errors # return download(url, user_agent, num_retries - 1)[0] # # elif hasattr(e, 'code') and e.code == 404: # else: # html_error = True raise IOError(e.reason) return html.decode(decoding_format)
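A usage sketch for the Download wrapper above (the URL is a placeholder):

    from abpytools.utils.downloads import Download

    d = Download(url='https://example.com', verbose=True, timeout=5)
    try:
        d.download(num_retries=2)
        print(d.html[:200])      # first 200 characters of the decoded page
    except ValueError as err:    # raised when the underlying request fails
        print(err)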
AbPyTools
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/utils/downloads.py
downloads.py
AbSort
=======

AbSort is a powerful Python package that performs 8 different types of
stable and unstable sorting algorithms on the list data structure, with
full documentation. The package is especially useful for competitive
programmers and developers, so choose your sorting algorithm based on
your requirements.

USAGE
======

Importing the library
---------------------

- import AbSort

Creating the object

sortObj = AbSort.SortingAlgo()

awsort
======

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.awsort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

SelectionSort
=============

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.selectionSort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

mergeSort
=========

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.mergeSort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

quickSort
=========

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.quickSort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

bogoSort
========

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.bogoSort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

insertionSort
=============

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.insertionSort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

binaryInsertionSort
===================

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.binaryInsertionSort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

bubbleSort
==========

myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

- Generating the sort result

sortedResult = sortObj.bubbleSort(myList)

- Since sortedResult now contains the sorted list, we can print it with
  print(sortedResult)

Dependencies
============

- Python v3.x is required.
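Since every algorithm shares the same call pattern, a single consolidated
sketch (method names as documented above) is:

    import AbSort

    sortObj = AbSort.SortingAlgo()
    myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]

    for sort in (sortObj.mergeSort, sortObj.quickSort, sortObj.bubbleSort):
        print(sort(myList))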
AbSort
/AbSort-0.0.1.tar.gz/AbSort-0.0.1/README.txt
README.txt
from xml.dom.minidom import parseString try: from http.client import HTTPSConnection except ImportError: from httplib import HTTPSConnection try: from urllib.parse import urlencode except ImportError: from urllib import urlencode __version__ = "1.0" API_SERVER = 'www.notifymyandroid.com' ADD_PATH = '/publicapi/notify' USER_AGENT="PyNMA/v%s"%__version__ def uniq_preserve(seq): # Dave Kirby # Order preserving seen = set() return [x for x in seq if x not in seen and not seen.add(x)] def uniq(seq): # Not order preserving return list({}.fromkeys(seq).keys()) class PyNMA(object): """PyNMA(apikey=[], developerkey=None) takes 2 optional arguments: - (opt) apykey: might me a string containing 1 key or an array of keys - (opt) developerkey: where you can store your developer key """ def __init__(self, apikey=[], developerkey=None): self._developerkey = None self.developerkey(developerkey) if apikey: if type(apikey) == str: apikey = [apikey] self._apikey = uniq(apikey) def addkey(self, key): "Add a key (register ?)" if type(key) == str: if not key in self._apikey: self._apikey.append(key) elif type(key) == list: for k in key: if not k in self._apikey: self._apikey.append(k) def delkey(self, key): "Removes a key (unregister ?)" if type(key) == str: if key in self._apikey: self._apikey.remove(key) elif type(key) == list: for k in key: if key in self._apikey: self._apikey.remove(k) def developerkey(self, developerkey): "Sets the developer key (and check it has the good length)" if type(developerkey) == str and len(developerkey) == 48: self._developerkey = developerkey def push(self, application="", event="", description="", url="", contenttype=None, priority=0, batch_mode=False, html=False): """Pushes a message on the registered API keys. takes 5 arguments: - (req) application: application name [256] - (req) event: event name [1000] - (req) description: description [10000] - (opt) url: url [512] - (opt) contenttype: Content Type (act: None (plain text) or text/html) - (opt) priority: from -2 (lowest) to 2 (highest) (def:0) - (opt) batch_mode: push to all keys at once (def:False) - (opt) html: shortcut for contenttype=text/html Warning: using batch_mode will return error only if all API keys are bad cf: http://nma.usk.bz/api.php """ datas = { 'application': application[:256].encode('utf8'), 'event': event[:1024].encode('utf8'), 'description': description[:10000].encode('utf8'), 'priority': priority } if url: datas['url'] = url[:512] if contenttype == "text/html" or html == True: # Currently only accepted content type datas['content-type'] = "text/html" if self._developerkey: datas['developerkey'] = self._developerkey results = {} if not batch_mode: for key in self._apikey: datas['apikey'] = key res = self.callapi('POST', ADD_PATH, datas) results[key] = res else: datas['apikey'] = ",".join(self._apikey) res = self.callapi('POST', ADD_PATH, datas) results[datas['apikey']] = res return results def callapi(self, method, path, args): headers = { 'User-Agent': USER_AGENT } if method == "POST": headers['Content-type'] = "application/x-www-form-urlencoded" http_handler = HTTPSConnection(API_SERVER) http_handler.request(method, path, urlencode(args), headers) resp = http_handler.getresponse() try: res = self._parse_reponse(resp.read()) except Exception as e: res = {'type': "pynmaerror", 'code': 600, 'message': str(e) } pass return res def _parse_reponse(self, response): root = parseString(response).firstChild for elem in root.childNodes: if elem.nodeType == elem.TEXT_NODE: continue if elem.tagName == 'success': 
res = dict(list(elem.attributes.items())) res['message'] = "" res['type'] = elem.tagName return res if elem.tagName == 'error': res = dict(list(elem.attributes.items())) res['message'] = elem.firstChild.nodeValue res['type'] = elem.tagName return res
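A usage sketch for the PyNMA class above (the API key is a placeholder; push() returns a dict of parsed server responses keyed by API key):

    from pynma import PyNMA  # import path assumed from this package layout

    p = PyNMA(apikey='YOUR_API_KEY')
    results = p.push(application='MyApp',
                     event='build finished',
                     description='All tests passed.',
                     priority=0)
    print(results)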
AbakaffeNotifier
/AbakaffeNotifier-1.0.0.tar.gz/AbakaffeNotifier-1.0.0/pynma/pynma.py
pynma.py
class Up(): def Encode(data): code = data x = code.replace("q","あ") x= x.replace("Q","ぃ") x= x.replace("w","い") x= x.replace("W","ぅ") x= x.replace("e","う") x= x.replace("E","ぇ") x= x.replace("r","え") x= x.replace("R","ぉ") x= x.replace("t","お") x= x.replace("T","か") x= x.replace("y","が") x= x.replace("Y","き") x= x.replace("u","ぎ") x= x.replace("U","く") x= x.replace("i","ぐ") x= x.replace("I","け") x= x.replace("o","げ") x= x.replace("O","こ") x= x.replace("p","ご") x= x.replace("P","さ") x= x.replace("a","ざ") x= x.replace("A","し") x= x.replace("s","じ") x= x.replace("S","す") x= x.replace("d","ず") x= x.replace("D","せ") x= x.replace("f","ぜ") x= x.replace("F","そ") x= x.replace("g","た") x= x.replace("G","だ") x= x.replace("h","ち") x= x.replace("H","ぢ") x= x.replace("j","っ") x= x.replace("J","つ") x= x.replace("k","㍿") x= x.replace("K","ボ") x= x.replace("l","ヂ") x= x.replace("L","ザ") x= x.replace("z","デ") x= x.replace("Z","ゴ") x= x.replace("x","ネ") x= x.replace("X","ヌ") x= x.replace("c","ヒ") x= x.replace("C","サ") x= x.replace("v","ポ") x= x.replace("V","ベ") x= x.replace("b","ヸ") x= x.replace("B","テ") x= x.replace("n","コ") x= x.replace("N","ヺ") x= x.replace("m","ホ") x= x.replace("M","ニ") En = x dt = "'''" return (f"{dt}"+f"{En}"+f"{dt}") def decode(data): code = data x = code.replace("あ", "q") x = x.replace("ぃ", "Q") x = x.replace("い", "w") x = x.replace("ぅ", "W") x = x.replace("う", "e") x = x.replace("ぇ", "E") x = x.replace("え", "r") x = x.replace("ぉ", "R") x = x.replace("お", "t") x = x.replace("か", "T") x = x.replace("が", "y") x = x.replace("き", "Y") x = x.replace("ぎ", "u") x = x.replace("く", "U") x = x.replace("ぐ", "i") x = x.replace("け", "I") x = x.replace("げ", "o") x = x.replace("こ", "O") x = x.replace("ご", "p") x = x.replace("さ", "P") x = x.replace("ざ", "a") x = x.replace("し", "A") x = x.replace("じ", "s") x = x.replace("す", "S") x = x.replace("ず", "d") x = x.replace("せ", "D") x = x.replace("ぜ", "f") x = x.replace("そ", "F") x = x.replace("た", "g") x = x.replace("だ", "G") x = x.replace("ち", "h") x = x.replace("ぢ", "H") x = x.replace("っ", "j") x = x.replace("つ", "J") x = x.replace("㍿", "k") x = x.replace("ボ", "K") x = x.replace("ヂ", "l") x = x.replace("ザ", "L") x = x.replace("デ", "z") x = x.replace("ゴ", "Z") x = x.replace("ネ", "x") x = x.replace("ヌ", "X") x = x.replace("ヒ", "c") x = x.replace("サ", "C") x = x.replace("ポ", "v") x = x.replace("ベ", "V") x = x.replace("ヸ", "b") x = x.replace("テ", "B") x = x.replace("コ", "n") x = x.replace("ヺ", "N") x = x.replace("ホ", "m") x = x.replace("ニ", "M") de = x ex = exec (de) return (ex)
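A round-trip sketch for the substitution cipher above. Two caveats are visible in the code: Encode wraps its output in triple quotes, which must be stripped before decoding, and decode does not merely reverse the mapping but passes the result to exec(), so it must only be fed code you trust:

    from Abdullah_Encrypt.Abdullah_Encrypt import Up  # import path assumed

    encoded = Up.Encode("print('hello')")
    print(encoded)                  # kana-substituted source wrapped in '''...'''

    Up.decode(encoded.strip("'"))   # maps back to "print('hello')" and exec()s it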
Abdullah-Encrypt
/Abdullah-Encrypt-0.0.1.tar.gz/Abdullah-Encrypt-0.0.1/src/Abdullah_Encrypt/Abdullah_Encrypt.py
Abdullah_Encrypt.py
import math import matplotlib.pyplot as plt from .Generaldistribution import Distribution class Gaussian(Distribution): """ Gaussian distribution class for calculating and visualizing a Gaussian distribution. Attributes: mean (float) representing the mean value of the distribution stdev (float) representing the standard deviation of the distribution data_list (list of floats) a list of floats extracted from the data file """ def __init__(self, mu=0, sigma=1): Distribution.__init__(self, mu, sigma) def calculate_mean(self): """Function to calculate the mean of the data set. Args: None Returns: float: mean of the data set """ avg = 1.0 * sum(self.data) / len(self.data) self.mean = avg return self.mean def calculate_stdev(self, sample=True): """Function to calculate the standard deviation of the data set. Args: sample (bool): whether the data represents a sample or population Returns: float: standard deviation of the data set """ if sample: n = len(self.data) - 1 else: n = len(self.data) mean = self.calculate_mean() sigma = 0 for d in self.data: sigma += (d - mean) ** 2 sigma = math.sqrt(sigma / n) self.stdev = sigma return self.stdev def plot_histogram(self): """Function to output a histogram of the instance variable data using matplotlib pyplot library. Args: None Returns: None """ plt.hist(self.data) plt.title('Histogram of Data') plt.xlabel('data') plt.ylabel('count') def pdf(self, x): """Probability density function calculator for the gaussian distribution. Args: x (float): point for calculating the probability density function Returns: float: probability density function output """ return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2) def plot_histogram_pdf(self, n_spaces = 50): """Function to plot the normalized histogram of the data and a plot of the probability density function along the same range Args: n_spaces (int): number of data points Returns: list: x values for the pdf plot list: y values for the pdf plot """ mu = self.mean sigma = self.stdev min_range = min(self.data) max_range = max(self.data) # calculates the interval between x values interval = 1.0 * (max_range - min_range) / n_spaces x = [] y = [] # calculate the x values to visualize for i in range(n_spaces): tmp = min_range + interval*i x.append(tmp) y.append(self.pdf(tmp)) # make the plots fig, axes = plt.subplots(2,sharex=True) fig.subplots_adjust(hspace=.5) axes[0].hist(self.data, density=True) axes[0].set_title('Normed Histogram of Data') axes[0].set_ylabel('Density') axes[1].plot(x, y) axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation') axes[0].set_ylabel('Density') plt.show() return x, y def __add__(self, other): """Function to add together two Gaussian distributions Args: other (Gaussian): Gaussian instance Returns: Gaussian: Gaussian distribution """ result = Gaussian() result.mean = self.mean + other.mean result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2) return result def __repr__(self): """Function to output the characteristics of the Gaussian instance Args: None Returns: string: characteristics of the Gaussian """ return "mean {}, standard deviation {}".format(self.mean, self.stdev)
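A usage sketch for the Gaussian class above (values chosen arbitrarily):

    # Two independent Gaussians and their sum, using the methods defined above
    g1 = Gaussian(mu=25, sigma=2)
    g2 = Gaussian(mu=30, sigma=3)

    print(g1.pdf(25))   # density at the mean: 1 / (2 * sqrt(2 * pi)) ~ 0.1995
    print(g1 + g2)      # "mean 55, standard deviation 3.605..."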
Abdullah-probability
/Abdullah_probability-0.3.tar.gz/Abdullah_probability-0.3/Abdullah_probability/Gaussiandistribution.py
Gaussiandistribution.py
import math import matplotlib.pyplot as plt from .Generaldistribution import Distribution class Binomial(Distribution): """ Binomial distribution class for calculating and visualizing a Binomial distribution. Attributes: mean (float) representing the mean value of the distribution stdev (float) representing the standard deviation of the distribution data_list (list of floats) a list of floats to be extracted from the data file p (float) representing the probability of an event occurring n (int) number of trials TODO: Fill out all functions below """ def __init__(self, prob=.5, size=20): self.n = size self.p = prob Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev()) def calculate_mean(self): """Function to calculate the mean from p and n Args: None Returns: float: mean of the data set """ self.mean = self.p * self.n return self.mean def calculate_stdev(self): """Function to calculate the standard deviation from p and n. Args: None Returns: float: standard deviation of the data set """ self.stdev = math.sqrt(self.n * self.p * (1 - self.p)) return self.stdev def replace_stats_with_data(self): """Function to calculate p and n from the data set Args: None Returns: float: the p value float: the n value """ self.n = len(self.data) self.p = 1.0 * sum(self.data) / len(self.data) self.mean = self.calculate_mean() self.stdev = self.calculate_stdev() def plot_bar(self): """Function to output a histogram of the instance variable data using matplotlib pyplot library. Args: None Returns: None """ plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n]) plt.title('Bar Chart of Data') plt.xlabel('outcome') plt.ylabel('count') def pdf(self, k): """Probability density function calculator for the gaussian distribution. Args: x (float): point for calculating the probability density function Returns: float: probability density function output """ a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k))) b = (self.p ** k) * (1 - self.p) ** (self.n - k) return a * b def plot_bar_pdf(self): """Function to plot the pdf of the binomial distribution Args: None Returns: list: x values for the pdf plot list: y values for the pdf plot """ x = [] y = [] # calculate the x values to visualize for i in range(self.n + 1): x.append(i) y.append(self.pdf(i)) # make the plots plt.bar(x, y) plt.title('Distribution of Outcomes') plt.ylabel('Probability') plt.xlabel('Outcome') plt.show() return x, y def __add__(self, other): """Function to add together two Binomial distributions with equal p Args: other (Binomial): Binomial instance Returns: Binomial: Binomial distribution """ try: assert self.p == other.p, 'p values are not equal' except AssertionError as error: raise result = Binomial() result.n = self.n + other.n result.p = self.p result.calculate_mean() result.calculate_stdev() return result def __repr__(self): """Function to output the characteristics of the Binomial instance Args: None Returns: string: characteristics of the Gaussian """ return "mean {}, standard deviation {}, p {}, n {}".\ format(self.mean, self.stdev, self.p, self.n)
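A usage sketch for the Binomial class above (values chosen arbitrarily):

    # 25 trials with success probability 0.4, using the methods defined above
    b = Binomial(prob=0.4, size=25)

    print(b.mean, b.stdev)                  # 10.0 and sqrt(25 * 0.4 * 0.6) ~ 2.449
    print(b.pdf(10))                        # P(exactly 10 successes) ~ 0.1612
    print(b + Binomial(prob=0.4, size=15))  # addition requires equal p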
Abdullah-probability
/Abdullah_probability-0.3.tar.gz/Abdullah_probability-0.3/Abdullah_probability/Binomialdistribution.py
Binomialdistribution.py
FIRSTBITS NOTES Abe experimentally supports bidirectional translation between addresses and firstbits as on http://firstbits.com/. Abe will disagree with other firstbits implementations in some cases until the algorithm is better defined and all implementations start to use it. This disagreement has security implications. Do not rely on the firstbits address reported by Abe to match the one on firstbits.com or another site when sending bitcoins. See this forum thread, and note that Abe does not currently implement the algorithm proposed there: https://bitcointalk.org/index.php?topic=16217.msg960077#msg960077 This feature is disabled by default due to performance impact. To enable it, add "use-firstbits" to the configuration *before* first running a version that supports it. If you run without use-firstbits, Abe will default it to false and will never create the table. The Abe.reconfigure module turns firstbits on and off once you have upgraded Abe's schema. Stop all processes using the database, change the use-firstbits setting in abe.conf, and run: python -m Abe.reconfigure --config abe.conf I have tried a few dozen addresses, and they match firstbits.com. Please report issues in the forum thread (https://bitcointalk.org/index.php?topic=22785.msg949105#msg949105) or by email, PM, or the github issue system, since I will not spend much time testing. The new table has four columns: pubkey_id - identifies a public key hash in the pubkey table block_id - a block where this address first appeared in its chain address_version - second component of address, along with pubkey_hash firstbits - lowercase firstbits of the address in this chain Note that address_version for Bitcoin addresses is always "\0" (or "00" in hex). The field exists because Abe supports multiple currencies with different address versions, such as Bitcoin Testnet and Namecoin. To get from address to pubkey_hash and address_version, use, for example, /q/decode_address/ADDRESS. To get from pubkey_hash and address_version to address, use /q/hashtoaddress/HASH/VERSION. Note that the existence of an address in the table does not always imply that the address has the given firstbits. It will if the corresponding block is in the main chain. That is, if block_id matches a row in chain_candidate where in_longest=1 and chain_id=1 (for Bitcoin, or the desired chain_id from the chain table). FIRSTBITS TECHNICAL DESIGN Maintenance of the abe_firstbits table imposes space and time costs on Abe instances. To keep things simple, Abe does not support firstbits calculation in only some chains and not others. If use_firstbits is in effect, a database invariant requires the table to contain all firstbits corresponding to chain_candidate rows where block_height is not null. If use_firstbits is false (the default) then Abe does not touch abe_firstbits. Finding firstbits requires a function that determines whether a given block is descended from another given block. Why? Because several firstbits records may collide with initial substrings of the new address, but only the ones in ancestral blocks can prevent it from receiving the firstbits. A naive implementation of is_descended_from(block, ancestor) would simply look up block's prev_block_id in the block table and repeat until it finds the block at ancestor's block_height. The result would be true iff that block is ancestor. But this would scale linearly with chain length, and I would like a faster function. 
A naive, fast implementation would introduce a block_ancestor table
containing a row for each block pair whose first block is descended
from its second block.  But this table would grow as the square of
the chain length, and that is too big.

Abe's implementation (DataStore.is_descended_from) involves a new
block table column, search_block_id.  Like block.prev_block_id,
search_block_id points to an earlier block in the chain, but the
earlier block's height is found by a function other than
block_height-1.  The function depends only on block_height and allows
is_descended_from to use a more-or-less binary search.

A paper by Chris Okasaki describes a somewhat similar structure:
"Purely Functional Random-Access Lists"
http://cs.oberlin.edu/~jwalker/refs/fpca95.ps

The get_search_height function in util.py computes the
search_block_id block height.  I am sure it could be improved:

    def get_search_height(n):
        if n < 2:
            return None
        if n & 1:
            return n >> 1 if n & 2 else n - (n >> 2)
        bit = 2
        while (n & bit) == 0:
            bit <<= 1
        return n - bit

To find a block's ancestor at a given height, Abe tries the search
block if it is not too far in the past.  Otherwise, it tries the
previous block.  The pattern of height distances from block to search
block should ensure reasonable worst-case performance, but I have not
proven this.

Given search_block_id, it should be possible to write
is_descended_from as a stored procedure in databases that support it.
This would be an optional performance and utility improvement,
though.  Abe would contain the same logic in generic Python code.

An alternative table-based approach is libbitcoin's span_left and
span_right.  I have not got my head around the requirements for
adjusting the span values when new side chains appear, though, and I
think the more-or-less binary search suffices.

John Tobey
2012-06-09
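As an illustration of the ancestor search described above, here is a
minimal in-memory sketch (this is not Abe's DataStore code, which works
against the block table; the blocks dict shape is invented for
illustration).  It takes the search link whenever that link does not
jump below the target height, and otherwise steps back one block:

    def get_ancestor(blocks, block_id, target_height):
        # blocks maps block_id -> (height, prev_block_id, search_block_id)
        height, prev_id, search_id = blocks[block_id]
        while height > target_height:
            search_height = get_search_height(height)
            if search_height is not None and search_height >= target_height:
                block_id = search_id   # long jump, still at or above target
            else:
                block_id = prev_id     # single step back
            height, prev_id, search_id = blocks[block_id]
        return block_id

    def is_descended_from(blocks, block_id, ancestor_id):
        ancestor_height = blocks[ancestor_id][0]
        return get_ancestor(blocks, block_id, ancestor_height) == ancestor_id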
Abe
/Abe-0.7.2.tar.gz/Abe-0.7.2/README-FIRSTBITS.txt
README-FIRSTBITS.txt
Apache 2 FastCGI setup on Debian/Ubuntu ======================================= This document describes how to install and run Abe as a FastCGI process under Apache 2 on a Debian GNU/Linux or Ubuntu system. Advantages of FastCGI over the built-in HTTP server include: * lets browsers cache static content for better performance; * can integrate with an existing website, no :2750 in URLs. These instructions assume root privileges. To begin a privileged session in a terminal window, issue "sudo -i" (Ubuntu) or "su -" (Debian). Install required packages: apt-get install apache2 libapache2-mod-fcgid python-flup apt-get install python-crypto Change directory to the Abe distribution and install Abe: cd bitcoin-abe python setup.py install Replace YOUR.ABE.DOMAIN below with a domain that resolves to this host. The site will be http://YOUR.ABE.DOMAIN/. To embed Abe in an existing site (e.g., http://YOUR.DOMAIN/abe/) prepend a path (e.g., "/abe") in the Alias directives, place them in your existing sites-available file instead of a new VirtualHost, and merge or create your sites /robots.txt with adjusted paths from Abe/htdocs/robots.txt. Replace HTDOCS/DIRECTORY below with the directory containing abe.css; the Apache process must have permission to read it. The following command displays the correct value: python -m Abe.abe --print-htdocs-directory Optionally, replace "/usr/lib/cgi-bin" below with another directory; Apache must have the directory configured with Options +ExecCGI. Create file /etc/apache2/sites-available/abe with these contents: <VirtualHost *> ServerName YOUR.ABE.DOMAIN Alias /static/ HTDOCS/DIRECTORY Alias /robots.txt HTDOCS/DIRECTORY/robots.txt Alias /favicon.ico HTDOCS/DIRECTORY/favicon.ico Alias / /usr/lib/cgi-bin/abe.fcgi/ # Raise this if you get server errors mentioning "mod_fcgid: # read data timeout in 40 seconds" #FcgidIOTimeout 40 # Uncomment to log Abe requests. #ErrorLog /var/log/abe_error.log #LogLevel info #CustomLog /var/log/abe_access.log combined </VirtualHost> Enable the new configuration: a2ensite abe Replace USER with your Unix user name and create file /usr/lib/cgi-bin/abe.fcgi with these contents: #! /usr/bin/python import subprocess, sys, os command=["sudo", "-u", "USER", "/home/USER/cgi-bin/abe", str(os.getpid())] subprocess.Popen(command, stdin=sys.stdin).wait() Make the file executable: chmod +x /usr/lib/cgi-bin/abe.fcgi Replace USER with your Unix user name and use visudo(1) to append the following to /etc/sudoers: # This allows the Apache account (www-data) to run Abe as USER. www-data ALL=(USER) NOPASSWD: /home/USER/cgi-bin/abe Put configuration such as database connection parameters in /home/USER/abe.conf or change the location below. See the sample abe.conf in the Abe distribution for file format. IMPORTANT: Make sure the configuration does NOT contain a "host" or "port" option. Create file /home/USER/cgi-bin/abe with these contents: #! /bin/sh PYTHONUNBUFFERED=1 exec python -m Abe.abe \ --config /home/USER/abe.conf --static-path static/ --watch-pid="$1" Make the file executable: chmod +x /home/USER/cgi-bin/abe Abe should be reachable at http://YOUR.ABE.DOMAIN/. Exit the privileged session: exit
Abe
/Abe-0.7.2.tar.gz/Abe-0.7.2/README-FASTCGI.txt
README-FASTCGI.txt
On recent versions of SQLite, Abe fails to detect the maximum integer size and requires the --int-type=str option for proper functioning. Ubuntu supplies the sqlite3 module in the python-pysqlite2 [sic] package. Create abe-sqlite.conf with contents: dbtype sqlite3 connect-args abe.sqlite int-type str upgrade port 2750 Perform the initial data load: python -m Abe.abe --config abe-sqlite.conf --commit-bytes 100000 --no-serve Look for output such as: block_tx 1 1 block_tx 2 2 ... This step may take several days depending on chain size and hardware. Then run the web server as: python -m Abe.abe --config abe-sqlite.conf You should see: Listening on http://localhost:2750 Verify the installation by browsing the URL shown.
Abe
/Abe-0.7.2.tar.gz/Abe-0.7.2/README-SQLITE.txt
README-SQLITE.txt
Abe setup for MySQL. Run the Bitcoin client to ensure that your copy of the block chain is up to date. Install Python 2.7 and pycrypto. The Debian/Ubuntu packages are python2.7 and python-crypto. Install MySQL 5.x server and MySQL-Python. On Debian/Ubuntu: mysql-server-5.1 and python-mysqldb. Configure the MySQL instance with InnoDB engine support. Often, InnoDB is enabled by default. To check for InnoDB support, issue "SHOW ENGINES" and look in the output for "InnoDB" with "YES" next to it. If "skip-innodb" appears in the server configuration (my.cnf or my.ini) then remove it and restart the server. Log into MySQL as root (e.g.: mysql -u root) and issue the following, replacing "PASSWORD" with a password you choose: create database abe; CREATE USER abe IDENTIFIED BY 'PASSWORD'; grant all on abe.* to abe; Create file abe-my.conf with the following contents, replacing "PASSWORD" as above: dbtype MySQLdb connect-args {"user":"abe","db":"abe","passwd":"PASSWORD"} upgrade port 2750 Perform the initial data load: python -m Abe.abe --config abe-my.conf --commit-bytes 100000 --no-serve Look for output such as: block_tx 1 1 block_tx 2 2 ... This step may take several days depending on chain size and hardware. Then run the web server as: python -m Abe.abe --config abe-my.conf You should see: Listening on http://localhost:2750 Verify the installation by browsing the URL shown.
Abe
/Abe-0.7.2.tar.gz/Abe-0.7.2/README-MYSQL.txt
README-MYSQL.txt
PostgreSQL on Debian/Ubuntu. Run the Bitcoin client to ensure that your copy of the block chain is up to date. Choose or create a system account to run Abe. Replace USER with its username throughout these instructions. apt-get install python2.7 python-crypto postgresql-8.4 python-psycopg2 sudo -u postgres createdb abe sudo -u postgres createuser USER Add the following line to /etc/postgresql/*/main/pg_hba.conf: local abe USER ident Issue: sudo service postgresql reload Create file abe-pg.conf with contents: dbtype psycopg2 connect-args {"database":"abe"} upgrade port 2750 Perform the initial data load: python -m Abe.abe --config abe-pg.conf --commit-bytes 100000 --no-serve Look for output such as: block_tx 1 1 block_tx 2 2 ... This step may take several days depending on chain size and hardware. Then run the web server as: python -m Abe.abe --config abe-pg.conf You should see: Listening on http://localhost:2750 Verify the installation by browsing the URL shown.
Abe
/Abe-0.7.2.tar.gz/Abe-0.7.2/README-POSTGRES.txt
README-POSTGRES.txt
# Abraham Esoteric language interpreter

**************************************

Install:
--------

pip install abeinterpreter

**************************************

Basic Usage:
------------

0. Import:

   import abeinterpreter as ai

1. Instantiate the AbeInterpreter class:

   interp = ai.AbeInterpreter()

2. Interpret code with .interpret(code):

   interp.interpret(*some abe code here*)

3. Display output with print:

   print(interp.interpret(*some abe code here*))

**************************************

Types:
------

String: "Hello World!"

Int: 42

Float: 3.14

Boolean: True, False

**************************************

Commands:
---------

Move right x cells: Overhead, the geese flew x miles east.

Move left x cells: Overhead, the geese flew x miles west.

Assign x to cell: Preparing for the storm, he carved x into the stone.

Add to cell value: He sold x sheep.

Subtract from cell value: They paid for their x mistakes.

Read input into cell: He listened when his wife spoke.

Print cell value: And Abraham spoke!

While loop: He ran into the mountains, but only when ___. This is what happened there:

Note: Loop conditions act on the current cell value.

Loop conditions:

If greater than cell val: they had more than x fish

If less than cell val: they had less than x fish

If equal to cell val: the stone said x

Signal loop end: Alas, I digress.

Copy: One day he stole his neighbor's goods.

Paste: He repented and returned the property.

**************************************

Print even integers from 100 to zero:
-------------------------------------

He sold 100 sheep. He ran into the mountains, but only when they had more than 0 fish. This is what happened there: And Abraham spoke! They paid for their 2 mistakes. Alas, I digress. And Abraham spoke!

**************************************
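Putting the pieces together, the countdown program above can be run end to end like this (the interpreter expects the whole program as one string):

    import abeinterpreter as ai

    code = ("He sold 100 sheep. "
            "He ran into the mountains, but only when they had more than 0 fish. "
            "This is what happened there: And Abraham spoke! "
            "They paid for their 2 mistakes. Alas, I digress. "
            "And Abraham spoke!")

    interp = ai.AbeInterpreter()
    print(interp.interpret(code))   # prints 100, 98, ..., 2, then the final 0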
AbeInterpreter
/abeinterpreter-1.2.tar.gz/abeinterpreter-1.2/README.md
README.md
import re import operator class AbeInterpreter: def __init__(self): self._i = 0 self._tape = [0] self._buffer = [] self._tokens = list() self._loopcons = list() self._loopactions = list() self._currentlooplevel = 0 self._cval = 0 self._output = '' self._has_error = False self._error = '' self._rs = { 'number': '((\-?\d+\.\d*)|(\-?\d+))', 'bool': 'True|False', 'string': '\".*\"' } def _mr(self): raw = self._tokens.pop() v = self._cast(self._getParmType(raw), raw) self._i += v length = len(self._tape) - 1 if self._i > length: for n in range(length, self._i + 1): self._tape.append(0) def _ml(self): raw = self._tokens.pop() v = self._cast(self._getParmType(raw), raw) self._i -= v if self._i < 0: self._has_error = True self._error = 'Not allowed to move left of index 0.' def _incr(self): raw = self._tokens.pop() v = self._cast(self._getParmType(raw), raw) self._tape[self._i] += v def _decr(self): raw = self._tokens.pop() v = self._cast(self._getParmType(raw), raw) self._tape[self._i] -= v def _getInput(self): x = input() x = self._cast(self._getParmType(x), x) self._tape[self._i] = x def _storeVal(self): raw = self._tokens.pop() v = self._cast(self._getParmType(raw), raw) self._buffer = self._buffer[1:] self._tape[self._i] = v def _doLoop(self): raw = self._tokens.pop() loop_id = self._cast(self._getParmType(raw), raw) # get conditions linked to this loop op, val = self._loopcons[loop_id] condition_met = op(self._tape[self._i], val) # if conditions are met, push the loop actions onto the tokens stack if condition_met: self._tokens.extend(self._loopactions[loop_id]) def _defineLoop(self): # get the loop condition's operator (rawOp) and value (rawVal) rawOp = self._tokens.pop() rawVal = self._tokens.pop() ops = {'>':operator.gt, '<':operator.lt, '==':operator.eq} # dict with string as key and function as value op = ops[rawOp] val = self._cast(self._getParmType(rawVal), rawVal) # add the loop condition to the loop condition stack self._loopcons.append([op, val]) loopIndex = len(self._loopactions) self._currentlooplevel += 1 is_inner_loop = self._currentlooplevel > 1 # outermost loop will have loop level 1 # need to evaluate actions within loop self._loopactions.append(list()) while True: if self._tokens[-1] == 'while': self._tokens.pop() self._loopactions[loopIndex].extend(self._defineLoop()) # include tokens for any inner loops if self._tokens[-1] == 'loopend': # can either be loopend token for inner loop or for this loop self._tokens.pop() break # either way, exit while loop else: # append normal token to list of actions for this loop self._loopactions[loopIndex].append(self._tokens.pop()) # when we get to the end of the loop actions, re-evaluate loop condition self._loopactions[loopIndex].append('loop') self._loopactions[loopIndex].append(str(loopIndex)) # adds loop to end of itself for re-eval self._loopactions[loopIndex].reverse() self._currentlooplevel -= 1 if is_inner_loop: # all inner loops will return their loop token to containing loop return ['loop', str(loopIndex)] else: # is base-level loop, so push on loop tokens for stack self._tokens.append(str(loopIndex)) self._tokens.append('loop') def _loopend(self): op, val = self._loopcons[-1] condition_met = op(self._tape[self._i], val) if condition_met: # if loop conditions are met, re-append the loop actions self._tokens.append('loopend') self._tokens.extend(self._loopactions[-1]) def _copyVal(self): self._cval = self._tape[self._i] def _pasteVal(self): self._tape[self._i] = self._cval def _cast(self, typeString, val): d = dict() d['float'] 
= float d['int'] = int try: if typeString == 'str': val = val.strip('"') return val elif typeString == 'bool': val = (val == 'True') return val return d[typeString](val) except: self._has_error = True self._error = f'Could not cast {val} to type {typeString}' def _getParmType(self, p): tokens = [ ('float', r'\-?\d+\.\d*'), ('int', r'\-?\d+'), ('bool', r'%s' % self._rs['bool']), ('str', r'%s' % self._rs['string']) ] ms = re.compile('|'.join(['(?P<%s>%s)' % tup for tup in tokens])) match = re.match(ms, p) return match.lastgroup def _printCell(self): self._output += str(self._tape[self._i]) + '\n' def _processCond(self, match): s = match.string[match.start():match.end()] s = s.replace('He ran into the mountains, but only when ', '') cond = s.replace('. This is what happened there:', '') # parse condition t = [ ('gt', r'they had more than %s fish' % self._rs['number']), ('lt', r'they had less than %s fish' % self._rs['number']), ('eq', r'the stone said .*') ] op = '' val = '' ms = re.compile('|'.join(["(?P<%s>%s)" % tup for tup in t])) try: optype = re.search(ms, cond).lastgroup except: self._has_error = True self._error = 'Loop condition syntax is incorrect.' return 'error' if optype in ['gt', 'lt', 'deq']: d = re.search(r'%s' % self._rs['number'], cond) if d: val = d.group() elif optype=='eq': op = '==' val = cond.replace('the stone said ', '') else: self._has_error = True return 'error' opdict = {'eq':'==', 'gt':'>', 'lt':'<'} op = opdict[optype] return op, val def _processDigitToken(self, match): s = match.string[match.start():match.end()] ms = re.compile(r'%s' % self._rs['number']) m = re.search(ms, s) v = 0 if m is not None: v = m.group() return v def _processStoreToken(self, match): s = match.string[match.start():match.end()] s = s.replace('Preparing for the storm, he carved ', '') s = s.replace(' into the stone.', '') return s def _consumeToken(self, token): comms = dict() comms['mr'] = self._mr comms['ml'] = self._ml comms['incr'] = self._incr comms['decr'] = self._decr comms['store'] = self._storeVal comms['pin'] = self._getInput comms['pout'] = self._printCell comms['while'] = self._defineLoop comms['loop'] = self._doLoop comms['loopend'] = self._loopend comms['copyval'] = self._copyVal comms['pasteval'] = self._pasteVal comms[token]() def _consumeTokenList(self): while len(self._tokens) > 0: if self._has_error: break self._consumeToken(self._tokens.pop()) def _tokenize(self, s): t = [ ('mr', r'Overhead, the geese flew \d+ miles east\.'), ('ml', r'Overhead, the geese flew \d+ miles west\.'), ('incr', r'He sold .*? sheep\.'), ('decr', r'They paid for their .*? mistakes\.'), ('pout', r'And Abraham spoke!'), ('pin', r'He listened when his wife spoke\.'), ('while', r'He ran into the mountains, but only when .*?\. This is what happened there\:'), ('loopend', r'Alas, I digress\.'), ('store', r'Preparing for the storm, he carved .*? 
into the stone\.'), ('copyval', r'One day he stole his neighbor\'s goods\.'), ('pasteval', r'He repented and returned the property\.'), ('s', r'\s+') ] ms = re.compile('|'.join(['(?P<%s>%s)' % tup for tup in t])) output = [] lastend = 0 f = [x for x in re.finditer(ms, s)] for m in f: if m.lastgroup == 's': lastend = m.end() continue if m.start() != lastend: self._has_error = True self._error = 'Problem with syntax in position %s' % m.start() return output if m.lastgroup in ['mr', 'ml', 'incr', 'decr']: output.append(m.lastgroup) output.append(self._processDigitToken(m)) elif m.lastgroup == 'store': output.append(m.lastgroup) output.append(self._processStoreToken(m)) elif m.lastgroup == 'while': output.append(m.lastgroup) output.extend(self._processCond(m)) else: output.append(m.lastgroup) lastend = m.end() if lastend != len(s) and s[-1]!=' ': self._has_error = True self._error = 'Problem with syntax in position' output.reverse() return output def interpret(self, abraham_code, *args): self._buffer = [a for a in args] self._i = 0 self._tape = [0] self._tokens = list() self._loopcons = list() self._loopactions = list() self._cval = 0 self._output = '' self._tokens = self._tokenize(abraham_code) self._consumeTokenList() if self._has_error: return self._error else: return self._output
AbeInterpreter
/abeinterpreter-1.2.tar.gz/abeinterpreter-1.2/abeinterpreter.py
abeinterpreter.py
# [AbemaStream](https://xpadev.net/AbemaStream/)

[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/xpadev-net/niconicomments/blob/master/LICENSE)

A command-line tool and module for downloading live streams from Abema.

This is a script to download streams from Abema.

Github: https://github.com/xpadev-net/abema-stream

PyPI: https://pypi.org/project/AbemaStream/

## ATTENTION

FFmpeg is required to run this tool.

## Restriction

- The output file name is fixed to the slot id of the stream
- Abema Premium is not supported

## Installation

```
pip install AbemaStream
```

## Examples

```python
from AbemaStream import AbemaStream

AbemaStream("abema-anime", "/path/to/save/mp4")
```

```bash
python -m AbemaStream "abema-anime" "/path/to/save/mp4"
```

## External Source Code

The following functions are adapted from external source code:
`_generate_applicationkeysecret`, `_get_videokey_from_ticket`

Copyright (c) 2011-2016, Christopher Rosell
Copyright (c) 2016-2022, Streamlink Team
All rights reserved.

Released under the BSD 2-Clause "Simplified" License

License: https://github.com/streamlink/streamlink/blob/master/LICENSE

source: https://github.com/streamlink/streamlink/blob/master/src/streamlink/plugins/abematv.py
AbemaStream
/AbemaStream-0.0.2.tar.gz/AbemaStream-0.0.2/README.md
README.md
<p align="center"> <b> ABG </b> </p> <p align="center"><a href="https://pepy.tech/project/abg"> <img src="https://static.pepy.tech/personalized-badge/abg?period=total&units=international_system&left_color=black&right_color=black&left_text=Downloads" width="169" height="29.69"/></a></p> ### Requirements - Python 3.7 ᴏʀ higher. - A [ᴛᴇʟᴇɢʀᴀᴍ ᴀᴘɪ ᴋᴇʏ](https://docs.pyrogram.org/intro/setup#api-keys). - ᴀʙɢ [ᴄᴏɴғɪɢ](https://github.com/Abishnoi69/Abg#configuratoins). ### Installing : ```bash pip install -U Abg ``` #### sᴇᴛᴜᴘ ```python from pyrogram import filters, Client from pyrogram.types import CallbackQuery, Message from Abg import patch # type : ignore from Abg.helpers import ikb app = Client("my_account") @app.on_cmd("myinfo") Resultdef my_info(self: Client, ctx: Message): if not ctx.from_user: return name = await ctx.chat.ask("Type Your Name") age = await ctx.chat.ask("Type your age") add = await ctx.chat.ask("Type your address") # you can also use : ctx.reply_text(...) await self.send_msg( chat_id=ctx.chat.id, text=f"Your name is: {name.text}\nYour age is: {age.text}\nyour address is: {add.text}", reply_markup=ikb([[("ʙᴜᴛᴛᴏɴ", "hello")]]), ) # callback @app.on_cb("hello") async def hello(c: Client, q: CallbackQuery): await q.answer("Hello From Abg", show_alert=True) app.run() ``` > #### ᴜsᴇʀ/ʙᴏᴛ ʀɪɢʜᴛs ```python from Abg import patch # all patch from pyrogram.types import Message from pyrogram import Client app = Client("my_account") @app.on_cmd("del", group_only=True) @app.adminsOnly(permissions="can_delete_messages", is_both=True) async def del_msg(c: Client, m: Message): if m.reply_to_message: await m.delete() await c.delete_messages( chat_id=m.chat.id, message_ids=m.reply_to_message.id, ) else: await m.reply_text(text="ᴡʜᴀᴛ ᴅᴏ ʏᴏᴜ ᴡᴀɴɴᴀ ᴅᴇʟᴇᴛᴇ?") return app.run() ``` > ### keyboard's ```python from Abg.inline import InlineKeyboard, InlineButton keyboard = InlineKeyboard(row_width=3) keyboard.add( InlineButton('1', 'inline_keyboard:1'), InlineButton('2', 'inline_keyboard:2'), InlineButton('3', 'inline_keyboard:3'), InlineButton('4', 'inline_keyboard:4'), InlineButton('5', 'inline_keyboard:5'), InlineButton('6', 'inline_keyboard:6'), InlineButton('7', 'inline_keyboard:7') ) ``` #### ʀᴇsᴜʟᴛ <p><img src="https://raw.githubusercontent.com/Abishnoi69/Abg/master/doce/images/add_inline_button.png" alt="add_inline_button"></p> ━━━━━━━━━━━━━━━━━━━━ ### Configuratoins ``` OWNER_ID = ʏᴏᴜʀ ᴛᴇʟᴇɢʀᴀᴍ ɪᴅ. DEV_USERS = ʙᴏᴛ ᴅᴇᴠs ɪᴅ. (ʏᴏᴜ ᴄᴀɴ ᴀᴅᴅ ᴀ ʟɪsᴛ : 1 2 3) LOGGER_ID = ʏᴏᴜʀ ᴘʀɪᴠᴀᴛᴇ ɢʀᴏᴜᴘ/ᴄʜᴀɴɴᴇʟ ɪᴅ. (ʜᴇʀᴇ ʙᴏᴛ sᴇɴᴅ ʟᴏɢs) ``` ━━━━━━━━━━━━━━━━━━━━
Abg
/Abg-2.3.6.tar.gz/Abg-2.3.6/README.md
README.md
import math import numpy as np import Abhilash1_optimizers.Activation as Activation import Abhilash1_optimizers.hyperparameters as hyperparameters import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer #Adamax varaiation of ADAM with L**p norm over L**2 norm class NADAM(): def __init__(alpha,b_1,b_2,epsilon,noise_g): return hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g) def init(m_t,v_t,t,theta): return Moment_Initializer.Moment_Initializer.initialize(m_t,v_t,t,theta) def Nadam_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale): alpha,b_1,b_2,epsilon,noise_g=NADAM.__init__(alpha,b_1,b_2,epsilon,noise_g) m_t,v_t,t,theta_0=NADAM.init(0,0,0,0) final_weight_vector=[] for i in range(len_data): theta_0=data[i] for i in range(max_itr): t+=1 if(act_func=="softPlus"): g_t=Activation.Activation.softplus(theta_0) elif (act_func=="relu"): g_t=Activation.Activation.relu(theta_0) elif (act_func=="elu"): g_t=Activation.Activation.elu(theta_0,alpha) elif (act_func=="selu"): g_t=Activation.Activation.selu(scale,theta_0,theta) elif (act_func=="tanh"): g_t=Activation.Activation.tanh(theta_0) elif (act_func=="hardSigmoid"): g_t=Activation.Activation.hard_sigmoid(theta_0) elif (act_func=="softSign"): g_t=Activation.Activation.softsign(theta_0) elif (act_func=="linear"): g_t=Activation.Activation.linear(theta_0) elif (act_func=="exponential"): g_t=Activation.Activation.exponential(theta_0) g_hat=g_t/(1-b_1**t) m_t=b_1*m_t + (1-b_1)*g_t m_hat=m_t/(1-b_1**(t+1)) v_t=b_2*v_t +(1-b_2)*g_t*g_t v_hat=v_t/(1-b_2**t) m_cap=(1-b_1)*g_hat + b_1*m_hat theta_prev=theta_0 alpha_t=(alpha*(m_cap/(math.sqrt(v_hat)+epsilon))) theta_0=theta_prev-(alpha_t) print("Intrermediate gradients") print("==========================================") print("Previous gradient",theta_prev) print("Present gradient",theta_0) print("==========================================") #if theta_0==theta_prev: # break; final_weight_vector.append(theta_0) return final_weight_vector def initialize(data,max_itr): len_data=len(data) optimized_weights=NADAM.Nadam_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale) print("Optimized Weight Vector") print("=====================================") for i in range(len(optimized_weights)): print("=====",optimized_weights[i]) if __name__=='__main__': print("Verbose") #t_0=Adagrad_optimizer() #print("gradient coefficient",t_0) #solve_grad=poly_func(t_0) #print("Gradient Value",solve_grad) sample_data=[1,0.5,0.7,0.1] NADAM.initialize(sample_data,100)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/Nadam.py
Nadam.py
import math import numpy as np import Abhilash1_optimizers.Activation as Activation import Abhilash1_optimizers.hyperparameters as hyperparameters import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer #Adamax varaiation of ADAM with L**p norm over L**2 norm(p->infinity) class Momentum(): def __init__(alpha,b_1,b_2,epsilon,noise_g): return hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g) def init(m_t,v_t,t,theta): return Moment_Initializer.Moment_Initializer.initialize(m_t,v_t,t,theta) def Momentum_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale): alpha,b_1,b_2,epsilon,noise_g=Momentum.__init__(alpha,b_1,b_2,epsilon,noise_g) m_t,v_t,t,theta_0=Momentum.init(0,0,0,0) final_weight_vector=[] for i in range(len_data): theta_0=data[i] for i in range(max_itr): t+=1 if(act_func=="softPlus"): g_t=Activation.Activation.softplus(theta_0) elif (act_func=="relu"): g_t=Activation.Activation.relu(theta_0) elif (act_func=="elu"): g_t=Activation.Activation.elu(theta_0,alpha) elif (act_func=="selu"): g_t=Activation.Activation.selu(scale,theta_0,theta) elif (act_func=="tanh"): g_t=Activation.Activation.tanh(theta_0) elif (act_func=="hardSigmoid"): g_t=Activation.Activation.hard_sigmoid(theta_0) elif (act_func=="softSign"): g_t=Activation.Activation.softsign(theta_0) elif (act_func=="linear"): g_t=Activation.Activation.linear(theta_0) elif (act_func=="exponential"): g_t=Activation.Activation.exponential(theta_0) m_t=b_1*m_t + 1.0*g_t theta_prev=theta_0 alpha_t=(alpha*(m_t)) theta_0=theta_prev-(alpha_t) print("Intrermediate gradients") print("==========================================") print("Previous gradient",theta_prev) print("Present gradient",theta_0) print("==========================================") #if theta_0==theta_prev: # break; final_weight_vector.append(theta_0) return final_weight_vector def initialize(data,max_itr): len_data=len(data) optimized_weights=Momentum.Momentum_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale) print("Optimized Weight Vector") print("=====================================") for i in range(len(optimized_weights)): print("=====",optimized_weights[i]) if __name__=='__main__': print("Verbose") #t_0=Adagrad_optimizer() #print("gradient coefficient",t_0) #solve_grad=poly_func(t_0) #print("Gradient Value",solve_grad) sample_data=[1,0.5,0.7,0.1] Momentum.initialize(sample_data,100)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/ClassicMomentum.py
ClassicMomentum.py
import math import numpy as np import pandas as pd import Abhilash1_optimizers.Activation as Activation import Abhilash1_optimizers.hyperparameters as hyperparameters import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer class ADAMM(): def __init__(alpha,b_1,b_2,epsilon,noise_g): return hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g) def init(m_t,v_t,t,theta): return Moment_Initializer.Moment_Initializer.initialize(m_t,v_t,t,theta) def Adam_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale): alpha,b_1,b_2,epsilon,noise_g=hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g) m_t,v_t,t,theta_0=ADAMM.init(0,0,0,0) final_weight_vector=[] for i in range(len_data): theta_0=data[i] for i in range(max_itr): t+=1 if(act_func=="softPlus"): g_t=Activation.Activation.softplus(theta_0) elif (act_func=="relu"): g_t=Activation.Activation.relu(theta_0) elif (act_func=="elu"): g_t=Activation.Activation.elu(theta_0,alpha) elif (act_func=="selu"): g_t=Activation.Activation.selu(scale,theta_0,theta) elif (act_func=="tanh"): g_t=Activation.Activation.tanh(theta_0) elif (act_func=="hardSigmoid"): g_t=Activation.Activation.hard_sigmoid(theta_0) elif (act_func=="softSign"): g_t=Activation.Activation.softsign(theta_0) elif (act_func=="linear"): g_t=Activation.Activation.linear(theta_0) elif (act_func=="exponential"): g_t=Activation.Activation.exponential(theta_0) m_t=b_1*m_t + (1-b_1)*g_t v_t=b_2*v_t +(1-b_2)*g_t*g_t m_hat=m_t/(1-(b_1**t)) v_hat=v_t/(1-(b_2**t)) theta_prev=theta_0 alpha_t=(alpha*(math.sqrt(1-b_2**t)/(1-b_1**t))) theta_0=theta_prev-((alpha_t*(m_t)/(math.sqrt(v_hat) + epsilon))) print("Intrermediate gradients") print("==========================================") print("Previous gradient",theta_prev) print("Present gradient",theta_0) print("==========================================") if theta_0==theta_prev: break; final_weight_vector.append(theta_0) return final_weight_vector def initialize(data,max_itr): len_data=len(data) optimized_weights=ADAMM.Adam_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale) print("Optimized Weight Vector") print("=====================================") for i in range(len(optimized_weights)): print("=====",optimized_weights[i]) if __name__=='__main__': print("Verbose") #t_0=Adagrad_optimizer() #print("gradient coefficient",t_0) #solve_grad=poly_func(t_0) #print("Gradient Value",solve_grad) sample_data=[1,0.5,0.7,0.1] ADAMM.initialize(sample_data,10)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/ADAM_modified_update.py
ADAM_modified_update.py
import math import numpy as np import Abhilash1_optimizers.Activation as Activation import Abhilash1_optimizers.hyperparameters as hyperparameters import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer #Adamax varaiation of ADAM with L**p norm over L**2 norm(p->infinity) class ADAMAX(): def __init__(alpha,b_1,b_2,epsilon,noise_g): return hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g) def init(m_t,v_t,t,theta): return Moment_Initializer.Moment_Initializer.initialize(m_t,v_t,t,theta) def Adamax_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale): alpha,b_1,b_2,epsilon,noise_g=ADAMAX.__init__(alpha,b_1,b_2,epsilon,noise_g) m_t,v_t,t,theta_0=ADAMAX.init(0,0,0,0) final_weight_vector=[] for i in range(len_data): theta_0=data[i] for i in range(max_itr): t+=1 if(act_func=="softPlus"): g_t=Activation.Activation.softplus(theta_0) elif (act_func=="relu"): g_t=Activation.Activation.relu(theta_0) elif (act_func=="elu"): g_t=Activation.Activation.elu(theta_0,alpha) elif (act_func=="selu"): g_t=Activation.Activation.selu(scale,theta_0,theta) elif (act_func=="tanh"): g_t=Activation.Activation.tanh(theta_0) elif (act_func=="hardSigmoid"): g_t=Activation.Activation.hard_sigmoid(theta_0) elif (act_func=="softSign"): g_t=Activation.Activation.softsign(theta_0) elif (act_func=="linear"): g_t=Activation.Activation.linear(theta_0) elif (act_func=="exponential"): g_t=Activation.Activation.exponential(theta_0) m_t=b_1*m_t + (1-b_1)*g_t v_t=max(b_2*v_t,g_t) theta_prev=theta_0 alpha_t=(alpha*(1.0/(1-b_1**t))) theta_0=theta_prev-(alpha_t) print("Intrermediate gradients") print("==========================================") print("Previous gradient",theta_prev) print("Present gradient",theta_0) print("==========================================") #if theta_0==theta_prev: # break; final_weight_vector.append(theta_0) return final_weight_vector def initialize(data,max_itr,alpha,b_1,b_2,epsilon,noise_g): len_data=len(data) optimized_weights=ADAMAX.Adamax_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale) print("Optimized Weight Vector") print("=====================================") for i in range(len(optimized_weights)): print("=====",optimized_weights[i]) if __name__=='__main__': print("Verbose") #t_0=Adagrad_optimizer() #print("gradient coefficient",t_0) #solve_grad=poly_func(t_0) #print("Gradient Value",solve_grad) sample_data=[1,0.5,0.7,0.1] ADAMAX.initialize(sample_data,100)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/Adamax.py
Adamax.py
# RMSProp
import math

import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer


# RMSprop: rescale the gradient by a running average of its recent magnitude.
class RMSPROP():
    # These are used as plain helper functions (called unbound on the class),
    # not as an instance constructor.
    def __init__(alpha, b_1, b_2, epsilon, noise_g):
        return hyperparameters.hyperparameter.initialise(alpha, b_1, b_2, epsilon, noise_g)

    def init(m_t, v_t, t, theta):
        return Moment_Initializer.Moment_Initializer.initialize(m_t, v_t, t, theta)

    def RMSprop_optimizer(data, len_data, max_itr, alpha, b_1, b_2, epsilon, noise_g, act_func, scale):
        alpha, b_1, b_2, epsilon, noise_g = RMSPROP.__init__(alpha, b_1, b_2, epsilon, noise_g)
        m_t, v_t, t, theta_0 = RMSPROP.init(0, 0, 0, 0)
        final_weight_vector = []
        for i in range(len_data):
            theta_0 = data[i]
            for _ in range(max_itr):
                t += 1
                if act_func == "softPlus":
                    g_t = Activation.Activation.softplus(theta_0)
                elif act_func == "relu":
                    g_t = Activation.Activation.relu(theta_0)
                elif act_func == "elu":
                    g_t = Activation.Activation.elu(theta_0, alpha)
                elif act_func == "selu":
                    # `theta` was undefined here in the original; alpha is
                    # assumed to be the intended ELU parameter.
                    g_t = Activation.Activation.selu(scale, theta_0, alpha)
                elif act_func == "tanh":
                    g_t = Activation.Activation.tanh(theta_0)
                elif act_func == "hardSigmoid":
                    g_t = Activation.Activation.hard_sigmoid(theta_0)
                elif act_func == "softSign":
                    g_t = Activation.Activation.softsign(theta_0)
                elif act_func == "linear":
                    g_t = Activation.Activation.linear(theta_0)
                elif act_func == "exponential":
                    g_t = Activation.Activation.exponential(theta_0)
                else:
                    raise ValueError("Unknown activation: " + act_func)
                v_t = b_2 * v_t + (1 - b_2) * g_t * g_t  # running second moment
                theta_prev = theta_0
                alpha_t = alpha * g_t / (math.sqrt(v_t) + epsilon)  # rescaled step
                theta_0 = theta_prev - alpha_t
                print("Intermediate gradients")
                print("==========================================")
                print("Previous gradient", theta_prev)
                print("Present gradient", theta_0)
                print("==========================================")
                # if theta_0 == theta_prev:
                #     break
            final_weight_vector.append(theta_0)
        return final_weight_vector

    def initialize(data, max_itr, alpha=0.001, b_1=0.9, b_2=0.999,
                   epsilon=1e-8, noise_g=1e-8, act_func="relu", scale=1.0):
        # Defaults are assumed; the original referenced undefined globals here.
        len_data = len(data)
        optimized_weights = RMSPROP.RMSprop_optimizer(data, len_data, max_itr, alpha, b_1,
                                                      b_2, epsilon, noise_g, act_func, scale)
        print("Optimized Weight Vector")
        print("=====================================")
        for w in optimized_weights:
            print("=====", w)


if __name__ == '__main__':
    print("Verbose")
    sample_data = [1, 0.5, 0.7, 0.1]
    max_itr = 100
    # RMSPROP.initialize(sample_data, max_itr)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/RMSprop.py
RMSprop.py
import math

import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer


# Adagrad variation of ADAM: b_1 = 0, b_2 ~ 1, with an additive noise term.
class ADAGRAD():
    # These are used as plain helper functions (called unbound on the class),
    # not as an instance constructor.
    def __init__(alpha, b_1, b_2, epsilon, noise_g):
        return hyperparameters.hyperparameter.initialise(alpha, b_1, b_2, epsilon, noise_g)

    def init(m_t, v_t, t, theta):
        return Moment_Initializer.Moment_Initializer.initialize(m_t, v_t, t, theta)

    def Adagrad_optimizer(data, len_data, max_itr, alpha, b_1, b_2, epsilon, noise_g, act_func, scale):
        alpha, b_1, b_2, epsilon, noise_g = ADAGRAD.__init__(alpha, b_1, b_2, epsilon, noise_g)
        m_t, v_t, t, theta_0 = ADAGRAD.init(0, 0, 0, 0)
        gradient_averages = []  # accumulated squared gradients (grows across all data points)
        final_weight_vector = []
        for i in range(len_data):
            theta_0 = data[i]
            for _ in range(max_itr):
                t += 1
                if act_func == "softPlus":
                    g_t = Activation.Activation.softplus(theta_0)
                elif act_func == "relu":
                    g_t = Activation.Activation.relu(theta_0)
                elif act_func == "elu":
                    g_t = Activation.Activation.elu(theta_0, alpha)
                elif act_func == "selu":
                    # `theta` was undefined here in the original; alpha is
                    # assumed to be the intended ELU parameter.
                    g_t = Activation.Activation.selu(scale, theta_0, alpha)
                elif act_func == "tanh":
                    g_t = Activation.Activation.tanh(theta_0)
                elif act_func == "hardSigmoid":
                    g_t = Activation.Activation.hard_sigmoid(theta_0)
                elif act_func == "softSign":
                    g_t = Activation.Activation.softsign(theta_0)
                elif act_func == "linear":
                    g_t = Activation.Activation.linear(theta_0)
                elif act_func == "exponential":
                    g_t = Activation.Activation.exponential(theta_0)
                else:
                    raise ValueError("Unknown activation: " + act_func)
                gradient_averages.append(g_t ** 2)
                g_sum = sum(gradient_averages)
                print("gradient_sum", g_sum)
                theta_prev = theta_0
                # Adagrad step: scale the gradient by the root of the
                # accumulated squared gradients, offset by the noise term.
                alpha_t = alpha * g_t / (noise_g + math.sqrt(g_sum))
                theta_0 = theta_prev - alpha_t
                print("Intermediate gradients")
                print("==========================================")
                print("Previous gradient", theta_prev)
                print("Present gradient", theta_0)
                print("==========================================")
                # if theta_0 == theta_prev:
                #     break
            final_weight_vector.append(theta_0)
        return final_weight_vector

    def initialize(data, max_itr, alpha=0.001, b_1=0.9, b_2=0.999,
                   epsilon=1e-8, noise_g=1e-8, act_func="relu", scale=1.0):
        # Defaults are assumed; the original referenced undefined globals here.
        len_data = len(data)
        optimized_weights = ADAGRAD.Adagrad_optimizer(data, len_data, max_itr, alpha, b_1,
                                                      b_2, epsilon, noise_g, act_func, scale)
        print("Optimized Weight Vector")
        print("=====================================")
        for w in optimized_weights:
            print("=====", w)


if __name__ == '__main__':
    print("Verbose")
    sample_data = [1, 0.5, 0.7, 0.1]
    # ADAGRAD.initialize(sample_data, 10)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/Adagrad.py
Adagrad.py
import math

import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer


# Plain stochastic gradient descent: theta <- theta - alpha * g.
class SGD():
    # These are used as plain helper functions (called unbound on the class),
    # not as an instance constructor.
    def __init__(alpha, b_1, b_2, epsilon, noise_g):
        return hyperparameters.hyperparameter.initialise(alpha, b_1, b_2, epsilon, noise_g)

    def init(m_t, v_t, t, theta):
        return Moment_Initializer.Moment_Initializer.initialize(m_t, v_t, t, theta)

    def SGD_optimizer(data, len_data, max_itr, alpha, b_1, b_2, epsilon, noise_g, act_func, scale):
        alpha, b_1, b_2, epsilon, noise_g = SGD.__init__(alpha, b_1, b_2, epsilon, noise_g)
        m_t, v_t, t, theta_0 = SGD.init(0, 0, 0, 0)
        final_weight_vector = []
        for i in range(len_data):
            theta_0 = data[i]
            for _ in range(max_itr):
                t += 1
                if act_func == "softPlus":
                    g_t = Activation.Activation.softplus(theta_0)
                elif act_func == "relu":
                    g_t = Activation.Activation.relu(theta_0)
                elif act_func == "elu":
                    g_t = Activation.Activation.elu(theta_0, alpha)
                elif act_func == "selu":
                    # `theta` was undefined here in the original; alpha is
                    # assumed to be the intended ELU parameter.
                    g_t = Activation.Activation.selu(scale, theta_0, alpha)
                elif act_func == "tanh":
                    g_t = Activation.Activation.tanh(theta_0)
                elif act_func == "hardSigmoid":
                    g_t = Activation.Activation.hard_sigmoid(theta_0)
                elif act_func == "softSign":
                    g_t = Activation.Activation.softsign(theta_0)
                elif act_func == "linear":
                    g_t = Activation.Activation.linear(theta_0)
                elif act_func == "exponential":
                    g_t = Activation.Activation.exponential(theta_0)
                else:
                    raise ValueError("Unknown activation: " + act_func)
                theta_prev = theta_0
                alpha_t = alpha * g_t  # fixed-step gradient move
                theta_0 = theta_prev - alpha_t
                print("Intermediate gradients")
                print("==========================================")
                print("Previous gradient", theta_prev)
                print("Present gradient", theta_0)
                print("==========================================")
                # if theta_0 == theta_prev:
                #     break
            final_weight_vector.append(theta_0)
        return final_weight_vector

    def initialize(data, max_itr, alpha=0.001, b_1=0.9, b_2=0.999,
                   epsilon=1e-8, noise_g=1e-8, act_func="relu", scale=1.0):
        # Defaults are assumed; the original referenced undefined globals here.
        len_data = len(data)
        optimized_weights = SGD.SGD_optimizer(data, len_data, max_itr, alpha, b_1,
                                              b_2, epsilon, noise_g, act_func, scale)
        print("Optimized Weight Vector")
        print("=====================================")
        for w in optimized_weights:
            print("=====", w)


if __name__ == '__main__':
    print("Verbose")
    sample_data = [1, 0.5, 0.7, 0.1]
    SGD.initialize(sample_data, 100)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/SGD.py
SGD.py
import math

import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer


# Standard ADAM with explicit bias-corrected first and second moment estimates.
class ADAM():
    # These are used as plain helper functions (called unbound on the class),
    # not as an instance constructor.
    def __init__(alpha, b_1, b_2, epsilon, noise_g):
        return hyperparameters.hyperparameter.initialise(alpha, b_1, b_2, epsilon, noise_g)

    def init(m_t, v_t, t, theta):
        return Moment_Initializer.Moment_Initializer.initialize(m_t, v_t, t, theta)

    def Adam_optimizer(data, len_data, max_itr, alpha, b_1, b_2, epsilon, noise_g, act_func, scale):
        alpha, b_1, b_2, epsilon, noise_g = hyperparameters.hyperparameter.initialise(
            alpha, b_1, b_2, epsilon, noise_g)
        m_t, v_t, t, theta_0 = ADAM.init(0, 0, 0, 0)
        final_weight_vector = []
        for i in range(len_data):
            theta_0 = data[i]
            for _ in range(max_itr):
                t += 1
                if act_func == "softPlus":
                    g_t = Activation.Activation.softplus(theta_0)
                elif act_func == "relu":
                    g_t = Activation.Activation.relu(theta_0)
                elif act_func == "elu":
                    g_t = Activation.Activation.elu(theta_0, alpha)
                elif act_func == "selu":
                    # `theta` was undefined here in the original; alpha is
                    # assumed to be the intended ELU parameter.
                    g_t = Activation.Activation.selu(scale, theta_0, alpha)
                elif act_func == "tanh":
                    g_t = Activation.Activation.tanh(theta_0)
                elif act_func == "hardSigmoid":
                    g_t = Activation.Activation.hard_sigmoid(theta_0)
                elif act_func == "softSign":
                    g_t = Activation.Activation.softsign(theta_0)
                elif act_func == "linear":
                    g_t = Activation.Activation.linear(theta_0)
                elif act_func == "exponential":
                    g_t = Activation.Activation.exponential(theta_0)
                else:
                    raise ValueError("Unknown activation: " + act_func)
                m_t = b_1 * m_t + (1 - b_1) * g_t        # biased first-moment estimate
                v_t = b_2 * v_t + (1 - b_2) * g_t * g_t  # biased second-moment estimate
                m_hat = m_t / (1 - (b_1 ** t))           # bias corrections
                v_hat = v_t / (1 - (b_2 ** t))
                theta_prev = theta_0
                # Bias-corrected ADAM step
                theta_0 = theta_prev - alpha * m_hat / (math.sqrt(v_hat) + epsilon)
                print("Intermediate gradients")
                print("==========================================")
                print("Previous gradient", theta_prev)
                print("Present gradient", theta_0)
                print("==========================================")
                if theta_0 == theta_prev:
                    break
            final_weight_vector.append(theta_0)
        return final_weight_vector

    def initialize(data, max_itr, alpha=0.001, b_1=0.9, b_2=0.999,
                   epsilon=1e-8, noise_g=1e-8, act_func="relu", scale=1.0):
        # The standard Adam defaults above are assumed; the original referenced
        # undefined globals here.
        len_data = len(data)
        optimized_weights = ADAM.Adam_optimizer(data, len_data, max_itr, alpha, b_1,
                                                b_2, epsilon, noise_g, act_func, scale)
        print("Optimized Weight Vector")
        print("=====================================")
        for w in optimized_weights:
            print("=====", w)


if __name__ == '__main__':
    print("Verbose")
    sample_data = [1, 0.5, 0.7, 0.1]
    ADAM.initialize(sample_data, 10)
Abhilash1-optimizers
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/Adam_optimizer.py
Adam_optimizer.py
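Since several of the files above implement the same scalar recursion, a minimal standalone cross-check of the bias-corrected Adam step can be useful. The sketch below is plain Python; the quadratic objective and hyperparameter values are illustrative assumptions, not part of the package:

```python
import math

def adam_step(theta, g, m, v, t, alpha=0.001, b1=0.9, b2=0.999, eps=1e-8):
    """One bias-corrected Adam update for a scalar parameter."""
    m = b1 * m + (1 - b1) * g            # first-moment estimate
    v = b2 * v + (1 - b2) * g * g        # second-moment estimate
    m_hat = m / (1 - b1 ** t)            # bias corrections
    v_hat = v / (1 - b2 ** t)
    theta = theta - alpha * m_hat / (math.sqrt(v_hat) + eps)
    return theta, m, v

# Minimise f(x) = x**2 (gradient 2x) from x = 1.0 as an illustrative check.
theta, m, v = 1.0, 0.0, 0.0
for t in range(1, 1001):
    theta, m, v = adam_step(theta, 2 * theta, m, v, t)
print(theta)  # should approach 0
```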
import sys
import csv
import copy

import pandas as pd

# Usage: python topsis.py <input.csv> <weights> <impacts> <output.csv>
if len(sys.argv) != 5:
    print("Incorrect number of parameters")
    sys.exit(0)

inputData = sys.argv[1]
try:
    with open(inputData):
        pass
except FileNotFoundError:
    raise Exception("File doesn't exist")

data = pd.read_csv(inputData)
x, y = data.shape
if y < 3:
    raise Exception("Only files with three or more columns are valid!")

# Weights and impacts arrive as comma-separated strings, e.g. "1,1,2" and "+,+,-".
weight = []
for tok in sys.argv[2].split(','):
    try:
        weight.append(float(tok))
    except ValueError:
        print("Weights aren't separated by commas")
        sys.exit(0)

impact = []
for tok in sys.argv[3].split(','):
    if tok in ('+', '-'):
        impact.append(tok)
    else:
        print("Impacts must be + or - and separated by commas")
        sys.exit(0)

if y - 1 != len(weight):
    print("Number of weights and columns (from 2nd to last column) aren't the same")
    sys.exit(0)
if y - 1 != len(impact):
    print("Number of impacts and columns (from 2nd to last column) aren't the same")
    sys.exit(0)

data_columns = list(data.columns)
data = data.values.tolist()
c_data = copy.deepcopy(data)

# Normalised performance values: divide each criterion column (1..y-1) by its
# Euclidean norm; column 0 holds the row label.
for i in range(1, y):
    norm = 0
    for j in range(x):
        if isinstance(data[j][i], str):
            print("Data in the input file is not numeric")
            sys.exit(0)
        norm += data[j][i] ** 2
    norm = norm ** 0.5
    for k in range(x):
        data[k][i] = data[k][i] / norm

# Weighted normalised decision matrix
for i in range(1, y):
    for j in range(x):
        data[j][i] = data[j][i] * weight[i - 1]

# Ideal best and ideal worst value for every criterion, respecting impact sign
i_best = []
i_worst = []
for i in range(1, y):
    maxi = max(data[j][i] for j in range(x))
    mini = min(data[j][i] for j in range(x))
    if impact[i - 1] == '+':
        i_best.append(maxi)
        i_worst.append(mini)
    else:
        i_best.append(mini)
        i_worst.append(maxi)

# Euclidean distance of each row from the ideal best and ideal worst
s_best = []
s_worst = []
for i in range(x):
    sum1 = 0
    sum2 = 0
    for j in range(1, y):
        sum1 += (data[i][j] - i_best[j - 1]) ** 2
        sum2 += (data[i][j] - i_worst[j - 1]) ** 2
    s_best.append(sum1 ** 0.5)
    s_worst.append(sum2 ** 0.5)

# Performance score and rank (rank 1 = highest score; tied scores share a rank)
performance_score = [s_worst[i] / (s_best[i] + s_worst[i]) for i in range(x)]
temp_score = sorted(performance_score, reverse=True)
rank = [temp_score.index(score) + 1 for score in performance_score]

result = []
for i in range(x):
    result.append(c_data[i] + [performance_score[i], rank[i]])

# Add the new column names for the result file
data_columns.append("Topsis Score")
data_columns.append("Rank")

# Write the result file: original columns plus score and rank
# ('x' mode refuses to overwrite an existing file, as in the original).
with open(sys.argv[4], 'x', newline='') as result_csv:
    csvwriter = csv.writer(result_csv)
    csvwriter.writerow(data_columns)
    csvwriter.writerows(result)

print()
print("Result file containing all the input columns, TOPSIS SCORE and RANK is ready!")
Abhishek
/Abhishek-1.0.1.tar.gz/Abhishek-1.0.1/TOPSIS/topsis.py
topsis.py
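The TOPSIS pipeline above is easiest to sanity-check on a small made-up matrix. The sketch below repeats the same normalise, weight, ideal-points, distance, and score steps on three hypothetical alternatives (all numbers invented for illustration):

```python
# Toy check: 3 alternatives, 2 criteria, weights 1 and 1, impacts "+" and "-".
rows = [["A", 7.0, 9.0], ["B", 8.0, 7.0], ["C", 9.0, 6.0]]
weights, impacts = [1.0, 1.0], ["+", "-"]

# Vector-normalise and weight each criterion column
for c in (1, 2):
    norm = sum(r[c] ** 2 for r in rows) ** 0.5
    for r in rows:
        r[c] = r[c] / norm * weights[c - 1]

# Ideal best/worst per criterion, respecting the impact direction
best = [max(r[c] for r in rows) if impacts[c - 1] == "+" else min(r[c] for r in rows) for c in (1, 2)]
worst = [min(r[c] for r in rows) if impacts[c - 1] == "+" else max(r[c] for r in rows) for c in (1, 2)]

for r in rows:
    s_best = sum((r[c] - best[c - 1]) ** 2 for c in (1, 2)) ** 0.5
    s_worst = sum((r[c] - worst[c - 1]) ** 2 for c in (1, 2)) ** 0.5
    print(r[0], s_worst / (s_best + s_worst))  # higher score = closer to ideal
```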
# Abies

#### Audio Processing Framework

## Project Goals

There are two aspects to Abies: the hardware and the software.

The hardware will be an FPGA-based real-time audio processing pipeline. The Abies platform will have I2S connections to ADCs, DACs, and a microcontroller, as well as other peripherals like analog controls and MIDI. The microcontroller will act as a USB audio interface and data interface, which lets people use a variety of audio sources/sinks. The first application will be a guitar effects "pedal".

---

Abies software is a set of Python-based tools for designing and connecting audio effects. The goal is to allow rapid prototyping of different effects chains and configurations. FPGA builds can take a very long time, so we need an easy way to test different configurations without spending hours compiling. Once a simulation sounds good, the design can be built and uploaded to the FPGA. Abies will use a plugin framework so pre-compiled plugins can be loaded at runtime.

## Simulation

To do this, we will first focus on running simulations. We will use SystemC as the backbone of the testing framework. SystemC implements all the constructs necessary to run cycle-based simulations with multiple models at a time. We can then build "plugins" that can be saved, shared, and loaded at runtime. C++ simulations should be quite fast, and plugins won't need to be recompiled. Configurations can be specified with a netlist, and the simulator will handle the rest. The simulation will use audio files as input and will save an audio file at the end.

## Builder

Verilator can compile Verilog code to a SystemC model. The Abies C++ library will supply templates to wrap the verilated class so that it is compatible with the framework. Abies can generate all of the files necessary to build this library as a CMake project. Abies will then use pybind11, cmake, and scikit-build to build a Python module as the end product. This sounds rather complicated, but from the user's perspective it will be only a few Python functions. *Most* plugins will use the default use case, and there should be very few exceptions. These plugins can be uploaded to pypi.org.

Pybind11 is convenient, but it has limited CPython API compatibility: wheels built with pybind11 are only compatible with the version of Python they were built with. I don't mind building for many Python versions, but I don't want to force developers to do that too, and I want users to not have to compile plugins. They will only need to install Python 3.10, so we will design this project requiring Python 3.10 for all builds. Modules can always be built from an sdist if you want to use a different version of Python. I recommend using a virtual environment, and Abies may even use virtual environments behind the scenes anyway.

## Application

The Python application can import modules, and it will handle running simulations. It will have a command line interface, and eventually a GUI. It will be very simple to connect modules: users will only need to use a little Python or edit config files. Plugin designers will only need to know Verilog for the HDL design and a little Python. No direct C++ programming should be required (unless you want to).

---

## Final Remarks

My primary focus is to make this project accessible to as many people as possible. There may be people interested in the musical aspects who don't want to learn complex programming or engineering concepts. So we'll keep it simple and follow the design of other audio DAWs that people may already be familiar with.

I also want to make this project just as accessible to people interested in designing effects. Trying to learn Verilog, Python, and C++ all at the same time can be intimidating and challenging for beginners, so designing plugins will be simple as well. Abies will supply project templates and software tools to make running simulations and building plugins one-click operations. 99% of modules should work fine using the base template.

To make this all work in an easy way, plugin modules will need to adhere to certain standards: naming conventions, bus standards, and implementing certain required parameters and ports. Abies will provide verification tools to check whether designs obey all the rules.
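The README doesn't pin down the Python API yet, so the following is only a hypothetical sketch of the intended "connect modules, then simulate" flow; every name in it (`abies`, `Chain`, `load_plugin`, `simulate`) is invented for illustration and is not a real Abies API:

```python
# Hypothetical sketch only: module, class, and function names below are
# invented to illustrate the intended workflow described in this README.
import abies

chain = abies.Chain()
chain.add(abies.load_plugin("distortion"), gain=4.0)
chain.add(abies.load_plugin("delay"), time_ms=350, feedback=0.4)

# Run the SystemC-backed simulation on an input file, write the result.
abies.simulate(chain, infile="guitar_di.wav", outfile="processed.wav")
```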
Abies
/Abies-0.0.5.tar.gz/Abies-0.0.5/README.md
README.md
.. figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png
   :alt: pybind11 logo

**pybind11 — Seamless operability between C++11 and Python**

|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions| |CI| |Build status| |Repology| |PyPI package| |Conda-forge| |Python Versions|

`Setuptools example <https://github.com/pybind/python_example>`_ • `Scikit-build example <https://github.com/pybind/scikit_build_example>`_ • `CMake example <https://github.com/pybind/cmake_example>`_

.. start

**pybind11** is a lightweight header-only library that exposes C++ types in Python and vice versa, mainly to create Python bindings of existing C++ code. Its goals and syntax are similar to the excellent `Boost.Python <http://www.boost.org/doc/libs/1_58_0/libs/python/doc/>`_ library by David Abrahams: to minimize boilerplate code in traditional extension modules by inferring type information using compile-time introspection.

The main issue with Boost.Python—and the reason for creating such a similar project—is Boost. Boost is an enormously large and complex suite of utility libraries that works with almost every C++ compiler in existence. This compatibility has its cost: arcane template tricks and workarounds are necessary to support the oldest and buggiest of compiler specimens. Now that C++11-compatible compilers are widely available, this heavy machinery has become an excessively large and unnecessary dependency.

Think of this library as a tiny self-contained version of Boost.Python with everything stripped away that isn't relevant for binding generation. Without comments, the core header files only require ~4K lines of code and depend on Python (2.7 or 3.5+, or PyPy) and the C++ standard library. This compact implementation was possible thanks to some of the new C++11 language features (specifically: tuples, lambda functions and variadic templates). Since its creation, this library has grown beyond Boost.Python in many ways, leading to dramatically simpler binding code in many common situations.

Tutorial and reference documentation is provided at `pybind11.readthedocs.io <https://pybind11.readthedocs.io/en/latest>`_. A PDF version of the manual is available `here <https://pybind11.readthedocs.io/_/downloads/en/latest/pdf/>`_. And the source code is always available at `github.com/pybind/pybind11 <https://github.com/pybind/pybind11>`_.

Core features
-------------

pybind11 can map the following core C++ features to Python:

- Functions accepting and returning custom data structures per value, reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like ``std::shared_ptr``
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended in Python

Goodies
-------

In addition to the core functionality, pybind11 provides some extra goodies:

- Python 2.7, 3.5+, and PyPy/PyPy3 7.3 are supported with an implementation-agnostic interface.
- It is possible to bind C++11 lambda functions with captured variables. The lambda capture data is stored inside the resulting Python function object.
- pybind11 uses C++11 move constructors and move assignment operators whenever possible to efficiently transfer custom data types.
- It's easy to expose the internal storage of custom data types through Python's buffer protocols. This is handy e.g. for fast conversion between C++ matrix classes like Eigen and NumPy without expensive copy operations.
- pybind11 can automatically vectorize functions so that they are transparently applied to all entries of one or more NumPy array arguments.
- Python's slice-based access and assignment operations can be supported with just a few lines of code.
- Everything is contained in just a few header files; there is no need to link against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to equivalent bindings generated by Boost.Python. A recent pybind11 conversion of PyRosetta, an enormous Boost.Python binding project, `reported <https://graylab.jhu.edu/Sergey/2016.RosettaCon/PyRosetta-4.pdf>`_ a binary size reduction of **5.4x** and compile time reduction by **5.8x**.
- Function signatures are precomputed at compile time (using ``constexpr``), leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled similar to regular Python objects.

Supported compilers
-------------------

1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 Update 3 or newer
4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI)
5. Cygwin/GCC (previously tested on 2.5.1)
6. NVCC (CUDA 11.0 tested in CI)
7. NVIDIA PGI (20.9 tested in CI)

About
-----

This project was created by `Wenzel Jakob <http://rgl.epfl.ch/people/wjakob>`_. Significant features and/or improvements to the code were contributed by Jonas Adler, Lori A. Burns, Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko, Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart.

We thank Google for a generous financial contribution to the continuous integration infrastructure used by this project.

Contributing
~~~~~~~~~~~~

See the `contributing guide <https://github.com/pybind/pybind11/blob/master/.github/CONTRIBUTING.md>`_ for information on building and contributing to pybind11.

License
~~~~~~~

pybind11 is provided under a BSD-style license that can be found in the `LICENSE <https://github.com/pybind/pybind11/blob/master/LICENSE>`_ file. By using, distributing, or contributing to this project, you agree to the terms and conditions of this license.

.. |Latest Documentation Status| image:: https://readthedocs.org/projects/pybind11/badge?version=latest
   :target: http://pybind11.readthedocs.org/en/latest
.. |Stable Documentation Status| image:: https://img.shields.io/badge/docs-stable-blue.svg
   :target: http://pybind11.readthedocs.org/en/stable
.. |Gitter chat| image:: https://img.shields.io/gitter/room/gitterHQ/gitter.svg
   :target: https://gitter.im/pybind/Lobby
.. |CI| image:: https://github.com/pybind/pybind11/workflows/CI/badge.svg
   :target: https://github.com/pybind/pybind11/actions
.. |Build status| image:: https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true
   :target: https://ci.appveyor.com/project/wjakob/pybind11
.. |PyPI package| image:: https://img.shields.io/pypi/v/pybind11.svg
   :target: https://pypi.org/project/pybind11/
.. |Conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pybind11.svg
   :target: https://github.com/conda-forge/pybind11-feedstock
.. |Repology| image:: https://repology.org/badge/latest-versions/python:pybind11.svg
   :target: https://repology.org/project/python:pybind11/versions
.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pybind11.svg
   :target: https://pypi.org/project/pybind11/
.. |GitHub Discussions| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github
   :target: https://github.com/pybind/pybind11/discussions
Abies
/Abies-0.0.5.tar.gz/Abies-0.0.5/extern/pybind11/README.rst
README.rst
import nox

nox.options.sessions = ["lint", "tests", "tests_packaging"]

PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8", "3.9", "3.10", "3.11"]


@nox.session(reuse_venv=True)
def lint(session: nox.Session) -> None:
    """
    Lint the codebase (except for clang-format/tidy).
    """
    session.install("pre-commit")
    session.run("pre-commit", "run", "-a")


@nox.session(python=PYTHON_VERSIONS)
def tests(session: nox.Session) -> None:
    """
    Run the tests (requires a compiler).
    """
    tmpdir = session.create_tmp()
    session.install("cmake")
    session.install("-r", "tests/requirements.txt")
    session.run(
        "cmake",
        "-S",
        ".",
        "-B",
        tmpdir,
        "-DPYBIND11_WERROR=ON",
        "-DDOWNLOAD_CATCH=ON",
        "-DDOWNLOAD_EIGEN=ON",
        *session.posargs
    )
    session.run("cmake", "--build", tmpdir)
    session.run("cmake", "--build", tmpdir, "--config=Release", "--target", "check")


@nox.session
def tests_packaging(session: nox.Session) -> None:
    """
    Run the packaging tests.
    """
    session.install("-r", "tests/requirements.txt", "--prefer-binary")
    session.run("pytest", "tests/extra_python_package")


@nox.session(reuse_venv=True)
def docs(session: nox.Session) -> None:
    """
    Build the docs. Pass "serve" to serve.
    """
    session.install("-r", "docs/requirements.txt")
    session.chdir("docs")
    if "pdf" in session.posargs:
        session.run("sphinx-build", "-b", "latexpdf", ".", "_build")
        return
    session.run("sphinx-build", "-b", "html", ".", "_build")
    if "serve" in session.posargs:
        session.log("Launching docs at http://localhost:8000/ - use Ctrl-C to quit")
        session.run("python", "-m", "http.server", "8000", "-d", "_build/html")
    elif session.posargs:
        session.error("Unsupported argument to docs")


@nox.session(reuse_venv=True)
def make_changelog(session: nox.Session) -> None:
    """
    Inspect the closed issues and make entries for a changelog.
    """
    session.install("ghapi", "rich")
    session.run("python", "tools/make_changelog.py")


@nox.session(reuse_venv=True)
def build(session: nox.Session) -> None:
    """
    Build SDists and wheels.
    """
    session.install("build")
    session.log("Building normal files")
    session.run("python", "-m", "build", *session.posargs)
    session.log("Building pybind11-global files (PYBIND11_GLOBAL_SDIST=1)")
    session.run(
        "python", "-m", "build", *session.posargs, env={"PYBIND11_GLOBAL_SDIST": "1"}
    )
Abies
/Abies-0.0.5.tar.gz/Abies-0.0.5/extern/pybind11/noxfile.py
noxfile.py
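Assuming `nox` is installed, the sessions above are invoked by name from the command line, for example `nox -s lint` or `nox -s build`; because `tests` is parametrized over `PYTHON_VERSIONS`, a single interpreter's run is selected with a suffixed name such as `nox -s tests-3.9`, and `nox -l` lists every available session.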
Frequently asked questions
##########################

"ImportError: dynamic module does not define init function"
===========================================================

1. Make sure that the name specified in PYBIND11_MODULE is identical to the filename of the extension library (without suffixes such as ``.so``).

2. If the above did not fix the issue, you are likely using an incompatible version of Python (for instance, the extension library was compiled against Python 2, while the interpreter is running on top of some version of Python 3, or vice versa).

"Symbol not found: ``__Py_ZeroStruct`` / ``_PyInstanceMethod_Type``"
========================================================================

See the first answer.

"SystemError: dynamic module not initialized properly"
======================================================

See the first answer.

The Python interpreter immediately crashes when importing my module
===================================================================

See the first answer.

.. _faq_reference_arguments:

Limitations involving reference arguments
=========================================

In C++, it's fairly common to pass arguments using mutable references or mutable pointers, which allows both read and write access to the value supplied by the caller. This is sometimes done for efficiency reasons, or to realize functions that have multiple return values. Here are two very basic examples:

.. code-block:: cpp

    void increment(int &i) { i++; }
    void increment_ptr(int *i) { (*i)++; }

In Python, all arguments are passed by reference, so there is no general issue in binding such code from Python. However, certain basic Python types (like ``str``, ``int``, ``bool``, ``float``, etc.) are **immutable**. This means that the following attempt to port the function to Python doesn't have the same effect on the value provided by the caller -- in fact, it does nothing at all.

.. code-block:: python

    def increment(i):
        i += 1  # nope..

pybind11 is also affected by such language-level conventions, which means that binding ``increment`` or ``increment_ptr`` will also create Python functions that don't modify their arguments.

Although inconvenient, one workaround is to encapsulate the immutable types in a custom type that does allow modifications. Another alternative involves binding a small wrapper lambda function that returns a tuple with all output arguments (see the remainder of the documentation for examples on binding lambda functions). An example:

.. code-block:: cpp

    int foo(int &i) { i++; return 123; }

and the binding code

.. code-block:: cpp

    m.def("foo", [](int i) { int rv = foo(i); return std::make_tuple(rv, i); });

How can I reduce the build time?
================================

It's good practice to split binding code over multiple files, as in the following example:

:file:`example.cpp`:

.. code-block:: cpp

    void init_ex1(py::module_ &);
    void init_ex2(py::module_ &);
    /* ... */

    PYBIND11_MODULE(example, m) {
        init_ex1(m);
        init_ex2(m);
        /* ... */
    }

:file:`ex1.cpp`:

.. code-block:: cpp

    void init_ex1(py::module_ &m) {
        m.def("add", [](int a, int b) { return a + b; });
    }

:file:`ex2.cpp`:

.. code-block:: cpp

    void init_ex2(py::module_ &m) {
        m.def("sub", [](int a, int b) { return a - b; });
    }

:command:`python`:

.. code-block:: pycon

    >>> import example
    >>> example.add(1, 2)
    3
    >>> example.sub(1, 1)
    0

As shown above, the various ``init_ex`` functions should be contained in separate files that can be compiled independently from one another, and then linked together into the same final shared object. Following this approach will:

1. reduce memory requirements per compilation unit.
2. enable parallel builds (if desired).
3. allow for faster incremental builds. For instance, when a single class definition is changed, only a subset of the binding code will generally need to be recompiled.

"recursive template instantiation exceeded maximum depth of 256"
================================================================

If you receive an error about excessive recursive template evaluation, try specifying a larger value, e.g. ``-ftemplate-depth=1024`` on GCC/Clang. The culprit is generally the generation of function signatures at compile time using C++14 template metaprogramming.

.. _`faq:hidden_visibility`:

"‘SomeClass’ declared with greater visibility than the type of its field ‘SomeClass::member’ [-Wattributes]"
============================================================================================================

This error typically indicates that you are compiling without the required ``-fvisibility`` flag. pybind11 code internally forces hidden visibility on all internal code, but if non-hidden (and thus *exported*) code attempts to include a pybind type (for example, ``py::object`` or ``py::list``) you can run into this warning. To avoid it, make sure you are specifying ``-fvisibility=hidden`` when compiling pybind code.

As to why ``-fvisibility=hidden`` is necessary: because pybind modules could have been compiled under different versions of pybind itself, it is also important that the symbols defined in one module do not clash with the potentially-incompatible symbols defined in another. While Python extension modules are usually loaded with localized symbols (under POSIX systems typically using ``dlopen`` with the ``RTLD_LOCAL`` flag), this Python default can be changed, but even if it isn't it is not always enough to guarantee complete independence of the symbols involved when not using ``-fvisibility=hidden``. Additionally, ``-fvisibility=hidden`` can deliver considerable binary size savings. (See the following section for more details.)

.. _`faq:symhidden`:

How can I create smaller binaries?
==================================

To do its job, pybind11 extensively relies on a programming technique known as *template metaprogramming*, which is a way of performing computation at compile time using type information. Template metaprogramming usually instantiates code involving significant numbers of deeply nested types that are either completely removed or reduced to just a few instructions during the compiler's optimization phase. However, due to the nested nature of these types, the resulting symbol names in the compiled extension library can be extremely long. For instance, the included test suite contains the following symbol:

.. only:: html

    .. code-block:: none

        _​_​Z​N​8​p​y​b​i​n​d​1​1​1​2​c​p​p​_​f​u​n​c​t​i​o​n​C​1​I​v​8​E​x​a​m​p​l​e​2​J​R​N​S​t​3​_​_​1​6​v​e​c​t​o​r​I​N​S​3​_​1​2​b​a​s​i​c​_​s​t​r​i​n​g​I​w​N​S​3​_​1​1​c​h​a​r​_​t​r​a​i​t​s​I​w​E​E​N​S​3​_​9​a​l​l​o​c​a​t​o​r​I​w​E​E​E​E​N​S​8​_​I​S​A​_​E​E​E​E​E​J​N​S​_​4​n​a​m​e​E​N​S​_​7​s​i​b​l​i​n​g​E​N​S​_​9​i​s​_​m​e​t​h​o​d​E​A​2​8​_​c​E​E​E​M​T​0​_​F​T​_​D​p​T​1​_​E​D​p​R​K​T​2​_

.. only:: not html

    .. code-block:: cpp

        __ZN8pybind1112cpp_functionC1Iv8Example2JRNSt3__16vectorINS3_12basic_stringIwNS3_11char_traitsIwEENS3_9allocatorIwEEEENS8_ISA_EEEEEJNS_4nameENS_7siblingENS_9is_methodEA28_cEEEMT0_FT_DpT1_EDpRKT2_

which is the mangled form of the following function type:

.. code-block:: cpp

    pybind11::cpp_function::cpp_function<void, Example2, std::__1::vector<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> >, std::__1::allocator<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> > > >&, pybind11::name, pybind11::sibling, pybind11::is_method, char [28]>(void (Example2::*)(std::__1::vector<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> >, std::__1::allocator<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> > > >&), pybind11::name const&, pybind11::sibling const&, pybind11::is_method const&, char const (&) [28])

The memory needed to store just the mangled name of this function (196 bytes) is larger than the actual piece of code (111 bytes) it represents! On the other hand, it's silly to even give this function a name -- after all, it's just a tiny cog in a bigger piece of machinery that is not exposed to the outside world. So we'll generally only want to export symbols for those functions which are actually called from the outside.

This can be achieved by specifying the parameter ``-fvisibility=hidden`` to GCC and Clang, which sets the default symbol visibility to *hidden*, which has a tremendous impact on the final binary size of the resulting extension library. (On Visual Studio, symbols are already hidden by default, so nothing needs to be done there.)

In addition to decreasing binary size, ``-fvisibility=hidden`` also avoids potential serious issues when loading multiple modules and is required for proper pybind operation. See the previous FAQ entry for more details.

Working with ancient Visual Studio 2008 builds on Windows
=========================================================

The official Windows distributions of Python are compiled using truly ancient versions of Visual Studio that lack good C++11 support. Some users implicitly assume that it would be impossible to load a plugin built with Visual Studio 2015 into a Python distribution that was compiled using Visual Studio 2008. However, no such issue exists: it's perfectly legitimate to interface DLLs that are built with different compilers and/or C libraries. Common gotchas to watch out for involve not ``free()``-ing memory regions that were ``malloc()``-ed in another shared library, using data structures with incompatible ABIs, and so on. pybind11 is very careful not to make these types of mistakes.

How can I properly handle Ctrl-C in long-running functions?
===========================================================

Ctrl-C is received by the Python interpreter, which holds it until the GIL is released, so a long-running function won't be interrupted. To interrupt from inside your function, you can use the ``PyErr_CheckSignals()`` function, which will tell you if a signal has been raised on the Python side. This function merely checks a flag, so its impact is negligible. When a signal has been received, you must either explicitly interrupt execution by throwing ``py::error_already_set`` (which will propagate the existing ``KeyboardInterrupt``), or clear the error (which you usually will not want):

.. code-block:: cpp

    PYBIND11_MODULE(example, m)
    {
        m.def("long running_func", []()
        {
            for (;;) {
                if (PyErr_CheckSignals() != 0)
                    throw py::error_already_set();
                // Long running iteration
            }
        });
    }

CMake doesn't detect the right Python version
=============================================

The CMake-based build system will try to automatically detect the installed version of Python and link against that. When this fails, or when there are multiple versions of Python and it finds the wrong one, delete ``CMakeCache.txt`` and then add ``-DPYTHON_EXECUTABLE=$(which python)`` to your CMake configure line. (Replace ``$(which python)`` with a path to python if you prefer.)

You can alternatively try ``-DPYBIND11_FINDPYTHON=ON``, which will activate the new CMake FindPython support instead of pybind11's custom search. Requires CMake 3.12+, and 3.15+ or 3.18.2+ are even better. You can set this in your ``CMakeLists.txt`` before adding or finding pybind11, as well.

Inconsistent detection of Python version in CMake and pybind11
==============================================================

The functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` provided by CMake for Python version detection are modified by pybind11 due to unreliability and limitations that make them unsuitable for pybind11's needs. Instead pybind11 provides its own, more reliable Python detection CMake code. Conflicts can arise, however, when using pybind11 in a project that *also* uses the CMake Python detection in a system with several Python versions installed.

This difference may cause inconsistencies and errors if *both* mechanisms are used in the same project. Consider the following CMake code executed in a system with Python 2.7 and 3.x installed:

.. code-block:: cmake

    find_package(PythonInterp)
    find_package(PythonLibs)
    find_package(pybind11)

It will detect Python 2.7 and pybind11 will pick it as well. In contrast this code:

.. code-block:: cmake

    find_package(pybind11)
    find_package(PythonInterp)
    find_package(PythonLibs)

will detect Python 3.x for pybind11 and may crash on ``find_package(PythonLibs)`` afterwards.

There are three possible solutions:

1. Avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` from CMake and rely on pybind11 in detecting Python version. If this is not possible, the CMake machinery should be called *before* including pybind11.
2. Set ``PYBIND11_FINDPYTHON`` to ``True`` or use ``find_package(Python COMPONENTS Interpreter Development)`` on modern CMake (3.12+, 3.15+ better, 3.18.2+ best). Pybind11 in these cases uses the new CMake FindPython instead of the old, deprecated search tools, and these modules are much better at finding the correct Python.
3. Set ``PYBIND11_NOPYTHON`` to ``TRUE``. Pybind11 will not search for Python. However, you will have to use the target-based system, and do more setup yourself, because it does not know about or include things that depend on Python, like ``pybind11_add_module``. This might be ideal for integrating into an existing system, like scikit-build's Python helpers.

How to cite this project?
=========================

We suggest the following BibTeX template to cite pybind11 in scientific discourse:

.. code-block:: bash

    @misc{pybind11,
        author = {Wenzel Jakob and Jason Rhinelander and Dean Moldovan},
        year = {2017},
        note = {https://github.com/pybind/pybind11},
        title = {pybind11 -- Seamless operability between C++11 and Python}
    }
Abies
/Abies-0.0.5.tar.gz/Abies-0.0.5/extern/pybind11/docs/faq.rst
faq.rst
Upgrade guide ############# This is a companion guide to the :doc:`changelog`. While the changelog briefly lists all of the new features, improvements and bug fixes, this upgrade guide focuses only the subset which directly impacts your experience when upgrading to a new version. But it goes into more detail. This includes things like deprecated APIs and their replacements, build system changes, general code modernization and other useful information. .. _upgrade-guide-2.9: v2.9 ==== * Any usage of the recently added ``py::make_simple_namespace`` should be converted to using ``py::module_::import("types").attr("SimpleNamespace")`` instead. * The use of ``_`` in custom type casters can now be replaced with the more readable ``const_name`` instead. The old ``_`` shortcut has been retained unless it is being used as a macro (like for gettext). .. _upgrade-guide-2.7: v2.7 ==== *Before* v2.7, ``py::str`` can hold ``PyUnicodeObject`` or ``PyBytesObject``, and ``py::isinstance<str>()`` is ``true`` for both ``py::str`` and ``py::bytes``. Starting with v2.7, ``py::str`` exclusively holds ``PyUnicodeObject`` (`#2409 <https://github.com/pybind/pybind11/pull/2409>`_), and ``py::isinstance<str>()`` is ``true`` only for ``py::str``. To help in the transition of user code, the ``PYBIND11_STR_LEGACY_PERMISSIVE`` macro is provided as an escape hatch to go back to the legacy behavior. This macro will be removed in future releases. Two types of required fixes are expected to be common: * Accidental use of ``py::str`` instead of ``py::bytes``, masked by the legacy behavior. These are probably very easy to fix, by changing from ``py::str`` to ``py::bytes``. * Reliance on py::isinstance<str>(obj) being ``true`` for ``py::bytes``. This is likely to be easy to fix in most cases by adding ``|| py::isinstance<bytes>(obj)``, but a fix may be more involved, e.g. if ``py::isinstance<T>`` appears in a template. Such situations will require careful review and custom fixes. .. _upgrade-guide-2.6: v2.6 ==== Usage of the ``PYBIND11_OVERLOAD*`` macros and ``get_overload`` function should be replaced by ``PYBIND11_OVERRIDE*`` and ``get_override``. In the future, the old macros may be deprecated and removed. ``py::module`` has been renamed ``py::module_``, but a backward compatible typedef has been included. This change was to avoid a language change in C++20 that requires unqualified ``module`` not be placed at the start of a logical line. Qualified usage is unaffected and the typedef will remain unless the C++ language rules change again. The public constructors of ``py::module_`` have been deprecated. Use ``PYBIND11_MODULE`` or ``module_::create_extension_module`` instead. An error is now thrown when ``__init__`` is forgotten on subclasses. This was incorrect before, but was not checked. Add a call to ``__init__`` if it is missing. A ``py::type_error`` is now thrown when casting to a subclass (like ``py::bytes`` from ``py::object``) if the conversion is not valid. Make a valid conversion instead. The undocumented ``h.get_type()`` method has been deprecated and replaced by ``py::type::of(h)``. Enums now have a ``__str__`` method pre-defined; if you want to override it, the simplest fix is to add the new ``py::prepend()`` tag when defining ``"__str__"``. If ``__eq__`` defined but not ``__hash__``, ``__hash__`` is now set to ``None``, as in normal CPython. You should add ``__hash__`` if you intended the class to be hashable, possibly using the new ``py::hash`` shortcut. 
The constructors for ``py::array`` now always take signed integers for size, for consistency. This may lead to compiler warnings on some systems. Cast to ``py::ssize_t`` instead of ``std::size_t``. The ``tools/clang`` submodule and ``tools/mkdoc.py`` have been moved to a standalone package, `pybind11-mkdoc`_. If you were using those tools, please use them via a pip install from the new location. The ``pybind11`` package on PyPI no longer fills the wheel "headers" slot - if you were using the headers from this slot, they are available by requesting the ``global`` extra, that is, ``pip install "pybind11[global]"``. (Most users will be unaffected, as the ``pybind11/include`` location is reported by ``python -m pybind11 --includes`` and ``pybind11.get_include()`` is still correct and has not changed since 2.5). .. _pybind11-mkdoc: https://github.com/pybind/pybind11-mkdoc CMake support: -------------- The minimum required version of CMake is now 3.4. Several details of the CMake support have been deprecated; warnings will be shown if you need to change something. The changes are: * ``PYBIND11_CPP_STANDARD=<platform-flag>`` is deprecated, please use ``CMAKE_CXX_STANDARD=<number>`` instead, or any other valid CMake CXX or CUDA standard selection method, like ``target_compile_features``. * If you do not request a standard, pybind11 targets will compile with the compiler default, but not less than C++11, instead of forcing C++14 always. If you depend on the old behavior, please use ``set(CMAKE_CXX_STANDARD 14 CACHE STRING "")`` instead. * Direct ``pybind11::module`` usage should always be accompanied by at least ``set(CMAKE_CXX_VISIBILITY_PRESET hidden)`` or similar - it used to try to manually force this compiler flag (but not correctly on all compilers or with CUDA). * ``pybind11_add_module``'s ``SYSTEM`` argument is deprecated and does nothing; linking now behaves like other imported libraries consistently in both config and submodule mode, and behaves like a ``SYSTEM`` library by default. * If ``PYTHON_EXECUTABLE`` is not set, virtual environments (``venv``, ``virtualenv``, and ``conda``) are prioritized over the standard search (similar to the new FindPython mode). In addition, the following changes may be of interest: * ``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` will be respected by ``pybind11_add_module`` if set instead of linking to ``pybind11::lto`` or ``pybind11::thin_lto``. * Using ``find_package(Python COMPONENTS Interpreter Development)`` before pybind11 will cause pybind11 to use the new Python mechanisms instead of its own custom search, based on a patched version of classic ``FindPythonInterp`` / ``FindPythonLibs``. In the future, this may become the default. A recent (3.15+ or 3.18.2+) version of CMake is recommended. v2.5 ==== The Python package now includes the headers as data in the package itself, as well as in the "headers" wheel slot. ``pybind11 --includes`` and ``pybind11.get_include()`` report the new location, which is always correct regardless of how pybind11 was installed, making the old ``user=`` argument meaningless. If you are not using the function to get the location already, you are encouraged to switch to the package location. v2.2 ==== Deprecation of the ``PYBIND11_PLUGIN`` macro -------------------------------------------- ``PYBIND11_MODULE`` is now the preferred way to create module entry points. The old macro emits a compile-time deprecation warning. .. 
code-block:: cpp // old PYBIND11_PLUGIN(example) { py::module m("example", "documentation string"); m.def("add", [](int a, int b) { return a + b; }); return m.ptr(); } // new PYBIND11_MODULE(example, m) { m.doc() = "documentation string"; // optional m.def("add", [](int a, int b) { return a + b; }); } New API for defining custom constructors and pickling functions --------------------------------------------------------------- The old placement-new custom constructors have been deprecated. The new approach uses ``py::init()`` and factory functions to greatly improve type safety. Placement-new can be called accidentally with an incompatible type (without any compiler errors or warnings), or it can initialize the same object multiple times if not careful with the Python-side ``__init__`` calls. The new-style custom constructors prevent such mistakes. See :ref:`custom_constructors` for details. .. code-block:: cpp // old -- deprecated (runtime warning shown only in debug mode) py::class<Foo>(m, "Foo") .def("__init__", [](Foo &self, ...) { new (&self) Foo(...); // uses placement-new }); // new py::class<Foo>(m, "Foo") .def(py::init([](...) { // Note: no `self` argument return new Foo(...); // return by raw pointer // or: return std::make_unique<Foo>(...); // return by holder // or: return Foo(...); // return by value (move constructor) })); Mirroring the custom constructor changes, ``py::pickle()`` is now the preferred way to get and set object state. See :ref:`pickling` for details. .. code-block:: cpp // old -- deprecated (runtime warning shown only in debug mode) py::class<Foo>(m, "Foo") ... .def("__getstate__", [](const Foo &self) { return py::make_tuple(self.value1(), self.value2(), ...); }) .def("__setstate__", [](Foo &self, py::tuple t) { new (&self) Foo(t[0].cast<std::string>(), ...); }); // new py::class<Foo>(m, "Foo") ... .def(py::pickle( [](const Foo &self) { // __getstate__ return py::make_tuple(self.value1(), self.value2(), ...); // unchanged }, [](py::tuple t) { // __setstate__, note: no `self` argument return new Foo(t[0].cast<std::string>(), ...); // or: return std::make_unique<Foo>(...); // return by holder // or: return Foo(...); // return by value (move constructor) } )); For both the constructors and pickling, warnings are shown at module initialization time (on import, not when the functions are called). They're only visible when compiled in debug mode. Sample warning: .. code-block:: none pybind11-bound class 'mymodule.Foo' is using an old-style placement-new '__init__' which has been deprecated. See the upgrade guide in pybind11's docs. Stricter enforcement of hidden symbol visibility for pybind11 modules --------------------------------------------------------------------- pybind11 now tries to actively enforce hidden symbol visibility for modules. If you're using either one of pybind11's :doc:`CMake or Python build systems <compiling>` (the two example repositories) and you haven't been exporting any symbols, there's nothing to be concerned about. All the changes have been done transparently in the background. If you were building manually or relied on specific default visibility, read on. Setting default symbol visibility to *hidden* has always been recommended for pybind11 (see :ref:`faq:symhidden`). On Linux and macOS, hidden symbol visibility (in conjunction with the ``strip`` utility) yields much smaller module binaries. `CPython's extension docs`_ also recommend hiding symbols by default, with the goal of avoiding symbol name clashes between modules. 
Starting with v2.2, pybind11 enforces this more strictly: (1) by declaring all symbols inside the ``pybind11`` namespace as hidden and (2) by including the ``-fvisibility=hidden`` flag on Linux and macOS (only for extension modules, not for embedding the interpreter). .. _CPython's extension docs: https://docs.python.org/3/extending/extending.html#providing-a-c-api-for-an-extension-module The namespace-scope hidden visibility is done automatically in pybind11's headers and it's generally transparent to users. It ensures that: * Modules compiled with different pybind11 versions don't clash with each other. * Some new features, like ``py::module_local`` bindings, can work as intended. The ``-fvisibility=hidden`` flag applies the same visibility to user bindings outside of the ``pybind11`` namespace. It's now set automatic by pybind11's CMake and Python build systems, but this needs to be done manually by users of other build systems. Adding this flag: * Minimizes the chances of symbol conflicts between modules. E.g. if two unrelated modules were statically linked to different (ABI-incompatible) versions of the same third-party library, a symbol clash would be likely (and would end with unpredictable results). * Produces smaller binaries on Linux and macOS, as pointed out previously. Within pybind11's CMake build system, ``pybind11_add_module`` has always been setting the ``-fvisibility=hidden`` flag in release mode. From now on, it's being applied unconditionally, even in debug mode and it can no longer be opted out of with the ``NO_EXTRAS`` option. The ``pybind11::module`` target now also adds this flag to its interface. The ``pybind11::embed`` target is unchanged. The most significant change here is for the ``pybind11::module`` target. If you were previously relying on default visibility, i.e. if your Python module was doubling as a shared library with dependents, you'll need to either export symbols manually (recommended for cross-platform libraries) or factor out the shared library (and have the Python module link to it like the other dependents). As a temporary workaround, you can also restore default visibility using the CMake code below, but this is not recommended in the long run: .. code-block:: cmake target_link_libraries(mymodule PRIVATE pybind11::module) add_library(restore_default_visibility INTERFACE) target_compile_options(restore_default_visibility INTERFACE -fvisibility=default) target_link_libraries(mymodule PRIVATE restore_default_visibility) Local STL container bindings ---------------------------- Previous pybind11 versions could only bind types globally -- all pybind11 modules, even unrelated ones, would have access to the same exported types. However, this would also result in a conflict if two modules exported the same C++ type, which is especially problematic for very common types, e.g. ``std::vector<int>``. :ref:`module_local` were added to resolve this (see that section for a complete usage guide). ``py::class_`` still defaults to global bindings (because these types are usually unique across modules), however in order to avoid clashes of opaque types, ``py::bind_vector`` and ``py::bind_map`` will now bind STL containers as ``py::module_local`` if their elements are: builtins (``int``, ``float``, etc.), not bound using ``py::class_``, or bound as ``py::module_local``. For example, this change allows multiple modules to bind ``std::vector<int>`` without causing conflicts. See :ref:`stl_bind` for more details. 
When upgrading to this version, if you have multiple modules which depend on a single global binding of an STL container, note that all modules can still accept foreign ``py::module_local`` types in the direction of Python-to-C++. The locality only affects the C++-to-Python direction. If this is needed in multiple modules, you'll need to either: * Add a copy of the same STL binding to all of the modules which need it. * Restore the global status of that single binding by marking it ``py::module_local(false)``. The latter is an easy workaround, but in the long run it would be best to localize all common type bindings in order to avoid conflicts with third-party modules. Negative strides for Python buffer objects and numpy arrays ----------------------------------------------------------- Support for negative strides required changing the integer type from unsigned to signed in the interfaces of ``py::buffer_info`` and ``py::array``. If you have compiler warnings enabled, you may notice some new conversion warnings after upgrading. These can be resolved using ``static_cast``. Deprecation of some ``py::object`` APIs --------------------------------------- To compare ``py::object`` instances by pointer, you should now use ``obj1.is(obj2)`` which is equivalent to ``obj1 is obj2`` in Python. Previously, pybind11 used ``operator==`` for this (``obj1 == obj2``), but that could be confusing and is now deprecated (so that it can eventually be replaced with proper rich object comparison in a future release). For classes which inherit from ``py::object``, ``borrowed`` and ``stolen`` were previously available as protected constructor tags. Now the types should be used directly instead: ``borrowed_t{}`` and ``stolen_t{}`` (`#771 <https://github.com/pybind/pybind11/pull/771>`_). Stricter compile-time error checking ------------------------------------ Some error checks have been moved from run time to compile time. Notably, automatic conversion of ``std::shared_ptr<T>`` is not possible when ``T`` is not directly registered with ``py::class_<T>`` (e.g. ``std::shared_ptr<int>`` or ``std::shared_ptr<std::vector<T>>`` are not automatically convertible). Attempting to bind a function with such arguments now results in a compile-time error instead of waiting to fail at run time. ``py::init<...>()`` constructor definitions are also stricter and now prevent bindings which could cause unexpected behavior: .. code-block:: cpp struct Example { Example(int &); }; py::class_<Example>(m, "Example") .def(py::init<int &>()); // OK, exact match // .def(py::init<int>()); // compile-time error, mismatch A non-``const`` lvalue reference is not allowed to bind to an rvalue. However, note that a constructor taking ``const T &`` can still be registered using ``py::init<T>()`` because a ``const`` lvalue reference can bind to an rvalue. v2.1 ==== Minimum compiler versions are enforced at compile time ------------------------------------------------------ The minimums also apply to v2.0 but the check is now explicit and a compile-time error is raised if the compiler does not meet the requirements: * GCC >= 4.8 * clang >= 3.3 (appleclang >= 5.0) * MSVC >= 2015u3 * Intel C++ >= 15.0 The ``py::metaclass`` attribute is not required for static properties --------------------------------------------------------------------- Binding classes with static properties is now possible by default. The zero-parameter version of ``py::metaclass()`` is deprecated. 
However, a new one-parameter ``py::metaclass(python_type)`` version was added
for rare cases when a custom metaclass is needed to override pybind11's
default.

.. code-block:: cpp

    // old -- emits a deprecation warning
    py::class_<Foo>(m, "Foo", py::metaclass())
        .def_property_readonly_static("foo", ...);

    // new -- static properties work without the attribute
    py::class_<Foo>(m, "Foo")
        .def_property_readonly_static("foo", ...);

    // new -- advanced feature, override pybind11's default metaclass
    py::class_<Bar>(m, "Bar", py::metaclass(custom_python_type))
        ...

v2.0
====

Breaking changes in ``py::class_``
----------------------------------

These changes were necessary to make type definitions in pybind11
future-proof, to support PyPy via its ``cpyext`` mechanism (`#527
<https://github.com/pybind/pybind11/pull/527>`_), and to improve efficiency
(`rev. 86d825 <https://github.com/pybind/pybind11/commit/86d825>`_).

1. Declarations of types that provide access via the buffer protocol must
   now include the ``py::buffer_protocol()`` annotation as an argument to
   the ``py::class_`` constructor.

   .. code-block:: cpp

       py::class_<Matrix>("Matrix", py::buffer_protocol())
           .def(py::init<...>())
           .def_buffer(...);

2. Classes which include static properties (e.g. ``def_readwrite_static()``)
   must now include the ``py::metaclass()`` attribute. Note: this requirement
   has since been removed in v2.1. If you're upgrading from 1.x, it's
   recommended to skip directly to v2.1 or newer.

3. This version of pybind11 uses a redesigned mechanism for instantiating
   trampoline classes that are used to override virtual methods from within
   Python. This led to the following user-visible syntax change:

   .. code-block:: cpp

       // old v1.x syntax
       py::class_<TrampolineClass>("MyClass")
           .alias<MyClass>()
           ...

       // new v2.x syntax
       py::class_<MyClass, TrampolineClass>("MyClass")
           ...

   Importantly, both the original and the trampoline class are now specified
   as arguments to the ``py::class_`` template, and the ``alias<..>()`` call
   is gone. The new scheme has zero overhead in cases when Python doesn't
   override any functions of the underlying C++ class.
   `rev. 86d825 <https://github.com/pybind/pybind11/commit/86d825>`_.

   The class type must be the first template argument given to ``py::class_``
   while the trampoline can be mixed in arbitrary order with other arguments
   (see the following section).

Deprecation of the ``py::base<T>()`` attribute
----------------------------------------------

``py::base<T>()`` was deprecated in favor of specifying ``T`` as a template
argument to ``py::class_``. This new syntax also supports multiple
inheritance. Note that, while the type being exported must be the first
argument in the ``py::class_<Class, ...>`` template, the order of the
following types (bases, holder and/or trampoline) is not important.

.. code-block:: cpp

    // old v1.x
    py::class_<Derived>("Derived", py::base<Base>());

    // new v2.x
    py::class_<Derived, Base>("Derived");

    // new -- multiple inheritance
    py::class_<Derived, Base1, Base2>("Derived");

    // new -- apart from `Derived` the argument order can be arbitrary
    py::class_<Derived, Base1, Holder, Base2, Trampoline>("Derived");

Out-of-the-box support for ``std::shared_ptr``
----------------------------------------------

The relevant type caster is now built in, so it's no longer necessary to
include a declaration of the form:

.. code-block:: cpp

    PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>)

Continuing to do so won’t cause an error or even a deprecation warning, but
it's completely redundant.
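As an illustration, a binding along these lines (``Node`` is a hypothetical
example type) now works with no holder declaration at all:

.. code-block:: cpp

    #include <pybind11/pybind11.h>

    #include <memory>

    namespace py = pybind11;

    struct Node { int value = 0; };  // hypothetical example type

    PYBIND11_MODULE(example, m) {
        // std::shared_ptr<Node> works as a holder out of the box; no
        // PYBIND11_DECLARE_HOLDER_TYPE declaration is needed anymore.
        py::class_<Node, std::shared_ptr<Node>>(m, "Node")
            .def(py::init<>())
            .def_readwrite("value", &Node::value);
    }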
Deprecation of a few ``py::object`` APIs
----------------------------------------

All of the old-style calls emit deprecation warnings.

+---------------------------------------+---------------------------------------------+
| Old syntax                            | New syntax                                  |
+=======================================+=============================================+
| ``obj.call(args...)``                 | ``obj(args...)``                            |
+---------------------------------------+---------------------------------------------+
| ``obj.str()``                         | ``py::str(obj)``                            |
+---------------------------------------+---------------------------------------------+
| ``auto l = py::list(obj); l.check()`` | ``py::isinstance<py::list>(obj)``           |
+---------------------------------------+---------------------------------------------+
| ``py::object(ptr, true)``             | ``py::reinterpret_borrow<py::object>(ptr)`` |
+---------------------------------------+---------------------------------------------+
| ``py::object(ptr, false)``            | ``py::reinterpret_steal<py::object>(ptr)``  |
+---------------------------------------+---------------------------------------------+
| ``if (obj.attr("foo"))``              | ``if (py::hasattr(obj, "foo"))``            |
+---------------------------------------+---------------------------------------------+
| ``if (obj["bar"])``                   | ``if (obj.contains("bar"))``                |
+---------------------------------------+---------------------------------------------+
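For illustration, a small sketch combining several of the new-style calls
(the ``inspect`` helper and the ``foo`` attribute are hypothetical):

.. code-block:: cpp

    #include <pybind11/pybind11.h>

    namespace py = pybind11;

    void inspect(const py::object &obj) {
        if (py::isinstance<py::list>(obj))        // was: py::list(obj); l.check()
            py::print("a list of length", py::len(obj));
        if (py::hasattr(obj, "foo"))              // was: if (obj.attr("foo"))
            py::print(py::str(obj.attr("foo"))); // py::str(...) was: .str()
    }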
.. _installing: Installing the library ###################### There are several ways to get the pybind11 source, which lives at `pybind/pybind11 on GitHub <https://github.com/pybind/pybind11>`_. The pybind11 developers recommend one of the first three ways listed here, submodule, PyPI, or conda-forge, for obtaining pybind11. .. _include_as_a_submodule: Include as a submodule ====================== When you are working on a project in Git, you can use the pybind11 repository as a submodule. From your git repository, use: .. code-block:: bash git submodule add -b stable ../../pybind/pybind11 extern/pybind11 git submodule update --init This assumes you are placing your dependencies in ``extern/``, and that you are using GitHub; if you are not using GitHub, use the full https or ssh URL instead of the relative URL ``../../pybind/pybind11`` above. Some other servers also require the ``.git`` extension (GitHub does not). From here, you can now include ``extern/pybind11/include``, or you can use the various integration tools (see :ref:`compiling`) pybind11 provides directly from the local folder. Include with PyPI ================= You can download the sources and CMake files as a Python package from PyPI using Pip. Just use: .. code-block:: bash pip install pybind11 This will provide pybind11 in a standard Python package format. If you want pybind11 available directly in your environment root, you can use: .. code-block:: bash pip install "pybind11[global]" This is not recommended if you are installing with your system Python, as it will add files to ``/usr/local/include/pybind11`` and ``/usr/local/share/cmake/pybind11``, so unless that is what you want, it is recommended only for use in virtual environments or your ``pyproject.toml`` file (see :ref:`compiling`). Include with conda-forge ======================== You can use pybind11 with conda packaging via `conda-forge <https://github.com/conda-forge/pybind11-feedstock>`_: .. code-block:: bash conda install -c conda-forge pybind11 Include with vcpkg ================== You can download and install pybind11 using the Microsoft `vcpkg <https://github.com/Microsoft/vcpkg/>`_ dependency manager: .. code-block:: bash git clone https://github.com/Microsoft/vcpkg.git cd vcpkg ./bootstrap-vcpkg.sh ./vcpkg integrate install vcpkg install pybind11 The pybind11 port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please `create an issue or pull request <https://github.com/Microsoft/vcpkg/>`_ on the vcpkg repository. Global install with brew ======================== The brew package manager (Homebrew on macOS, or Linuxbrew on Linux) has a `pybind11 package <https://github.com/Homebrew/homebrew-core/blob/master/Formula/pybind11.rb>`_. To install: .. code-block:: bash brew install pybind11 .. We should list Conan, and possibly a few other C++ package managers (hunter, .. perhaps). Conan has a very clean CMake integration that would be good to show. Other options ============= Other locations you can find pybind11 are `listed here <https://repology.org/project/python:pybind11/versions>`_; these are maintained by various packagers and the community.
Limitations
###########

Design choices
^^^^^^^^^^^^^^

pybind11 strives to be a general solution to binding generation, but it also
has certain limitations:

- pybind11 casts away ``const``-ness in function arguments and return values.
  This is in line with the Python language, which has no concept of ``const``
  values. This means that some additional care is needed to avoid bugs that
  would be caught by the type checker in a traditional C++ program.
- The NumPy interface ``pybind11::array`` greatly simplifies accessing
  numerical data from C++ (and vice versa), but it's not a full-blown array
  class like ``Eigen::Array`` or ``boost.multi_array``. ``Eigen`` objects are
  directly supported, however, with ``pybind11/eigen.h``.

Large but useful features could be implemented in pybind11 but would lead to
a significant increase in complexity. Pybind11 strives to be simple and
compact. Users who require large new features are encouraged to write an
extension to pybind11; see `pybind11_json
<https://github.com/pybind/pybind11_json>`_ for an example.

Known bugs
^^^^^^^^^^

These are issues that hopefully will one day be fixed, but currently are
unsolved. If you know how to help with one of these issues, contributions
are welcome!

- Intel 20.2 is currently having an issue with the test suite.
  `#2573 <https://github.com/pybind/pybind11/pull/2573>`_
- Debug mode Python does not support 1-5 tests in the test suite currently.
  `#2422 <https://github.com/pybind/pybind11/pull/2422>`_
- PyPy3 7.3.1 and 7.3.2 have issues with several tests on 32-bit Windows.

Known limitations
^^^^^^^^^^^^^^^^^

These are issues that are probably solvable, but have not been fixed yet. A
clean, well written patch would likely be accepted to solve them.

- Type casters are not kept alive recursively.
  `#2527 <https://github.com/pybind/pybind11/issues/2527>`_
  One consequence is that containers of ``char *`` are currently not
  supported. `#2245 <https://github.com/pybind/pybind11/issues/2245>`_
- The ``cpptest`` does not run on Windows with Python 3.8 or newer, due to
  DLL loader changes. User code that is correctly installed should not be
  affected. `#2560 <https://github.com/pybind/pybind11/issues/2560>`_

Python 3.9.0 warning
^^^^^^^^^^^^^^^^^^^^

Combining older versions of pybind11 (< 2.6.0) with Python on exactly 3.9.0
will trigger undefined behavior that typically manifests as crashes during
interpreter shutdown (but could also destroy your data. **You have been
warned**). This issue was `fixed in Python
<https://github.com/python/cpython/pull/22670>`_. As a mitigation for this
bug, pybind11 2.6.0 or newer includes a workaround specifically when Python
3.9.0 is detected at runtime, leaking about 50 bytes of memory when a
callback function is garbage collected. For reference, the pybind11 test
suite has about 2,000 such callbacks, but only 49 are garbage collected
before the end-of-process. Wheels (even if built with Python 3.9.0) will
correctly avoid the leak when run in Python 3.9.1, and this does not affect
other 3.X versions.
.. only:: latex Intro ===== .. include:: readme.rst .. only:: not latex Contents: .. toctree:: :maxdepth: 1 changelog upgrade .. toctree:: :caption: The Basics :maxdepth: 2 installing basics classes compiling .. toctree:: :caption: Advanced Topics :maxdepth: 2 advanced/functions advanced/classes advanced/exceptions advanced/smart_ptrs advanced/cast/index advanced/pycpp/index advanced/embedding advanced/misc .. toctree:: :caption: Extra Information :maxdepth: 1 faq benchmark limitations reference cmake/index
On version numbers
^^^^^^^^^^^^^^^^^^

The two version numbers (C++ and Python) must match when combined (checked
when you build the PyPI package), and must be a valid `PEP 440
<https://www.python.org/dev/peps/pep-0440>`_ version when combined. For
example:

.. code-block:: C++

    #define PYBIND11_VERSION_MAJOR X
    #define PYBIND11_VERSION_MINOR Y
    #define PYBIND11_VERSION_PATCH Z.dev1

For beta, ``PYBIND11_VERSION_PATCH`` should be ``Z.b1``. RC's can be
``Z.rc1``. Always include the dot (even though PEP 440 allows it to be
dropped). For a final release, this must be a simple integer. There is also
a HEX version of the version just below.

To release a new version of pybind11:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you don't have nox, you should either use ``pipx run nox`` instead, or
use ``pipx install nox`` or ``brew install nox`` (Unix).

- Update the version number

  - Update ``PYBIND11_VERSION_MAJOR`` etc. in
    ``include/pybind11/detail/common.h``. PATCH should be a simple integer.
  - Update the version HEX just below, as well.
  - Update ``pybind11/_version.py`` (match above)
  - Run ``nox -s tests_packaging`` to ensure this was done correctly.
  - Ensure that all the information in ``setup.cfg`` is up-to-date, like
    supported Python versions.
  - Add release date in ``docs/changelog.rst``.
  - Check to make sure
    `needs-changelog <https://github.com/pybind/pybind11/pulls?q=is%3Apr+is%3Aclosed+label%3A%22needs+changelog%22>`_
    issues are entered in the changelog (clear the label when done).
  - ``git add`` and ``git commit``, ``git push``. **Ensure CI passes**. (If
    it fails due to a known flake issue, either ignore or restart CI.)

- Add a release branch if this is a new minor version, or update the existing
  release branch if it is a patch version

  - New branch: ``git checkout -b vX.Y``, ``git push -u origin vX.Y``
  - Update branch: ``git checkout vX.Y``, ``git merge <release branch>``,
    ``git push``

- Update tags (optional; if you skip this, the GitHub release makes a
  non-annotated tag for you)

  - ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``.
  - ``git push --tags``.

- Update stable

  - ``git checkout stable``
  - ``git merge master``
  - ``git push``

- Make a GitHub release (this shows up in the UI, sends new release
  notifications to users watching releases, and also uploads PyPI packages).
  (Note: if you do not use an existing tag, this creates a new lightweight
  tag for you, so you could skip the above step.)

  - GUI method: Under `releases <https://github.com/pybind/pybind11/releases>`_
    click "Draft a new release" on the far right, fill in the tag name (if
    you didn't tag above, it will be made here), fill in a release name like
    "Version X.Y.Z", and copy-and-paste the markdown-formatted (!) changelog
    into the description (usually
    ``cat docs/changelog.rst | pandoc -f rst -t gfm``). Check "pre-release"
    if this is a beta/RC.
  - CLI method: with ``gh`` installed, run
    ``gh release create vX.Y.Z -t "Version X.Y.Z"``. If this is a
    pre-release, add ``-p``.

- Get back to work

  - Make sure you are on master, not somewhere else: ``git checkout master``
  - Update version macros in ``include/pybind11/detail/common.h`` (set PATCH
    to ``0.dev1`` and increment MINOR).
  - Update ``_version.py`` to match
  - Run ``nox -s tests_packaging`` to ensure this was done correctly.
  - Add a spot for in-development updates in ``docs/changelog.rst``.
  - ``git add``, ``git commit``, ``git push``

If a version branch is updated, remember to set PATCH to ``1.dev1``.

If you'd like to bump homebrew, run:

.. code-block:: console

    brew bump-formula-pr --url https://github.com/pybind/pybind11/archive/vX.Y.Z.tar.gz

Conda-forge should automatically make a PR in a few hours, and automatically
merge it if there are no issues.

Manual packaging
^^^^^^^^^^^^^^^^

If you need to manually upload releases, you can download the releases from
the job artifacts and upload them with twine. You can also make the files
locally (not recommended in general, as your local directory is more likely
to be "dirty" and SDists love picking up random unrelated/hidden files);
this is the procedure:

.. code-block:: bash

    nox -s build
    twine upload dist/*

This makes SDists and wheels, and the final line uploads them.
.. _changelog: Changelog ######### Starting with version 1.8.0, pybind11 releases use a `semantic versioning <http://semver.org>`_ policy. Version 2.9.1 (Feb 2, 2022) --------------------------- Changes: * If possible, attach Python exception with ``py::raise_from`` to ``TypeError`` when casting from C++ to Python. This will give additional info if Python exceptions occur in the caster. Adds a test case of trying to convert a set from C++ to Python when the hash function is not defined in Python. `#3605 <https://github.com/pybind/pybind11/pull/3605>`_ * Add a mapping of C++11 nested exceptions to their Python exception equivalent using ``py::raise_from``. This attaches the nested exceptions in Python using the ``__cause__`` field. `#3608 <https://github.com/pybind/pybind11/pull/3608>`_ * Propagate Python exception traceback using ``raise_from`` if a pybind11 function runs out of overloads. `#3671 <https://github.com/pybind/pybind11/pull/3671>`_ * ``py::multiple_inheritance`` is now only needed when C++ bases are hidden from pybind11. `#3650 <https://github.com/pybind/pybind11/pull/3650>`_ and `#3659 <https://github.com/pybind/pybind11/pull/3659>`_ Bug fixes: * Remove a boolean cast in ``numpy.h`` that causes MSVC C4800 warnings when compiling against Python 3.10 or newer. `#3669 <https://github.com/pybind/pybind11/pull/3669>`_ * Render ``py::bool_`` and ``py::float_`` as ``bool`` and ``float`` respectively. `#3622 <https://github.com/pybind/pybind11/pull/3622>`_ Build system improvements: * Fix CMake extension suffix computation on Python 3.10+. `#3663 <https://github.com/pybind/pybind11/pull/3663>`_ * Allow ``CMAKE_ARGS`` to override CMake args in pybind11's own ``setup.py``. `#3577 <https://github.com/pybind/pybind11/pull/3577>`_ * Remove a few deprecated c-headers. `#3610 <https://github.com/pybind/pybind11/pull/3610>`_ * More uniform handling of test targets. `#3590 <https://github.com/pybind/pybind11/pull/3590>`_ * Add clang-tidy readability check to catch potentially swapped function args. `#3611 <https://github.com/pybind/pybind11/pull/3611>`_ Version 2.9.0 (Dec 28, 2021) ---------------------------- This is the last version to support Python 2.7 and 3.5. New Features: * Allow ``py::args`` to be followed by other arguments; the remaining arguments are implicitly keyword-only, as if a ``py::kw_only{}`` annotation had been used. `#3402 <https://github.com/pybind/pybind11/pull/3402>`_ Changes: * Make str/bytes/memoryview more interoperable with ``std::string_view``. `#3521 <https://github.com/pybind/pybind11/pull/3521>`_ * Replace ``_`` with ``const_name`` in internals, avoid defining ``pybind::_`` if ``_`` defined as macro (common gettext usage) `#3423 <https://github.com/pybind/pybind11/pull/3423>`_ Bug fixes: * Fix a rare warning about extra copy in an Eigen constructor. `#3486 <https://github.com/pybind/pybind11/pull/3486>`_ * Fix caching of the C++ overrides. `#3465 <https://github.com/pybind/pybind11/pull/3465>`_ * Add missing ``std::forward`` calls to some ``cpp_function`` overloads. `#3443 <https://github.com/pybind/pybind11/pull/3443>`_ * Support PyPy 7.3.7 and the PyPy3.8 beta. Test python-3.11 on PRs with the ``python dev`` label. `#3419 <https://github.com/pybind/pybind11/pull/3419>`_ * Replace usage of deprecated ``Eigen::MappedSparseMatrix`` with ``Eigen::Map<Eigen::SparseMatrix<...>>`` for Eigen 3.3+. `#3499 <https://github.com/pybind/pybind11/pull/3499>`_ * Tweaks to support Microsoft Visual Studio 2022. 
`#3497 <https://github.com/pybind/pybind11/pull/3497>`_

Build system improvements:

* Nicer CMake printout and IDE organisation for pybind11's own tests.
  `#3479 <https://github.com/pybind/pybind11/pull/3479>`_
* CMake: report version type as part of the version string to avoid a
  spurious space in the package status message.
  `#3472 <https://github.com/pybind/pybind11/pull/3472>`_
* Flags starting with ``-g`` in ``$CFLAGS`` and ``$CPPFLAGS`` are no longer
  overridden by ``Pybind11Extension``.
  `#3436 <https://github.com/pybind/pybind11/pull/3436>`_
* Ensure ThreadPool is closed in ``setup_helpers``.
  `#3548 <https://github.com/pybind/pybind11/pull/3548>`_
* Avoid LTS on ``mips64`` and ``ppc64le`` (reported broken).
  `#3557 <https://github.com/pybind/pybind11/pull/3557>`_

v2.8.1 (Oct 27, 2021)
---------------------

Changes and additions:

* The simple namespace creation shortcut added in 2.8.0 was deprecated due to
  usage of CPython internal API, and will be removed soon. Use
  ``py::module_::import("types").attr("SimpleNamespace")``.
  `#3374 <https://github.com/pybind/pybind11/pull/3374>`_
* Add C++ Exception type to throw and catch ``AttributeError``. Useful for
  defining custom ``__setattr__`` and ``__getattr__`` methods.
  `#3387 <https://github.com/pybind/pybind11/pull/3387>`_

Fixes:

* Fixed the potential for dangling references when using properties with
  ``std::optional`` types.
  `#3376 <https://github.com/pybind/pybind11/pull/3376>`_
* Modernize usage of ``PyCodeObject`` on Python 3.9+ (moving toward support
  for Python 3.11a1)
  `#3368 <https://github.com/pybind/pybind11/pull/3368>`_
* A long-standing bug in ``eigen.h`` was fixed (originally PR #3343). The bug
  was unmasked by newly added ``static_assert``'s in the Eigen 3.4.0 release.
  `#3352 <https://github.com/pybind/pybind11/pull/3352>`_
* Support multiple raw inclusion of CMake helper files (Conan.io does this
  for multi-config generators).
  `#3420 <https://github.com/pybind/pybind11/pull/3420>`_
* Fix harmless warning on upcoming CMake 3.22.
  `#3368 <https://github.com/pybind/pybind11/pull/3368>`_
* Fix 2.8.0 regression with MSVC 2017 + C++17 mode + Python 3.
  `#3407 <https://github.com/pybind/pybind11/pull/3407>`_
* Fix 2.8.0 regression that caused undefined behavior (typically segfaults)
  in ``make_key_iterator``/``make_value_iterator`` if dereferencing the
  iterator returned a temporary value instead of a reference.
  `#3348 <https://github.com/pybind/pybind11/pull/3348>`_

v2.8.0 (Oct 4, 2021)
--------------------

New features:

* Added ``py::raise_from`` to enable chaining exceptions.
  `#3215 <https://github.com/pybind/pybind11/pull/3215>`_
* Allow exception translators to be optionally registered local to a module
  instead of applying globally across all pybind11 modules. Use
  ``register_local_exception_translator(ExceptionTranslator&& translator)``
  instead of ``register_exception_translator(ExceptionTranslator&& translator)``
  to keep your exception remapping code local to the module.
  `#2650 <https://github.com/pybind/pybind11/pull/2650>`_
* Add ``make_simple_namespace`` function for instantiating Python
  ``SimpleNamespace`` objects. **Deprecated in 2.8.1.**
  `#2840 <https://github.com/pybind/pybind11/pull/2840>`_
* ``pybind11::scoped_interpreter`` and ``initialize_interpreter`` have new
  arguments to allow ``sys.argv`` initialization.
  `#2341 <https://github.com/pybind/pybind11/pull/2341>`_
* Allow Python builtins to be used as callbacks in CPython.
`#1413 <https://github.com/pybind/pybind11/pull/1413>`_ * Added ``view`` to view arrays with a different datatype. `#987 <https://github.com/pybind/pybind11/pull/987>`_ * Implemented ``reshape`` on arrays. `#984 <https://github.com/pybind/pybind11/pull/984>`_ * Enable defining custom ``__new__`` methods on classes by fixing bug preventing overriding methods if they have non-pybind11 siblings. `#3265 <https://github.com/pybind/pybind11/pull/3265>`_ * Add ``make_value_iterator()``, and fix ``make_key_iterator()`` to return references instead of copies. `#3293 <https://github.com/pybind/pybind11/pull/3293>`_ * Improve the classes generated by ``bind_map``: `#3310 <https://github.com/pybind/pybind11/pull/3310>`_ * Change ``.items`` from an iterator to a dictionary view. * Add ``.keys`` and ``.values`` (both dictionary views). * Allow ``__contains__`` to take any object. * ``pybind11::custom_type_setup`` was added, for customizing the ``PyHeapTypeObject`` corresponding to a class, which may be useful for enabling garbage collection support, among other things. `#3287 <https://github.com/pybind/pybind11/pull/3287>`_ Changes: * Set ``__file__`` constant when running ``eval_file`` in an embedded interpreter. `#3233 <https://github.com/pybind/pybind11/pull/3233>`_ * Python objects and (C++17) ``std::optional`` now accepted in ``py::slice`` constructor. `#1101 <https://github.com/pybind/pybind11/pull/1101>`_ * The pybind11 proxy types ``str``, ``bytes``, ``bytearray``, ``tuple``, ``list`` now consistently support passing ``ssize_t`` values for sizes and indexes. Previously, only ``size_t`` was accepted in several interfaces. `#3219 <https://github.com/pybind/pybind11/pull/3219>`_ * Avoid evaluating ``PYBIND11_TLS_REPLACE_VALUE`` arguments more than once. `#3290 <https://github.com/pybind/pybind11/pull/3290>`_ Fixes: * Bug fix: enum value's ``__int__`` returning non-int when underlying type is bool or of char type. `#1334 <https://github.com/pybind/pybind11/pull/1334>`_ * Fixes bug in setting error state in Capsule's pointer methods. `#3261 <https://github.com/pybind/pybind11/pull/3261>`_ * A long-standing memory leak in ``py::cpp_function::initialize`` was fixed. `#3229 <https://github.com/pybind/pybind11/pull/3229>`_ * Fixes thread safety for some ``pybind11::type_caster`` which require lifetime extension, such as for ``std::string_view``. `#3237 <https://github.com/pybind/pybind11/pull/3237>`_ * Restore compatibility with gcc 4.8.4 as distributed by ubuntu-trusty, linuxmint-17. `#3270 <https://github.com/pybind/pybind11/pull/3270>`_ Build system improvements: * Fix regression in CMake Python package config: improper use of absolute path. `#3144 <https://github.com/pybind/pybind11/pull/3144>`_ * Cached Python version information could become stale when CMake was re-run with a different Python version. The build system now detects this and updates this information. `#3299 <https://github.com/pybind/pybind11/pull/3299>`_ * Specified UTF8-encoding in setup.py calls of open(). `#3137 <https://github.com/pybind/pybind11/pull/3137>`_ * Fix a harmless warning from CMake 3.21 with the classic Python discovery. `#3220 <https://github.com/pybind/pybind11/pull/3220>`_ * Eigen repo and version can now be specified as cmake options. `#3324 <https://github.com/pybind/pybind11/pull/3324>`_ Backend and tidying up: * Reduced thread-local storage required for keeping alive temporary data for type conversion to one key per ABI version, rather than one key per extension module. 
This makes the total thread-local storage required by pybind11 2 keys per ABI version. `#3275 <https://github.com/pybind/pybind11/pull/3275>`_ * Optimize NumPy array construction with additional moves. `#3183 <https://github.com/pybind/pybind11/pull/3183>`_ * Conversion to ``std::string`` and ``std::string_view`` now avoids making an extra copy of the data on Python >= 3.3. `#3257 <https://github.com/pybind/pybind11/pull/3257>`_ * Remove const modifier from certain C++ methods on Python collections (``list``, ``set``, ``dict``) such as (``clear()``, ``append()``, ``insert()``, etc...) and annotated them with ``py-non-const``. * Enable readability ``clang-tidy-const-return`` and remove useless consts. `#3254 <https://github.com/pybind/pybind11/pull/3254>`_ `#3194 <https://github.com/pybind/pybind11/pull/3194>`_ * The clang-tidy ``google-explicit-constructor`` option was enabled. `#3250 <https://github.com/pybind/pybind11/pull/3250>`_ * Mark a pytype move constructor as noexcept (perf). `#3236 <https://github.com/pybind/pybind11/pull/3236>`_ * Enable clang-tidy check to guard against inheritance slicing. `#3210 <https://github.com/pybind/pybind11/pull/3210>`_ * Legacy warning suppression pragma were removed from eigen.h. On Unix platforms, please use -isystem for Eigen include directories, to suppress compiler warnings originating from Eigen headers. Note that CMake does this by default. No adjustments are needed for Windows. `#3198 <https://github.com/pybind/pybind11/pull/3198>`_ * Format pybind11 with isort consistent ordering of imports `#3195 <https://github.com/pybind/pybind11/pull/3195>`_ * The warnings-suppression "pragma clamp" at the top/bottom of pybind11 was removed, clearing the path to refactoring and IWYU cleanup. `#3186 <https://github.com/pybind/pybind11/pull/3186>`_ * Enable most bugprone checks in clang-tidy and fix the found potential bugs and poor coding styles. `#3166 <https://github.com/pybind/pybind11/pull/3166>`_ * Add ``clang-tidy-readability`` rules to make boolean casts explicit improving code readability. Also enabled other misc and readability clang-tidy checks. `#3148 <https://github.com/pybind/pybind11/pull/3148>`_ * Move object in ``.pop()`` for list. `#3116 <https://github.com/pybind/pybind11/pull/3116>`_ v2.7.1 (Aug 3, 2021) --------------------- Minor missing functionality added: * Allow Python builtins to be used as callbacks in CPython. `#1413 <https://github.com/pybind/pybind11/pull/1413>`_ Bug fixes: * Fix regression in CMake Python package config: improper use of absolute path. `#3144 <https://github.com/pybind/pybind11/pull/3144>`_ * Fix Mingw64 and add to the CI testing matrix. `#3132 <https://github.com/pybind/pybind11/pull/3132>`_ * Specified UTF8-encoding in setup.py calls of open(). `#3137 <https://github.com/pybind/pybind11/pull/3137>`_ * Add clang-tidy-readability rules to make boolean casts explicit improving code readability. Also enabled other misc and readability clang-tidy checks. `#3148 <https://github.com/pybind/pybind11/pull/3148>`_ * Move object in ``.pop()`` for list. `#3116 <https://github.com/pybind/pybind11/pull/3116>`_ Backend and tidying up: * Removed and fixed warning suppressions. 
`#3127 <https://github.com/pybind/pybind11/pull/3127>`_ `#3129 <https://github.com/pybind/pybind11/pull/3129>`_ `#3135 <https://github.com/pybind/pybind11/pull/3135>`_ `#3141 <https://github.com/pybind/pybind11/pull/3141>`_ `#3142 <https://github.com/pybind/pybind11/pull/3142>`_ `#3150 <https://github.com/pybind/pybind11/pull/3150>`_ `#3152 <https://github.com/pybind/pybind11/pull/3152>`_ `#3160 <https://github.com/pybind/pybind11/pull/3160>`_ `#3161 <https://github.com/pybind/pybind11/pull/3161>`_ v2.7.0 (Jul 16, 2021) --------------------- New features: * Enable ``py::implicitly_convertible<py::none, ...>`` for ``py::class_``-wrapped types. `#3059 <https://github.com/pybind/pybind11/pull/3059>`_ * Allow function pointer extraction from overloaded functions. `#2944 <https://github.com/pybind/pybind11/pull/2944>`_ * NumPy: added ``.char_()`` to type which gives the NumPy public ``char`` result, which also distinguishes types by bit length (unlike ``.kind()``). `#2864 <https://github.com/pybind/pybind11/pull/2864>`_ * Add ``pybind11::bytearray`` to manipulate ``bytearray`` similar to ``bytes``. `#2799 <https://github.com/pybind/pybind11/pull/2799>`_ * ``pybind11/stl/filesystem.h`` registers a type caster that, on C++17/Python 3.6+, converts ``std::filesystem::path`` to ``pathlib.Path`` and any ``os.PathLike`` to ``std::filesystem::path``. `#2730 <https://github.com/pybind/pybind11/pull/2730>`_ * A ``PYBIND11_VERSION_HEX`` define was added, similar to ``PY_VERSION_HEX``. `#3120 <https://github.com/pybind/pybind11/pull/3120>`_ Changes: * ``py::str`` changed to exclusively hold ``PyUnicodeObject``. Previously ``py::str`` could also hold ``bytes``, which is probably surprising, was never documented, and can mask bugs (e.g. accidental use of ``py::str`` instead of ``py::bytes``). `#2409 <https://github.com/pybind/pybind11/pull/2409>`_ * Add a safety guard to ensure that the Python GIL is held when C++ calls back into Python via ``object_api<>::operator()`` (e.g. ``py::function`` ``__call__``). (This feature is available for Python 3.6+ only.) `#2919 <https://github.com/pybind/pybind11/pull/2919>`_ * Catch a missing ``self`` argument in calls to ``__init__()``. `#2914 <https://github.com/pybind/pybind11/pull/2914>`_ * Use ``std::string_view`` if available to avoid a copy when passing an object to a ``std::ostream``. `#3042 <https://github.com/pybind/pybind11/pull/3042>`_ * An important warning about thread safety was added to the ``iostream.h`` documentation; attempts to make ``py::scoped_ostream_redirect`` thread safe have been removed, as it was only partially effective. `#2995 <https://github.com/pybind/pybind11/pull/2995>`_ Fixes: * Performance: avoid unnecessary strlen calls. `#3058 <https://github.com/pybind/pybind11/pull/3058>`_ * Fix auto-generated documentation string when using ``const T`` in ``pyarray_t``. `#3020 <https://github.com/pybind/pybind11/pull/3020>`_ * Unify error messages thrown by ``simple_collector``/``unpacking_collector``. `#3013 <https://github.com/pybind/pybind11/pull/3013>`_ * ``pybind11::builtin_exception`` is now explicitly exported, which means the types included/defined in different modules are identical, and exceptions raised in different modules can be caught correctly. The documentation was updated to explain that custom exceptions that are used across module boundaries need to be explicitly exported as well. `#2999 <https://github.com/pybind/pybind11/pull/2999>`_ * Fixed exception when printing UTF-8 to a ``scoped_ostream_redirect``. 
`#2982 <https://github.com/pybind/pybind11/pull/2982>`_ * Pickle support enhancement: ``setstate`` implementation will attempt to ``setattr`` ``__dict__`` only if the unpickled ``dict`` object is not empty, to not force use of ``py::dynamic_attr()`` unnecessarily. `#2972 <https://github.com/pybind/pybind11/pull/2972>`_ * Allow negative timedelta values to roundtrip. `#2870 <https://github.com/pybind/pybind11/pull/2870>`_ * Fix unchecked errors could potentially swallow signals/other exceptions. `#2863 <https://github.com/pybind/pybind11/pull/2863>`_ * Add null pointer check with ``std::localtime``. `#2846 <https://github.com/pybind/pybind11/pull/2846>`_ * Fix the ``weakref`` constructor from ``py::object`` to create a new ``weakref`` on conversion. `#2832 <https://github.com/pybind/pybind11/pull/2832>`_ * Avoid relying on exceptions in C++17 when getting a ``shared_ptr`` holder from a ``shared_from_this`` class. `#2819 <https://github.com/pybind/pybind11/pull/2819>`_ * Allow the codec's exception to be raised instead of :code:`RuntimeError` when casting from :code:`py::str` to :code:`std::string`. `#2903 <https://github.com/pybind/pybind11/pull/2903>`_ Build system improvements: * In ``setup_helpers.py``, test for platforms that have some multiprocessing features but lack semaphores, which ``ParallelCompile`` requires. `#3043 <https://github.com/pybind/pybind11/pull/3043>`_ * Fix ``pybind11_INCLUDE_DIR`` in case ``CMAKE_INSTALL_INCLUDEDIR`` is absolute. `#3005 <https://github.com/pybind/pybind11/pull/3005>`_ * Fix bug not respecting ``WITH_SOABI`` or ``WITHOUT_SOABI`` to CMake. `#2938 <https://github.com/pybind/pybind11/pull/2938>`_ * Fix the default ``Pybind11Extension`` compilation flags with a Mingw64 python. `#2921 <https://github.com/pybind/pybind11/pull/2921>`_ * Clang on Windows: do not pass ``/MP`` (ignored flag). `#2824 <https://github.com/pybind/pybind11/pull/2824>`_ * ``pybind11.setup_helpers.intree_extensions`` can be used to generate ``Pybind11Extension`` instances from cpp files placed in the Python package source tree. `#2831 <https://github.com/pybind/pybind11/pull/2831>`_ Backend and tidying up: * Enable clang-tidy performance, readability, and modernization checks throughout the codebase to enforce best coding practices. `#3046 <https://github.com/pybind/pybind11/pull/3046>`_, `#3049 <https://github.com/pybind/pybind11/pull/3049>`_, `#3051 <https://github.com/pybind/pybind11/pull/3051>`_, `#3052 <https://github.com/pybind/pybind11/pull/3052>`_, `#3080 <https://github.com/pybind/pybind11/pull/3080>`_, and `#3094 <https://github.com/pybind/pybind11/pull/3094>`_ * Checks for common misspellings were added to the pre-commit hooks. `#3076 <https://github.com/pybind/pybind11/pull/3076>`_ * Changed ``Werror`` to stricter ``Werror-all`` for Intel compiler and fixed minor issues. `#2948 <https://github.com/pybind/pybind11/pull/2948>`_ * Fixed compilation with GCC < 5 when the user defines ``_GLIBCXX_USE_CXX11_ABI``. `#2956 <https://github.com/pybind/pybind11/pull/2956>`_ * Added nox support for easier local testing and linting of contributions. `#3101 <https://github.com/pybind/pybind11/pull/3101>`_ and `#3121 <https://github.com/pybind/pybind11/pull/3121>`_ * Avoid RTD style issue with docutils 0.17+. `#3119 <https://github.com/pybind/pybind11/pull/3119>`_ * Support pipx run, such as ``pipx run pybind11 --include`` for a quick compile. 
`#3117 <https://github.com/pybind/pybind11/pull/3117>`_ v2.6.2 (Jan 26, 2021) --------------------- Minor missing functionality added: * enum: add missing Enum.value property. `#2739 <https://github.com/pybind/pybind11/pull/2739>`_ * Allow thread termination to be avoided during shutdown for CPython 3.7+ via ``.disarm`` for ``gil_scoped_acquire``/``gil_scoped_release``. `#2657 <https://github.com/pybind/pybind11/pull/2657>`_ Fixed or improved behavior in a few special cases: * Fix bug where the constructor of ``object`` subclasses would not throw on being passed a Python object of the wrong type. `#2701 <https://github.com/pybind/pybind11/pull/2701>`_ * The ``type_caster`` for integers does not convert Python objects with ``__int__`` anymore with ``noconvert`` or during the first round of trying overloads. `#2698 <https://github.com/pybind/pybind11/pull/2698>`_ * When casting to a C++ integer, ``__index__`` is always called and not considered as conversion, consistent with Python 3.8+. `#2801 <https://github.com/pybind/pybind11/pull/2801>`_ Build improvements: * Setup helpers: ``extra_compile_args`` and ``extra_link_args`` automatically set by Pybind11Extension are now prepended, which allows them to be overridden by user-set ``extra_compile_args`` and ``extra_link_args``. `#2808 <https://github.com/pybind/pybind11/pull/2808>`_ * Setup helpers: Don't trigger unused parameter warning. `#2735 <https://github.com/pybind/pybind11/pull/2735>`_ * CMake: Support running with ``--warn-uninitialized`` active. `#2806 <https://github.com/pybind/pybind11/pull/2806>`_ * CMake: Avoid error if included from two submodule directories. `#2804 <https://github.com/pybind/pybind11/pull/2804>`_ * CMake: Fix ``STATIC`` / ``SHARED`` being ignored in FindPython mode. `#2796 <https://github.com/pybind/pybind11/pull/2796>`_ * CMake: Respect the setting for ``CMAKE_CXX_VISIBILITY_PRESET`` if defined. `#2793 <https://github.com/pybind/pybind11/pull/2793>`_ * CMake: Fix issue with FindPython2/FindPython3 not working with ``pybind11::embed``. `#2662 <https://github.com/pybind/pybind11/pull/2662>`_ * CMake: mixing local and installed pybind11's would prioritize the installed one over the local one (regression in 2.6.0). `#2716 <https://github.com/pybind/pybind11/pull/2716>`_ Bug fixes: * Fixed segfault in multithreaded environments when using ``scoped_ostream_redirect``. `#2675 <https://github.com/pybind/pybind11/pull/2675>`_ * Leave docstring unset when all docstring-related options are disabled, rather than set an empty string. `#2745 <https://github.com/pybind/pybind11/pull/2745>`_ * The module key in builtins that pybind11 uses to store its internals changed from std::string to a python str type (more natural on Python 2, no change on Python 3). `#2814 <https://github.com/pybind/pybind11/pull/2814>`_ * Fixed assertion error related to unhandled (later overwritten) exception in CPython 3.8 and 3.9 debug builds. `#2685 <https://github.com/pybind/pybind11/pull/2685>`_ * Fix ``py::gil_scoped_acquire`` assert with CPython 3.9 debug build. `#2683 <https://github.com/pybind/pybind11/pull/2683>`_ * Fix issue with a test failing on pytest 6.2. `#2741 <https://github.com/pybind/pybind11/pull/2741>`_ Warning fixes: * Fix warning modifying constructor parameter 'flag' that shadows a field of 'set_flag' ``[-Wshadow-field-in-constructor-modified]``. `#2780 <https://github.com/pybind/pybind11/pull/2780>`_ * Suppressed some deprecation warnings about old-style ``__init__``/``__setstate__`` in the tests. 
`#2759 <https://github.com/pybind/pybind11/pull/2759>`_ Valgrind work: * Fix invalid access when calling a pybind11 ``__init__`` on a non-pybind11 class instance. `#2755 <https://github.com/pybind/pybind11/pull/2755>`_ * Fixed various minor memory leaks in pybind11's test suite. `#2758 <https://github.com/pybind/pybind11/pull/2758>`_ * Resolved memory leak in cpp_function initialization when exceptions occurred. `#2756 <https://github.com/pybind/pybind11/pull/2756>`_ * Added a Valgrind build, checking for leaks and memory-related UB, to CI. `#2746 <https://github.com/pybind/pybind11/pull/2746>`_ Compiler support: * Intel compiler was not activating C++14 support due to a broken define. `#2679 <https://github.com/pybind/pybind11/pull/2679>`_ * Support ICC and NVIDIA HPC SDK in C++17 mode. `#2729 <https://github.com/pybind/pybind11/pull/2729>`_ * Support Intel OneAPI compiler (ICC 20.2) and add to CI. `#2573 <https://github.com/pybind/pybind11/pull/2573>`_ v2.6.1 (Nov 11, 2020) --------------------- * ``py::exec``, ``py::eval``, and ``py::eval_file`` now add the builtins module as ``"__builtins__"`` to their ``globals`` argument, better matching ``exec`` and ``eval`` in pure Python. `#2616 <https://github.com/pybind/pybind11/pull/2616>`_ * ``setup_helpers`` will no longer set a minimum macOS version higher than the current version. `#2622 <https://github.com/pybind/pybind11/pull/2622>`_ * Allow deleting static properties. `#2629 <https://github.com/pybind/pybind11/pull/2629>`_ * Seal a leak in ``def_buffer``, cleaning up the ``capture`` object after the ``class_`` object goes out of scope. `#2634 <https://github.com/pybind/pybind11/pull/2634>`_ * ``pybind11_INCLUDE_DIRS`` was incorrect, potentially causing a regression if it was expected to include ``PYTHON_INCLUDE_DIRS`` (please use targets instead). `#2636 <https://github.com/pybind/pybind11/pull/2636>`_ * Added parameter names to the ``py::enum_`` constructor and methods, avoiding ``arg0`` in the generated docstrings. `#2637 <https://github.com/pybind/pybind11/pull/2637>`_ * Added ``needs_recompile`` optional function to the ``ParallelCompiler`` helper, to allow a recompile to be skipped based on a user-defined function. `#2643 <https://github.com/pybind/pybind11/pull/2643>`_ v2.6.0 (Oct 21, 2020) --------------------- See :ref:`upgrade-guide-2.6` for help upgrading to the new version. New features: * Keyword-only arguments supported in Python 2 or 3 with ``py::kw_only()``. `#2100 <https://github.com/pybind/pybind11/pull/2100>`_ * Positional-only arguments supported in Python 2 or 3 with ``py::pos_only()``. `#2459 <https://github.com/pybind/pybind11/pull/2459>`_ * ``py::is_final()`` class modifier to block subclassing (CPython only). `#2151 <https://github.com/pybind/pybind11/pull/2151>`_ * Added ``py::prepend()``, allowing a function to be placed at the beginning of the overload chain. `#1131 <https://github.com/pybind/pybind11/pull/1131>`_ * Access to the type object now provided with ``py::type::of<T>()`` and ``py::type::of(h)``. `#2364 <https://github.com/pybind/pybind11/pull/2364>`_ * Perfect forwarding support for methods. `#2048 <https://github.com/pybind/pybind11/pull/2048>`_ * Added ``py::error_already_set::discard_as_unraisable()``. `#2372 <https://github.com/pybind/pybind11/pull/2372>`_ * ``py::hash`` is now public. `#2217 <https://github.com/pybind/pybind11/pull/2217>`_ * ``py::class_<union_type>`` is now supported. Note that writing to one data member of the union and reading another (type punning) is UB in C++. 
Thus pybind11-bound unions should never be used for such conversions.
`#2320 <https://github.com/pybind/pybind11/pull/2320>`_.

* Classes now check local scope when registering members, allowing a subclass
  to have a member with the same name as a parent (such as an enum).
  `#2335 <https://github.com/pybind/pybind11/pull/2335>`_

Code correctness features:

* Error now thrown when ``__init__`` is forgotten on subclasses.
  `#2152 <https://github.com/pybind/pybind11/pull/2152>`_
* Throw an error if conversion to a pybind11 type is attempted when the
  Python object isn't a valid instance of that type, such as ``py::bytes(o)``
  when ``py::object o`` isn't a bytes instance.
  `#2349 <https://github.com/pybind/pybind11/pull/2349>`_
* Throw if conversion to ``str`` fails.
  `#2477 <https://github.com/pybind/pybind11/pull/2477>`_

API changes:

* ``py::module`` was renamed ``py::module_`` to avoid issues with C++20 when
  used unqualified, but an alias ``py::module`` is provided for backward
  compatibility. `#2489 <https://github.com/pybind/pybind11/pull/2489>`_
* Public constructors for ``py::module_`` have been deprecated; please use
  ``pybind11::module_::create_extension_module`` if you were using the public
  constructor (fairly rare after ``PYBIND11_MODULE`` was introduced).
  `#2552 <https://github.com/pybind/pybind11/pull/2552>`_
* ``PYBIND11_OVERLOAD*`` macros and ``get_overload`` function replaced by
  correctly-named ``PYBIND11_OVERRIDE*`` and ``get_override``, fixing
  inconsistencies in the presence of a closing ``;`` in these macros.
  ``get_type_overload`` is deprecated.
  `#2325 <https://github.com/pybind/pybind11/pull/2325>`_

Packaging / building improvements:

* The Python package was reworked to be more powerful and useful.
  `#2433 <https://github.com/pybind/pybind11/pull/2433>`_
* :ref:`build-setuptools` is easier thanks to a new
  ``pybind11.setup_helpers`` module, which provides utilities to use
  setuptools with pybind11. It can be used via PEP 518, ``setup_requires``,
  or by directly importing or copying ``setup_helpers.py`` into your project.
* CMake configuration files are now included in the Python package. Use
  ``pybind11.get_cmake_dir()`` or ``python -m pybind11 --cmakedir`` to get
  the directory with the CMake configuration files, or include the
  site-packages location in your ``CMAKE_MODULE_PATH``. Or you can use the
  new ``pybind11[global]`` extra when you install ``pybind11``, which
  installs the CMake files and headers into your base environment in the
  standard location.
* ``pybind11-config`` is another way to write ``python -m pybind11`` if you
  have your PATH set up.
* Added external typing support to the helper module, code from
  ``import pybind11`` can now be type checked.
  `#2588 <https://github.com/pybind/pybind11/pull/2588>`_
* Minimum CMake required increased to 3.4.
  `#2338 <https://github.com/pybind/pybind11/pull/2338>`_ and
  `#2370 <https://github.com/pybind/pybind11/pull/2370>`_
* Full integration with CMake’s C++ standard system and compile features
  replaces ``PYBIND11_CPP_STANDARD``.
* Generated config file is now portable to different Python/compiler/CMake
  versions.
* Virtual environments prioritized if ``PYTHON_EXECUTABLE`` is not set
  (``venv``, ``virtualenv``, and ``conda``) (similar to the new FindPython
  mode).
* Other CMake features now natively supported, like
  ``CMAKE_INTERPROCEDURAL_OPTIMIZATION``,
  ``set(CMAKE_CXX_VISIBILITY_PRESET hidden)``.
* ``CUDA`` as a language is now supported.
* Helper functions ``pybind11_strip``, ``pybind11_extension``,
  ``pybind11_find_import`` added, see :doc:`cmake/index`.
* Optional :ref:`find-python-mode` and :ref:`nopython-mode` with CMake. `#2370 <https://github.com/pybind/pybind11/pull/2370>`_ * Uninstall target added. `#2265 <https://github.com/pybind/pybind11/pull/2265>`_ and `#2346 <https://github.com/pybind/pybind11/pull/2346>`_ * ``pybind11_add_module()`` now accepts an optional ``OPT_SIZE`` flag that switches the binding target to size-based optimization if the global build type can not always be fixed to ``MinSizeRel`` (except in debug mode, where optimizations remain disabled). ``MinSizeRel`` or this flag reduces binary size quite substantially (~25% on some platforms). `#2463 <https://github.com/pybind/pybind11/pull/2463>`_ Smaller or developer focused features and fixes: * Moved ``mkdoc.py`` to a new repo, `pybind11-mkdoc`_. There are no longer submodules in the main repo. * ``py::memoryview`` segfault fix and update, with new ``py::memoryview::from_memory`` in Python 3, and documentation. `#2223 <https://github.com/pybind/pybind11/pull/2223>`_ * Fix for ``buffer_info`` on Python 2. `#2503 <https://github.com/pybind/pybind11/pull/2503>`_ * If ``__eq__`` defined but not ``__hash__``, ``__hash__`` is now set to ``None``. `#2291 <https://github.com/pybind/pybind11/pull/2291>`_ * ``py::ellipsis`` now also works on Python 2. `#2360 <https://github.com/pybind/pybind11/pull/2360>`_ * Pointer to ``std::tuple`` & ``std::pair`` supported in cast. `#2334 <https://github.com/pybind/pybind11/pull/2334>`_ * Small fixes in NumPy support. ``py::array`` now uses ``py::ssize_t`` as first argument type. `#2293 <https://github.com/pybind/pybind11/pull/2293>`_ * Added missing signature for ``py::array``. `#2363 <https://github.com/pybind/pybind11/pull/2363>`_ * ``unchecked_mutable_reference`` has access to operator ``()`` and ``[]`` when const. `#2514 <https://github.com/pybind/pybind11/pull/2514>`_ * ``py::vectorize`` is now supported on functions that return void. `#1969 <https://github.com/pybind/pybind11/pull/1969>`_ * ``py::capsule`` supports ``get_pointer`` and ``set_pointer``. `#1131 <https://github.com/pybind/pybind11/pull/1131>`_ * Fix crash when different instances share the same pointer of the same type. `#2252 <https://github.com/pybind/pybind11/pull/2252>`_ * Fix for ``py::len`` not clearing Python's error state when it fails and throws. `#2575 <https://github.com/pybind/pybind11/pull/2575>`_ * Bugfixes related to more extensive testing, new GitHub Actions CI. `#2321 <https://github.com/pybind/pybind11/pull/2321>`_ * Bug in timezone issue in Eastern hemisphere midnight fixed. `#2438 <https://github.com/pybind/pybind11/pull/2438>`_ * ``std::chrono::time_point`` now works when the resolution is not the same as the system. `#2481 <https://github.com/pybind/pybind11/pull/2481>`_ * Bug fixed where ``py::array_t`` could accept arrays that did not match the requested ordering. `#2484 <https://github.com/pybind/pybind11/pull/2484>`_ * Avoid a segfault on some compilers when types are removed in Python. `#2564 <https://github.com/pybind/pybind11/pull/2564>`_ * ``py::arg::none()`` is now also respected when passing keyword arguments. `#2611 <https://github.com/pybind/pybind11/pull/2611>`_ * PyPy fixes, PyPy 7.3.x now supported, including PyPy3. (Known issue with PyPy2 and Windows `#2596 <https://github.com/pybind/pybind11/issues/2596>`_). `#2146 <https://github.com/pybind/pybind11/pull/2146>`_ * CPython 3.9.0 workaround for undefined behavior (macOS segfault). `#2576 <https://github.com/pybind/pybind11/pull/2576>`_ * CPython 3.9 warning fixes. 
`#2253 <https://github.com/pybind/pybind11/pull/2253>`_ * Improved C++20 support, now tested in CI. `#2489 <https://github.com/pybind/pybind11/pull/2489>`_ `#2599 <https://github.com/pybind/pybind11/pull/2599>`_ * Improved but still incomplete debug Python interpreter support. `#2025 <https://github.com/pybind/pybind11/pull/2025>`_ * NVCC (CUDA 11) now supported and tested in CI. `#2461 <https://github.com/pybind/pybind11/pull/2461>`_ * NVIDIA PGI compilers now supported and tested in CI. `#2475 <https://github.com/pybind/pybind11/pull/2475>`_ * At least Intel 18 now explicitly required when compiling with Intel. `#2577 <https://github.com/pybind/pybind11/pull/2577>`_ * Extensive style checking in CI, with `pre-commit`_ support. Code modernization, checked by clang-tidy. * Expanded docs, including new main page, new installing section, and CMake helpers page, along with over a dozen new sections on existing pages. * In GitHub, new docs for contributing and new issue templates. .. _pre-commit: https://pre-commit.com .. _pybind11-mkdoc: https://github.com/pybind/pybind11-mkdoc v2.5.0 (Mar 31, 2020) ----------------------------------------------------- * Use C++17 fold expressions in type casters, if available. This can improve performance during overload resolution when functions have multiple arguments. `#2043 <https://github.com/pybind/pybind11/pull/2043>`_. * Changed include directory resolution in ``pybind11/__init__.py`` and installation in ``setup.py``. This fixes a number of open issues where pybind11 headers could not be found in certain environments. `#1995 <https://github.com/pybind/pybind11/pull/1995>`_. * C++20 ``char8_t`` and ``u8string`` support. `#2026 <https://github.com/pybind/pybind11/pull/2026>`_. * CMake: search for Python 3.9. `bb9c91 <https://github.com/pybind/pybind11/commit/bb9c91>`_. * Fixes for MSYS-based build environments. `#2087 <https://github.com/pybind/pybind11/pull/2087>`_, `#2053 <https://github.com/pybind/pybind11/pull/2053>`_. * STL bindings for ``std::vector<...>::clear``. `#2074 <https://github.com/pybind/pybind11/pull/2074>`_. * Read-only flag for ``py::buffer``. `#1466 <https://github.com/pybind/pybind11/pull/1466>`_. * Exception handling during module initialization. `bf2b031 <https://github.com/pybind/pybind11/commit/bf2b031>`_. * Support linking against a CPython debug build. `#2025 <https://github.com/pybind/pybind11/pull/2025>`_. * Fixed issues involving the availability and use of aligned ``new`` and ``delete``. `#1988 <https://github.com/pybind/pybind11/pull/1988>`_, `759221 <https://github.com/pybind/pybind11/commit/759221>`_. * Fixed a resource leak upon interpreter shutdown. `#2020 <https://github.com/pybind/pybind11/pull/2020>`_. * Fixed error handling in the boolean caster. `#1976 <https://github.com/pybind/pybind11/pull/1976>`_. v2.4.3 (Oct 15, 2019) ----------------------------------------------------- * Adapt pybind11 to a C API convention change in Python 3.8. `#1950 <https://github.com/pybind/pybind11/pull/1950>`_. v2.4.2 (Sep 21, 2019) ----------------------------------------------------- * Replaced usage of a C++14 only construct. `#1929 <https://github.com/pybind/pybind11/pull/1929>`_. * Made an ifdef future-proof for Python >= 4. `f3109d <https://github.com/pybind/pybind11/commit/f3109d>`_. v2.4.1 (Sep 20, 2019) ----------------------------------------------------- * Fixed a problem involving implicit conversion from enumerations to integers on Python 3.8. `#1780 <https://github.com/pybind/pybind11/pull/1780>`_. 
v2.4.0 (Sep 19, 2019)
-----------------------------------------------------

* Try harder to keep pybind11-internal data structures separate when there
  are potential ABI incompatibilities. Fixes crashes that occurred when
  loading multiple pybind11 extensions that were e.g. compiled by GCC
  (libstdc++) and Clang (libc++).
  `#1588 <https://github.com/pybind/pybind11/pull/1588>`_ and
  `c9f5a <https://github.com/pybind/pybind11/commit/c9f5a>`_.
* Added support for ``__await__``, ``__aiter__``, and ``__anext__`` protocols.
  `#1842 <https://github.com/pybind/pybind11/pull/1842>`_.
* ``pybind11_add_module()``: don't strip symbols when compiling in
  ``RelWithDebInfo`` mode. `#1980 <https://github.com/pybind/pybind11/pull/1980>`_.
* ``enum_``: Reproduce Python behavior when comparing against invalid values
  (e.g. ``None``, strings, etc.). Add back support for ``__invert__()``.
  `#1912 <https://github.com/pybind/pybind11/pull/1912>`_,
  `#1907 <https://github.com/pybind/pybind11/pull/1907>`_.
* List insertion operation for ``py::list``. Added ``.empty()`` to all
  collection types. Added ``py::set::contains()`` and ``py::dict::contains()``.
  `#1887 <https://github.com/pybind/pybind11/pull/1887>`_,
  `#1884 <https://github.com/pybind/pybind11/pull/1884>`_,
  `#1888 <https://github.com/pybind/pybind11/pull/1888>`_.
* ``py::details::overload_cast_impl`` is available in C++11 mode, can be used
  like ``overload_cast`` with an additional set of parentheses.
  `#1581 <https://github.com/pybind/pybind11/pull/1581>`_.
* Fixed ``get_include()`` on Conda.
  `#1877 <https://github.com/pybind/pybind11/pull/1877>`_.
* ``stl_bind.h``: negative indexing support.
  `#1882 <https://github.com/pybind/pybind11/pull/1882>`_.
* Minor CMake fix to add MinGW compatibility.
  `#1851 <https://github.com/pybind/pybind11/pull/1851>`_.
* GIL-related fixes.
  `#1836 <https://github.com/pybind/pybind11/pull/1836>`_,
  `8b90b <https://github.com/pybind/pybind11/commit/8b90b>`_.
* Other very minor/subtle fixes and improvements.
  `#1329 <https://github.com/pybind/pybind11/pull/1329>`_,
  `#1910 <https://github.com/pybind/pybind11/pull/1910>`_,
  `#1863 <https://github.com/pybind/pybind11/pull/1863>`_,
  `#1847 <https://github.com/pybind/pybind11/pull/1847>`_,
  `#1890 <https://github.com/pybind/pybind11/pull/1890>`_,
  `#1860 <https://github.com/pybind/pybind11/pull/1860>`_,
  `#1848 <https://github.com/pybind/pybind11/pull/1848>`_,
  `#1821 <https://github.com/pybind/pybind11/pull/1821>`_,
  `#1837 <https://github.com/pybind/pybind11/pull/1837>`_,
  `#1833 <https://github.com/pybind/pybind11/pull/1833>`_,
  `#1748 <https://github.com/pybind/pybind11/pull/1748>`_,
  `#1852 <https://github.com/pybind/pybind11/pull/1852>`_.

v2.3.0 (June 11, 2019)
-----------------------------------------------------

* Significantly reduced module binary size (10-20%) when compiled in C++11
  mode with GCC/Clang, or in any mode with MSVC. Function signatures are now
  always precomputed at compile time (this was previously only available in
  C++14 mode for non-MSVC compilers).
  `#934 <https://github.com/pybind/pybind11/pull/934>`_.
* Add basic support for tag-based static polymorphism, where classes provide
  a method that returns the desired type of an instance.
  `#1326 <https://github.com/pybind/pybind11/pull/1326>`_.
* Python type wrappers (``py::handle``, ``py::object``, etc.) now map
  Python's number protocol onto C++ arithmetic operators such as
  ``operator+``, ``operator/=``, etc.
  `#1511 <https://github.com/pybind/pybind11/pull/1511>`_.
* A number of improvements related to enumerations:
  1. The ``enum_`` implementation was rewritten from scratch to reduce code bloat. Rather than instantiating a full implementation for each enumeration, most code is now contained in a generic base class. `#1511 <https://github.com/pybind/pybind11/pull/1511>`_.

  2. The ``value()`` method of ``py::enum_`` now accepts an optional docstring that will be shown in the documentation of the associated enumeration. `#1160 <https://github.com/pybind/pybind11/pull/1160>`_.

  3. Check for an already existing enum value and throw an error if present. `#1453 <https://github.com/pybind/pybind11/pull/1453>`_.

* Support for over-aligned type allocation via C++17's aligned ``new`` statement. `#1582 <https://github.com/pybind/pybind11/pull/1582>`_.

* Added ``py::ellipsis()`` method for slicing of multidimensional NumPy arrays. `#1502 <https://github.com/pybind/pybind11/pull/1502>`_.

* Numerous improvements to the ``mkdoc.py`` script for extracting documentation from C++ header files. `#1788 <https://github.com/pybind/pybind11/pull/1788>`_.

* ``pybind11_add_module()``: allow including Python as a ``SYSTEM`` include path. `#1416 <https://github.com/pybind/pybind11/pull/1416>`_.

* ``pybind11/stl.h`` does not convert strings to ``vector<string>`` anymore. `#1258 <https://github.com/pybind/pybind11/issues/1258>`_.

* Mark static methods as such to fix auto-generated Sphinx documentation. `#1732 <https://github.com/pybind/pybind11/pull/1732>`_.

* Re-throw forced unwind exceptions (e.g. during pthread termination). `#1208 <https://github.com/pybind/pybind11/pull/1208>`_.

* Added ``__contains__`` method to the bindings of maps (``std::map``, ``std::unordered_map``). `#1767 <https://github.com/pybind/pybind11/pull/1767>`_.

* Improvements to ``gil_scoped_acquire``. `#1211 <https://github.com/pybind/pybind11/pull/1211>`_.

* Type caster support for ``std::deque<T>``. `#1609 <https://github.com/pybind/pybind11/pull/1609>`_.

* Support for ``std::unique_ptr`` holders, whose deleters differ between a base and derived class. `#1353 <https://github.com/pybind/pybind11/pull/1353>`_.

* Construction of STL array/vector-like data structures from iterators. Added an ``extend()`` operation. `#1709 <https://github.com/pybind/pybind11/pull/1709>`_.

* CMake build system improvements for projects that include non-C++ files (e.g. plain C, CUDA) in ``pybind11_add_module`` et al. `#1678 <https://github.com/pybind/pybind11/pull/1678>`_.

* Fixed asynchronous invocation and deallocation of Python functions wrapped in ``std::function``. `#1595 <https://github.com/pybind/pybind11/pull/1595>`_.

* Fixes regarding return value policy propagation in STL type casters. `#1603 <https://github.com/pybind/pybind11/pull/1603>`_.

* Fixed scoped enum comparisons. `#1571 <https://github.com/pybind/pybind11/pull/1571>`_.

* Fixed iostream redirection for code that releases the GIL. `#1368 <https://github.com/pybind/pybind11/pull/1368>`_.

* A number of CI-related fixes. `#1757 <https://github.com/pybind/pybind11/pull/1757>`_, `#1744 <https://github.com/pybind/pybind11/pull/1744>`_, `#1670 <https://github.com/pybind/pybind11/pull/1670>`_.

v2.2.4 (September 11, 2018)
-----------------------------------------------------

* Use new Python 3.7 Thread Specific Storage (TSS) implementation if available. `#1454 <https://github.com/pybind/pybind11/pull/1454>`_, `#1517 <https://github.com/pybind/pybind11/pull/1517>`_.

* Fixes for newer MSVC versions and C++17 mode. `#1347 <https://github.com/pybind/pybind11/pull/1347>`_, `#1462 <https://github.com/pybind/pybind11/pull/1462>`_.
* Propagate return value policies to type-specific casters when casting STL containers. `#1455 <https://github.com/pybind/pybind11/pull/1455>`_.

* Allow ostream-redirection of more than 1024 characters. `#1479 <https://github.com/pybind/pybind11/pull/1479>`_.

* Set ``Py_DEBUG`` define when compiling against a debug Python build. `#1438 <https://github.com/pybind/pybind11/pull/1438>`_.

* Untangle integer logic in number type caster to work for custom types that may only be castable to a restricted set of builtin types. `#1442 <https://github.com/pybind/pybind11/pull/1442>`_.

* CMake build system: Remember Python version in cache file. `#1434 <https://github.com/pybind/pybind11/pull/1434>`_.

* Fix for custom smart pointers: use ``std::addressof`` to obtain holder address instead of ``operator&``. `#1435 <https://github.com/pybind/pybind11/pull/1435>`_.

* Properly report exceptions thrown during module initialization. `#1362 <https://github.com/pybind/pybind11/pull/1362>`_.

* Fixed a segmentation fault when creating an empty-shaped NumPy array. `#1371 <https://github.com/pybind/pybind11/pull/1371>`_.

* The version of Intel C++ compiler must be >= 2017, and this is now checked by the header files. `#1363 <https://github.com/pybind/pybind11/pull/1363>`_.

* A few minor typo fixes and improvements to the test suite, and patches that silence compiler warnings.

* Vectors now support construction from generators, as well as ``extend()`` from a list or generator. `#1496 <https://github.com/pybind/pybind11/pull/1496>`_.

v2.2.3 (April 29, 2018)
-----------------------------------------------------

* The pybind11 header location detection was replaced by a new implementation that no longer depends on ``pip`` internals (the recently released ``pip`` 10 has restricted access to this API). `#1190 <https://github.com/pybind/pybind11/pull/1190>`_.

* Small adjustment to an implementation detail to work around a compiler segmentation fault in Clang 3.3/3.4. `#1350 <https://github.com/pybind/pybind11/pull/1350>`_.

* The minimal supported version of the Intel compiler has been >= 17.0 since pybind11 v2.1. This check is now explicit, and a compile-time error is raised if the compiler does not meet the requirement. `#1363 <https://github.com/pybind/pybind11/pull/1363>`_.

* Fixed an endianness-related fault in the test suite. `#1287 <https://github.com/pybind/pybind11/pull/1287>`_.

v2.2.2 (February 7, 2018)
-----------------------------------------------------

* Fixed a segfault when combining embedded interpreter shutdown/reinitialization with external loaded pybind11 modules. `#1092 <https://github.com/pybind/pybind11/pull/1092>`_.

* Eigen support: fixed a bug where Nx1/1xN numpy inputs couldn't be passed as arguments to Eigen vectors (which for Eigen are simply compile-time fixed Nx1/1xN matrices). `#1106 <https://github.com/pybind/pybind11/pull/1106>`_.

* Clarified the license by moving the licensing of contributions from ``LICENSE`` into ``CONTRIBUTING.md``: the licensing of contributions is not actually part of the software license as distributed. This isn't meant to be a substantial change in the licensing of the project, but addresses concerns that the clause made the license non-standard. `#1109 <https://github.com/pybind/pybind11/issues/1109>`_.

* Fixed a regression introduced in 2.1 that broke binding functions with lvalue character literal arguments. `#1128 <https://github.com/pybind/pybind11/pull/1128>`_.

* MSVC: fix for compilation failures under /permissive-, and added the flag to the appveyor test suite.
  `#1155 <https://github.com/pybind/pybind11/pull/1155>`_.

* Fixed ``__qualname__`` generation, and in turn, fixes how class names (especially nested class names) are shown in generated docstrings. `#1171 <https://github.com/pybind/pybind11/pull/1171>`_.

* Updated the FAQ with a suggested project citation reference. `#1189 <https://github.com/pybind/pybind11/pull/1189>`_.

* Added fixes for deprecation warnings when compiled under C++17 with ``-Wdeprecated`` turned on, and add ``-Wdeprecated`` to the test suite compilation flags. `#1191 <https://github.com/pybind/pybind11/pull/1191>`_.

* Fixed outdated PyPI URLs in ``setup.py``. `#1213 <https://github.com/pybind/pybind11/pull/1213>`_.

* Fixed a refcount leak for arguments that end up in a ``py::args`` argument for functions with both fixed positional and ``py::args`` arguments. `#1216 <https://github.com/pybind/pybind11/pull/1216>`_.

* Fixed a potential segfault resulting from possible premature destruction of ``py::args``/``py::kwargs`` arguments with overloaded functions. `#1223 <https://github.com/pybind/pybind11/pull/1223>`_.

* Fixed ``del map[item]`` for an ``stl_bind.h``-bound STL map. `#1229 <https://github.com/pybind/pybind11/pull/1229>`_.

* Fixed a regression from v2.1.x where the aggregate initialization could unintentionally end up at a constructor taking a templated ``std::initializer_list<T>`` argument. `#1249 <https://github.com/pybind/pybind11/pull/1249>`_.

* Fixed an issue where calling a function with a keep_alive policy on the same nurse/patient pair would cause the internal patient storage to needlessly grow (unboundedly, if the nurse is long-lived). `#1251 <https://github.com/pybind/pybind11/issues/1251>`_.

* Various other minor fixes.

v2.2.1 (September 14, 2017)
-----------------------------------------------------

* Added ``py::module_::reload()`` member function for reloading a module. `#1040 <https://github.com/pybind/pybind11/pull/1040>`_.

* Fixed a reference leak in the number converter. `#1078 <https://github.com/pybind/pybind11/pull/1078>`_.

* Fixed compilation with Clang on host GCC < 5 (old libstdc++ which isn't fully C++11 compliant). `#1062 <https://github.com/pybind/pybind11/pull/1062>`_.

* Fixed a regression where the automatic ``std::vector<bool>`` caster would fail to compile. The same fix also applies to any container which returns element proxies instead of references. `#1053 <https://github.com/pybind/pybind11/pull/1053>`_.

* Fixed a regression where the ``py::keep_alive`` policy could not be applied to constructors. `#1065 <https://github.com/pybind/pybind11/pull/1065>`_.

* Fixed a nullptr dereference when loading a ``py::module_local`` type that's only registered in an external module. `#1058 <https://github.com/pybind/pybind11/pull/1058>`_.

* Fixed implicit conversion of accessors to types derived from ``py::object``. `#1076 <https://github.com/pybind/pybind11/pull/1076>`_.

* The ``name`` in ``PYBIND11_MODULE(name, variable)`` can now be a macro. `#1082 <https://github.com/pybind/pybind11/pull/1082>`_.

* Relaxed overly strict ``py::pickle()`` check for matching get and set types. `#1064 <https://github.com/pybind/pybind11/pull/1064>`_.

* Conversion errors now try to be more informative when it's likely that a missing header is the cause (e.g. forgetting ``<pybind11/stl.h>``). `#1077 <https://github.com/pybind/pybind11/pull/1077>`_.

v2.2.0 (August 31, 2017)
-----------------------------------------------------

* Support for embedding the Python interpreter.
  See the :doc:`documentation page </advanced/embedding>` for a full overview of the new features. `#774 <https://github.com/pybind/pybind11/pull/774>`_, `#889 <https://github.com/pybind/pybind11/pull/889>`_, `#892 <https://github.com/pybind/pybind11/pull/892>`_, `#920 <https://github.com/pybind/pybind11/pull/920>`_.

  .. code-block:: cpp

      #include <pybind11/embed.h>
      namespace py = pybind11;

      int main() {
          py::scoped_interpreter guard{}; // start the interpreter and keep it alive

          py::print("Hello, World!"); // use the Python API
      }

* Support for inheriting from multiple C++ bases in Python. `#693 <https://github.com/pybind/pybind11/pull/693>`_.

  .. code-block:: python

      from cpp_module import CppBase1, CppBase2

      class PyDerived(CppBase1, CppBase2):
          def __init__(self):
              CppBase1.__init__(self)  # C++ bases must be initialized explicitly
              CppBase2.__init__(self)

* ``PYBIND11_MODULE`` is now the preferred way to create module entry points. ``PYBIND11_PLUGIN`` is deprecated. See :ref:`macros` for details. `#879 <https://github.com/pybind/pybind11/pull/879>`_.

  .. code-block:: cpp

      // new
      PYBIND11_MODULE(example, m) {
          m.def("add", [](int a, int b) { return a + b; });
      }

      // old
      PYBIND11_PLUGIN(example) {
          py::module m("example");
          m.def("add", [](int a, int b) { return a + b; });
          return m.ptr();
      }

* pybind11's headers and build system now more strictly enforce hidden symbol visibility for extension modules. This should be seamless for most users, but see the :doc:`upgrade` if you use a custom build system. `#995 <https://github.com/pybind/pybind11/pull/995>`_.

* Support for ``py::module_local`` types which allow multiple modules to export the same C++ types without conflicts. This is useful for opaque types like ``std::vector<int>``. ``py::bind_vector`` and ``py::bind_map`` now default to ``py::module_local`` if their elements are builtins or local types. See :ref:`module_local` for details. `#949 <https://github.com/pybind/pybind11/pull/949>`_, `#981 <https://github.com/pybind/pybind11/pull/981>`_, `#995 <https://github.com/pybind/pybind11/pull/995>`_, `#997 <https://github.com/pybind/pybind11/pull/997>`_.

* Custom constructors can now be added very easily using lambdas or factory functions which return a class instance by value, pointer or holder. This supersedes the old placement-new ``__init__`` technique. See :ref:`custom_constructors` for details. `#805 <https://github.com/pybind/pybind11/pull/805>`_, `#1014 <https://github.com/pybind/pybind11/pull/1014>`_.

  .. code-block:: cpp

      struct Example {
          Example(std::string);
      };

      py::class_<Example>(m, "Example")
          .def(py::init<std::string>()) // existing constructor
          .def(py::init([](int n) { // custom constructor
              return std::make_unique<Example>(std::to_string(n));
          }));

* Similarly to custom constructors, pickling support functions are now bound using the ``py::pickle()`` adaptor which improves type safety. See the :doc:`upgrade` and :ref:`pickling` for details. `#1038 <https://github.com/pybind/pybind11/pull/1038>`_.

* Builtin support for converting C++17 standard library types and general conversion improvements:

  1. C++17 ``std::variant`` is supported right out of the box. C++11/14 equivalents (e.g. ``boost::variant``) can also be added with a simple user-defined specialization. See :ref:`cpp17_container_casters` for details. `#811 <https://github.com/pybind/pybind11/pull/811>`_, `#845 <https://github.com/pybind/pybind11/pull/845>`_, `#989 <https://github.com/pybind/pybind11/pull/989>`_.

  2. Out-of-the-box support for C++17 ``std::string_view``.
     `#906 <https://github.com/pybind/pybind11/pull/906>`_.

  3. Improved compatibility of the builtin ``optional`` converter. `#874 <https://github.com/pybind/pybind11/pull/874>`_.

  4. The ``bool`` converter now accepts ``numpy.bool_`` and types which define ``__bool__`` (Python 3.x) or ``__nonzero__`` (Python 2.7). `#925 <https://github.com/pybind/pybind11/pull/925>`_.

  5. C++-to-Python casters are now more efficient and move elements out of rvalue containers whenever possible. `#851 <https://github.com/pybind/pybind11/pull/851>`_, `#936 <https://github.com/pybind/pybind11/pull/936>`_, `#938 <https://github.com/pybind/pybind11/pull/938>`_.

  6. Fixed ``bytes`` to ``std::string/char*`` conversion on Python 3. `#817 <https://github.com/pybind/pybind11/pull/817>`_.

  7. Fixed lifetime of temporary C++ objects created in Python-to-C++ conversions. `#924 <https://github.com/pybind/pybind11/pull/924>`_.

* Scope guard call policy for RAII types, e.g. ``py::call_guard<py::gil_scoped_release>()``, ``py::call_guard<py::scoped_ostream_redirect>()``. See :ref:`call_policies` for details. `#740 <https://github.com/pybind/pybind11/pull/740>`_.

* Utility for redirecting C++ streams to Python (e.g. ``std::cout`` -> ``sys.stdout``). Scope guard ``py::scoped_ostream_redirect`` in C++ and a context manager in Python. See :ref:`ostream_redirect`. `#1009 <https://github.com/pybind/pybind11/pull/1009>`_.

* Improved handling of types and exceptions across module boundaries. `#915 <https://github.com/pybind/pybind11/pull/915>`_, `#951 <https://github.com/pybind/pybind11/pull/951>`_, `#995 <https://github.com/pybind/pybind11/pull/995>`_.

* Fixed destruction order of ``py::keep_alive`` nurse/patient objects in reference cycles. `#856 <https://github.com/pybind/pybind11/pull/856>`_.

* NumPy and buffer protocol related improvements:

  1. Support for negative strides in Python buffer objects/numpy arrays. This required changing integers from unsigned to signed for the related C++ APIs. Note: If you have compiler warnings enabled, you may notice some new conversion warnings after upgrading. These can be resolved with ``static_cast``. `#782 <https://github.com/pybind/pybind11/pull/782>`_.

  2. Support ``std::complex`` and arrays inside ``PYBIND11_NUMPY_DTYPE``. `#831 <https://github.com/pybind/pybind11/pull/831>`_, `#832 <https://github.com/pybind/pybind11/pull/832>`_.

  3. Support for constructing ``py::buffer_info`` and ``py::arrays`` using arbitrary containers or iterators instead of requiring a ``std::vector``. `#788 <https://github.com/pybind/pybind11/pull/788>`_, `#822 <https://github.com/pybind/pybind11/pull/822>`_, `#860 <https://github.com/pybind/pybind11/pull/860>`_.

  4. Explicitly check numpy version and require >= 1.7.0. `#819 <https://github.com/pybind/pybind11/pull/819>`_.

* Support for allowing/prohibiting ``None`` for specific arguments and improved ``None`` overload resolution order. See :ref:`none_arguments` for details. `#843 <https://github.com/pybind/pybind11/pull/843>`_, `#859 <https://github.com/pybind/pybind11/pull/859>`_.

* Added ``py::exec()`` as a shortcut for ``py::eval<py::eval_statements>()`` and support for C++11 raw string literals as input. See :ref:`eval`. `#766 <https://github.com/pybind/pybind11/pull/766>`_, `#827 <https://github.com/pybind/pybind11/pull/827>`_.

* ``py::vectorize()`` ignores non-vectorizable arguments and supports member functions. `#762 <https://github.com/pybind/pybind11/pull/762>`_.

* Support for bound methods as callbacks (``pybind11/functional.h``).
  `#815 <https://github.com/pybind/pybind11/pull/815>`_.

* Allow aliasing pybind11 methods: ``cls.attr("foo") = cls.attr("bar")``. `#802 <https://github.com/pybind/pybind11/pull/802>`_.

* Don't allow mixed static/non-static overloads. `#804 <https://github.com/pybind/pybind11/pull/804>`_.

* Fixed overriding static properties in derived classes. `#784 <https://github.com/pybind/pybind11/pull/784>`_.

* Added support for write-only properties. `#1144 <https://github.com/pybind/pybind11/pull/1144>`_.

* Improved deduction of member functions of a derived class when its bases aren't registered with pybind11. `#855 <https://github.com/pybind/pybind11/pull/855>`_.

  .. code-block:: cpp

      struct Base { int foo() { return 42; } };
      struct Derived : Base {};

      // Now works, but previously required also binding `Base`
      py::class_<Derived>(m, "Derived")
          .def("foo", &Derived::foo); // function is actually from `Base`

* The implementation of ``py::init<>`` now uses C++11 brace initialization syntax to construct instances, which permits binding implicit constructors of aggregate types. `#1015 <https://github.com/pybind/pybind11/pull/1015>`_.

  .. code-block:: cpp

      struct Aggregate {
          int a;
          std::string b;
      };

      py::class_<Aggregate>(m, "Aggregate")
          .def(py::init<int, const std::string &>());

* Fixed issues with multiple inheritance with offset base/derived pointers. `#812 <https://github.com/pybind/pybind11/pull/812>`_, `#866 <https://github.com/pybind/pybind11/pull/866>`_, `#960 <https://github.com/pybind/pybind11/pull/960>`_.

* Fixed reference leak of type objects. `#1030 <https://github.com/pybind/pybind11/pull/1030>`_.

* Improved support for the ``/std:c++14`` and ``/std:c++latest`` modes on MSVC 2017. `#841 <https://github.com/pybind/pybind11/pull/841>`_, `#999 <https://github.com/pybind/pybind11/pull/999>`_.

* Fixed detection of private operator new on MSVC. `#893 <https://github.com/pybind/pybind11/pull/893>`_, `#918 <https://github.com/pybind/pybind11/pull/918>`_.

* Intel C++ compiler compatibility fixes. `#937 <https://github.com/pybind/pybind11/pull/937>`_.

* Fixed implicit conversion of ``py::enum_`` to integer types on Python 2.7. `#821 <https://github.com/pybind/pybind11/pull/821>`_.

* Added ``py::hash`` to fetch the hash value of Python objects, and ``.def(hash(py::self))`` to provide the C++ ``std::hash`` as the Python ``__hash__`` method. `#1034 <https://github.com/pybind/pybind11/pull/1034>`_.

* Fixed ``__truediv__`` on Python 2 and ``__itruediv__`` on Python 3. `#867 <https://github.com/pybind/pybind11/pull/867>`_.

* ``py::capsule`` objects now support the ``name`` attribute. This is useful for interfacing with ``scipy.LowLevelCallable``. `#902 <https://github.com/pybind/pybind11/pull/902>`_.

* Fixed ``py::make_iterator``'s ``__next__()`` for past-the-end calls. `#897 <https://github.com/pybind/pybind11/pull/897>`_.

* Added ``error_already_set::matches()`` for checking Python exceptions. `#772 <https://github.com/pybind/pybind11/pull/772>`_.

* Deprecated ``py::error_already_set::clear()``. It's no longer needed following a simplification of the ``py::error_already_set`` class. `#954 <https://github.com/pybind/pybind11/pull/954>`_.

* Deprecated ``py::handle::operator==()`` in favor of ``py::handle::is()``. `#825 <https://github.com/pybind/pybind11/pull/825>`_.

* Deprecated ``py::object::borrowed``/``py::object::stolen``. Use ``py::object::borrowed_t{}``/``py::object::stolen_t{}`` instead. `#771 <https://github.com/pybind/pybind11/pull/771>`_.
* Changed internal data structure versioning to avoid conflicts between modules compiled with different revisions of pybind11. `#1012 <https://github.com/pybind/pybind11/pull/1012>`_.

* Additional compile-time and run-time error checking and more informative messages. `#786 <https://github.com/pybind/pybind11/pull/786>`_, `#794 <https://github.com/pybind/pybind11/pull/794>`_, `#803 <https://github.com/pybind/pybind11/pull/803>`_.

* Various minor improvements and fixes. `#764 <https://github.com/pybind/pybind11/pull/764>`_, `#791 <https://github.com/pybind/pybind11/pull/791>`_, `#795 <https://github.com/pybind/pybind11/pull/795>`_, `#840 <https://github.com/pybind/pybind11/pull/840>`_, `#844 <https://github.com/pybind/pybind11/pull/844>`_, `#846 <https://github.com/pybind/pybind11/pull/846>`_, `#849 <https://github.com/pybind/pybind11/pull/849>`_, `#858 <https://github.com/pybind/pybind11/pull/858>`_, `#862 <https://github.com/pybind/pybind11/pull/862>`_, `#871 <https://github.com/pybind/pybind11/pull/871>`_, `#872 <https://github.com/pybind/pybind11/pull/872>`_, `#881 <https://github.com/pybind/pybind11/pull/881>`_, `#888 <https://github.com/pybind/pybind11/pull/888>`_, `#899 <https://github.com/pybind/pybind11/pull/899>`_, `#928 <https://github.com/pybind/pybind11/pull/928>`_, `#931 <https://github.com/pybind/pybind11/pull/931>`_, `#944 <https://github.com/pybind/pybind11/pull/944>`_, `#950 <https://github.com/pybind/pybind11/pull/950>`_, `#952 <https://github.com/pybind/pybind11/pull/952>`_, `#962 <https://github.com/pybind/pybind11/pull/962>`_, `#965 <https://github.com/pybind/pybind11/pull/965>`_, `#970 <https://github.com/pybind/pybind11/pull/970>`_, `#978 <https://github.com/pybind/pybind11/pull/978>`_, `#979 <https://github.com/pybind/pybind11/pull/979>`_, `#986 <https://github.com/pybind/pybind11/pull/986>`_, `#1020 <https://github.com/pybind/pybind11/pull/1020>`_, `#1027 <https://github.com/pybind/pybind11/pull/1027>`_, `#1037 <https://github.com/pybind/pybind11/pull/1037>`_.

* Testing improvements. `#798 <https://github.com/pybind/pybind11/pull/798>`_, `#882 <https://github.com/pybind/pybind11/pull/882>`_, `#898 <https://github.com/pybind/pybind11/pull/898>`_, `#900 <https://github.com/pybind/pybind11/pull/900>`_, `#921 <https://github.com/pybind/pybind11/pull/921>`_, `#923 <https://github.com/pybind/pybind11/pull/923>`_, `#963 <https://github.com/pybind/pybind11/pull/963>`_.

v2.1.1 (April 7, 2017)
-----------------------------------------------------

* Fixed minimum version requirement for MSVC 2015u3. `#773 <https://github.com/pybind/pybind11/pull/773>`_.

v2.1.0 (March 22, 2017)
-----------------------------------------------------

* pybind11 now performs function overload resolution in two phases. The first phase only considers exact type matches, while the second allows for implicit conversions to take place. A special ``noconvert()`` syntax can be used to completely disable implicit conversions for specific arguments. `#643 <https://github.com/pybind/pybind11/pull/643>`_, `#634 <https://github.com/pybind/pybind11/pull/634>`_, `#650 <https://github.com/pybind/pybind11/pull/650>`_.

* Fixed a regression where static properties no longer worked with classes using multiple inheritance. The ``py::metaclass`` attribute is no longer necessary (and deprecated as of this release) when binding classes with static properties. `#679 <https://github.com/pybind/pybind11/pull/679>`_.

* Classes bound using ``pybind11`` can now use custom metaclasses.
  `#679 <https://github.com/pybind/pybind11/pull/679>`_.

* ``py::args`` and ``py::kwargs`` can now be mixed with other positional arguments when binding functions using pybind11. `#611 <https://github.com/pybind/pybind11/pull/611>`_.

* Improved support for C++11 unicode string and character types; added extensive documentation regarding pybind11's string conversion behavior. `#624 <https://github.com/pybind/pybind11/pull/624>`_, `#636 <https://github.com/pybind/pybind11/pull/636>`_, `#715 <https://github.com/pybind/pybind11/pull/715>`_.

* pybind11 can now avoid expensive copies when converting Eigen arrays to NumPy arrays (and vice versa). `#610 <https://github.com/pybind/pybind11/pull/610>`_.

* The "fast path" in ``py::vectorize`` now works for any full-size group of C or F-contiguous arrays. The non-fast path is also faster since it no longer performs copies of the input arguments (except when type conversions are necessary). `#610 <https://github.com/pybind/pybind11/pull/610>`_.

* Added fast, unchecked access to NumPy arrays via a proxy object. `#746 <https://github.com/pybind/pybind11/pull/746>`_.

* Transparent support for class-specific ``operator new`` and ``operator delete`` implementations. `#755 <https://github.com/pybind/pybind11/pull/755>`_.

* Slimmer and more efficient STL-compatible iterator interface for sequence types. `#662 <https://github.com/pybind/pybind11/pull/662>`_.

* Improved custom holder type support. `#607 <https://github.com/pybind/pybind11/pull/607>`_.

* ``nullptr`` to ``None`` conversion fixed in various builtin type casters. `#732 <https://github.com/pybind/pybind11/pull/732>`_.

* ``enum_`` now exposes its members via a special ``__members__`` attribute. `#666 <https://github.com/pybind/pybind11/pull/666>`_.

* ``std::vector`` bindings created using ``stl_bind.h`` can now optionally implement the buffer protocol. `#488 <https://github.com/pybind/pybind11/pull/488>`_.

* Automated C++ reference documentation using doxygen and breathe. `#598 <https://github.com/pybind/pybind11/pull/598>`_.

* Added minimum compiler version assertions. `#727 <https://github.com/pybind/pybind11/pull/727>`_.

* Improved compatibility with C++1z. `#677 <https://github.com/pybind/pybind11/pull/677>`_.

* Improved ``py::capsule`` API. Can be used to implement cleanup callbacks that are invoked at module destruction time. `#752 <https://github.com/pybind/pybind11/pull/752>`_.

* Various minor improvements and fixes. `#595 <https://github.com/pybind/pybind11/pull/595>`_, `#588 <https://github.com/pybind/pybind11/pull/588>`_, `#589 <https://github.com/pybind/pybind11/pull/589>`_, `#603 <https://github.com/pybind/pybind11/pull/603>`_, `#619 <https://github.com/pybind/pybind11/pull/619>`_, `#648 <https://github.com/pybind/pybind11/pull/648>`_, `#695 <https://github.com/pybind/pybind11/pull/695>`_, `#720 <https://github.com/pybind/pybind11/pull/720>`_, `#723 <https://github.com/pybind/pybind11/pull/723>`_, `#729 <https://github.com/pybind/pybind11/pull/729>`_, `#724 <https://github.com/pybind/pybind11/pull/724>`_, `#742 <https://github.com/pybind/pybind11/pull/742>`_, `#753 <https://github.com/pybind/pybind11/pull/753>`_.

v2.0.1 (Jan 4, 2017)
-----------------------------------------------------

* Fixed a pointer-to-reference error in ``type_caster`` on MSVC. `#583 <https://github.com/pybind/pybind11/pull/583>`_.

* Fixed a segmentation fault in the test suite caused by a typo. `cd7eac <https://github.com/pybind/pybind11/commit/cd7eac>`_.
v2.0.0 (Jan 1, 2017)
-----------------------------------------------------

* Fixed a reference counting regression affecting types with custom metaclasses (introduced in v2.0.0-rc1). `#571 <https://github.com/pybind/pybind11/pull/571>`_.

* Quenched a CMake policy warning. `#570 <https://github.com/pybind/pybind11/pull/570>`_.

v2.0.0-rc1 (Dec 23, 2016)
-----------------------------------------------------

The pybind11 developers are excited to issue a release candidate of pybind11 with a subsequent v2.0.0 release planned in early January next year. An incredible amount of effort went into pybind11 over the last ~5 months, leading to a release that is jam-packed with exciting new features and numerous usability improvements. The following list links PRs or individual commits whenever applicable. Happy Christmas!

* Support for binding C++ class hierarchies that make use of multiple inheritance. `#410 <https://github.com/pybind/pybind11/pull/410>`_.

* PyPy support: pybind11 now supports nightly builds of PyPy and will interoperate with the future 5.7 release. No code changes are necessary, everything "just" works as usual. Note that we only target the Python 2.7 branch for now; support for 3.x will be added once its ``cpyext`` extension support catches up. A few minor features remain unsupported for the time being (notably dynamic attributes in custom types). `#527 <https://github.com/pybind/pybind11/pull/527>`_.

* Significant work on the documentation -- in particular, the monolithic ``advanced.rst`` file was restructured into an easier-to-read hierarchical organization. `#448 <https://github.com/pybind/pybind11/pull/448>`_.

* Many NumPy-related improvements:

  1. Object-oriented API to access and modify NumPy ``ndarray`` instances, replicating much of the corresponding NumPy C API functionality. `#402 <https://github.com/pybind/pybind11/pull/402>`_.

  2. NumPy array ``dtype`` array descriptors are now first-class citizens and are exposed via a new class ``py::dtype``.

  3. Structured dtypes can be registered using the ``PYBIND11_NUMPY_DTYPE()`` macro. Special ``array`` constructors accepting dtype objects were also added. One potential caveat involving this change: format descriptor strings should now be accessed via ``format_descriptor::format()`` (however, for compatibility purposes, the old syntax ``format_descriptor::value`` will still work for non-structured data types). `#308 <https://github.com/pybind/pybind11/pull/308>`_.

  4. Further improvements to support structured dtypes throughout the system. `#472 <https://github.com/pybind/pybind11/pull/472>`_, `#474 <https://github.com/pybind/pybind11/pull/474>`_, `#459 <https://github.com/pybind/pybind11/pull/459>`_, `#453 <https://github.com/pybind/pybind11/pull/453>`_, `#452 <https://github.com/pybind/pybind11/pull/452>`_, and `#505 <https://github.com/pybind/pybind11/pull/505>`_.

  5. Fast access operators. `#497 <https://github.com/pybind/pybind11/pull/497>`_.

  6. Constructors for arrays whose storage is owned by another object. `#440 <https://github.com/pybind/pybind11/pull/440>`_.

  7. Added constructors for ``array`` and ``array_t`` explicitly accepting shape and strides; if strides are not provided, they are deduced assuming C-contiguity. Also added simplified constructors for the 1-dimensional case.

  8. Added buffer/NumPy support for ``char[N]`` and ``std::array<char, N>`` types.

  9. Added ``memoryview`` wrapper type which is constructible from ``buffer_info``.

* Eigen: many additional conversions and support for non-contiguous arrays/slices.
  `#427 <https://github.com/pybind/pybind11/pull/427>`_, `#315 <https://github.com/pybind/pybind11/pull/315>`_, `#316 <https://github.com/pybind/pybind11/pull/316>`_, `#312 <https://github.com/pybind/pybind11/pull/312>`_, and `#267 <https://github.com/pybind/pybind11/pull/267>`_.

* Incompatible changes in ``class_<...>::class_()``:

  1. Declarations of types that provide access via the buffer protocol must now include the ``py::buffer_protocol()`` annotation as an argument to the ``class_`` constructor.

  2. Declarations of types that require a custom metaclass (i.e. all classes which include static properties via commands such as ``def_readwrite_static()``) must now include the ``py::metaclass()`` annotation as an argument to the ``class_`` constructor. These two changes were necessary to make type definitions in pybind11 future-proof, and to support PyPy via its cpyext mechanism. `#527 <https://github.com/pybind/pybind11/pull/527>`_.

  3. This version of pybind11 uses a redesigned mechanism for instantiating trampoline classes that are used to override virtual methods from within Python. This led to the following user-visible syntax change: instead of

     .. code-block:: cpp

         py::class_<TrampolineClass>("MyClass")
             .alias<MyClass>()
             ....

     write

     .. code-block:: cpp

         py::class_<MyClass, TrampolineClass>("MyClass")
             ....

     Importantly, both the original and the trampoline class are now specified as arguments (in arbitrary order) to the ``py::class_`` template, and the ``alias<..>()`` call is gone. The new scheme has zero overhead in cases when Python doesn't override any functions of the underlying C++ class. `rev. 86d825 <https://github.com/pybind/pybind11/commit/86d825>`_.

* Added ``eval`` and ``eval_file`` functions for evaluating expressions and statements from a string or file. `rev. 0d3fc3 <https://github.com/pybind/pybind11/commit/0d3fc3>`_.

* pybind11 can now create types with a modifiable dictionary. `#437 <https://github.com/pybind/pybind11/pull/437>`_ and `#444 <https://github.com/pybind/pybind11/pull/444>`_.

* Support for translation of arbitrary C++ exceptions to Python counterparts. `#296 <https://github.com/pybind/pybind11/pull/296>`_ and `#273 <https://github.com/pybind/pybind11/pull/273>`_.

* Report full backtraces through mixed C++/Python code, better reporting for import errors, fixed GIL management in exception processing. `#537 <https://github.com/pybind/pybind11/pull/537>`_, `#494 <https://github.com/pybind/pybind11/pull/494>`_, `rev. e72d95 <https://github.com/pybind/pybind11/commit/e72d95>`_, and `rev. 099d6e <https://github.com/pybind/pybind11/commit/099d6e>`_.

* Support for bit-level operations, comparisons, and serialization of C++ enumerations. `#503 <https://github.com/pybind/pybind11/pull/503>`_, `#508 <https://github.com/pybind/pybind11/pull/508>`_, `#380 <https://github.com/pybind/pybind11/pull/380>`_, `#309 <https://github.com/pybind/pybind11/pull/309>`_, `#311 <https://github.com/pybind/pybind11/pull/311>`_.

* The ``class_`` constructor now accepts its template arguments in any order. `#385 <https://github.com/pybind/pybind11/pull/385>`_.

* Attribute and item accessors now have a more complete interface which makes it possible to chain attributes as in ``obj.attr("a")[key].attr("b").attr("method")(1, 2, 3)``. `#425 <https://github.com/pybind/pybind11/pull/425>`_.

* Major redesign of the default and conversion constructors in ``pytypes.h``. `#464 <https://github.com/pybind/pybind11/pull/464>`_.

* Added built-in support for ``std::shared_ptr`` holder type.
  It is no longer necessary to include a declaration of the form ``PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>)`` (though continuing to do so won't cause an error). `#454 <https://github.com/pybind/pybind11/pull/454>`_.

* New ``py::overload_cast`` casting operator to select among multiple possible overloads of a function. An example:

  .. code-block:: cpp

      py::class_<Pet>(m, "Pet")
          .def("set", py::overload_cast<int>(&Pet::set), "Set the pet's age")
          .def("set", py::overload_cast<const std::string &>(&Pet::set), "Set the pet's name");

  This feature only works on C++14-capable compilers. `#541 <https://github.com/pybind/pybind11/pull/541>`_.

* C++ types are automatically cast to Python types, e.g. when assigning them as an attribute. For instance, the following is now legal:

  .. code-block:: cpp

      py::module m = /* ... */
      m.attr("constant") = 123;

  (Previously, a ``py::cast`` call was necessary to avoid a compilation error.) `#551 <https://github.com/pybind/pybind11/pull/551>`_.

* Redesigned ``pytest``-based test suite. `#321 <https://github.com/pybind/pybind11/pull/321>`_.

* Instance tracking to detect reference leaks in test suite. `#324 <https://github.com/pybind/pybind11/pull/324>`_.

* pybind11 can now distinguish between multiple different instances that are located at the same memory address, but which have different types. `#329 <https://github.com/pybind/pybind11/pull/329>`_.

* Improved logic in ``move`` return value policy. `#510 <https://github.com/pybind/pybind11/pull/510>`_, `#297 <https://github.com/pybind/pybind11/pull/297>`_.

* Generalized unpacking API to permit calling Python functions from C++ using notation such as ``foo(a1, a2, *args, "ka"_a=1, "kb"_a=2, **kwargs)``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.

* ``py::print()`` function whose behavior matches that of the native Python ``print()`` function. `#372 <https://github.com/pybind/pybind11/pull/372>`_.

* Added ``py::dict`` keyword constructor: ``auto d = dict("number"_a=42, "name"_a="World");``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.

* Added ``py::str::format()`` method and ``_s`` literal: ``py::str s = "1 + 2 = {}"_s.format(3);``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.

* Added ``py::repr()`` function which is equivalent to Python's builtin ``repr()``. `#333 <https://github.com/pybind/pybind11/pull/333>`_.

* Improved construction and destruction logic for holder types. It is now possible to reference instances with smart pointer holder types without constructing the holder if desired. The ``PYBIND11_DECLARE_HOLDER_TYPE`` macro now accepts an optional second parameter to indicate whether the holder type uses intrusive reference counting. `#533 <https://github.com/pybind/pybind11/pull/533>`_ and `#561 <https://github.com/pybind/pybind11/pull/561>`_.

* Mapping a stateless C++ function to Python and back is now "for free" (i.e. no extra indirections or argument conversion overheads). `rev. 954b79 <https://github.com/pybind/pybind11/commit/954b79>`_.

* Bindings for ``std::valarray<T>``. `#545 <https://github.com/pybind/pybind11/pull/545>`_.

* Improved support for C++17-capable compilers. `#562 <https://github.com/pybind/pybind11/pull/562>`_.

* Bindings for ``std::optional<T>``. `#475 <https://github.com/pybind/pybind11/pull/475>`_, `#476 <https://github.com/pybind/pybind11/pull/476>`_, `#479 <https://github.com/pybind/pybind11/pull/479>`_, `#499 <https://github.com/pybind/pybind11/pull/499>`_, and `#501 <https://github.com/pybind/pybind11/pull/501>`_.
* ``stl_bind.h``: general improvements and support for ``std::map`` and ``std::unordered_map``. `#490 <https://github.com/pybind/pybind11/pull/490>`_, `#282 <https://github.com/pybind/pybind11/pull/282>`_, `#235 <https://github.com/pybind/pybind11/pull/235>`_.

* The ``std::tuple``, ``std::pair``, ``std::list``, and ``std::vector`` type casters now accept any Python sequence type as input. `rev. 107285 <https://github.com/pybind/pybind11/commit/107285>`_.

* Improved CMake Python detection on multi-architecture Linux. `#532 <https://github.com/pybind/pybind11/pull/532>`_.

* Infrastructure to selectively disable or enable parts of the automatically generated docstrings. `#486 <https://github.com/pybind/pybind11/pull/486>`_.

* ``reference`` and ``reference_internal`` are now the default return value properties for static and non-static properties, respectively (the previous defaults were ``automatic``). `#473 <https://github.com/pybind/pybind11/pull/473>`_.

* Support for ``std::unique_ptr`` with non-default deleters or no deleter at all (``py::nodelete``). `#384 <https://github.com/pybind/pybind11/pull/384>`_.

* Deprecated ``handle::call()`` method. The new syntax to call Python functions is simply ``handle()``. It can also be invoked explicitly via ``handle::operator<X>()``, where ``X`` is an optional return value policy.

* Print more informative error messages when ``make_tuple()`` or ``cast()`` fail. `#262 <https://github.com/pybind/pybind11/pull/262>`_.

* Creation of holder types for classes deriving from ``std::enable_shared_from_this<>`` now also works for ``const`` values. `#260 <https://github.com/pybind/pybind11/pull/260>`_.

* ``make_iterator()`` improvements for better compatibility with various types (now uses prefix increment operator); it now also accepts iterators with different begin/end types as long as they are equality comparable. `#247 <https://github.com/pybind/pybind11/pull/247>`_.

* ``arg()`` now accepts a wider range of argument types for default values. `#244 <https://github.com/pybind/pybind11/pull/244>`_.

* Support ``keep_alive`` where the nurse object may be ``None``. `#341 <https://github.com/pybind/pybind11/pull/341>`_.

* Added constructors for ``str`` and ``bytes`` from zero-terminated char pointers, and from char pointers and length. Added constructors for ``str`` from ``bytes`` and for ``bytes`` from ``str``, which will perform UTF-8 decoding/encoding as required.

* Many other improvements of library internals without user-visible changes.

1.8.1 (July 12, 2016)
----------------------

* Fixed a rare but potentially very severe issue when the garbage collector ran during pybind11 type creation.

1.8.0 (June 14, 2016)
----------------------

* Redesigned CMake build system which exports a convenient ``pybind11_add_module`` function to parent projects.
* ``std::vector<>`` type bindings analogous to Boost.Python's ``indexing_suite``
* Transparent conversion of sparse and dense Eigen matrices and vectors (``eigen.h``)
* Added an ``ExtraFlags`` template argument to the NumPy ``array_t<>`` wrapper to disable an enforced cast that may lose precision, e.g. to create overloads for different precisions and complex vs real-valued matrices.
* Prevent implicit conversion of floating point values to integral types in function arguments
* Fixed incorrect default return value policy for functions returning a shared pointer
* Don't allow registering a type via ``class_`` twice
* Don't allow casting a ``None`` value into a C++ lvalue reference
* Fixed a crash in ``enum_::operator==`` that was triggered by the ``help()`` command
* Improved detection of whether or not custom C++ types can be copy/move-constructed
* Extended ``str`` type to also work with ``bytes`` instances
* Added a ``"name"_a`` user-defined string literal that is equivalent to ``py::arg("name")``.
* When specifying function arguments via ``py::arg``, the test that verifies the number of arguments now runs at compile time.
* Added ``[[noreturn]]`` attribute to ``pybind11_fail()`` to quench some compiler warnings
* List function arguments in exception text when the dispatch code cannot find a matching overload
* Added ``PYBIND11_OVERLOAD_NAME`` and ``PYBIND11_OVERLOAD_PURE_NAME`` macros which can be used to override virtual methods whose name differs in C++ and Python (e.g. ``__call__`` and ``operator()``)
* Various minor ``iterator`` and ``make_iterator()`` improvements
* Transparently support ``__bool__`` on Python 2.x and Python 3.x
* Fixed issue with destructor of unpickled object not being called
* Minor CMake build system improvements on Windows
* New ``pybind11::args`` and ``pybind11::kwargs`` types to create functions which take an arbitrary number of arguments and keyword arguments
* New syntax to call a Python function from C++ using ``*args`` and ``*kwargs``
* The functions ``def_property_*`` now correctly process docstring arguments (these formerly caused a segmentation fault)
* Many ``mkdoc.py`` improvements (enumerations, template arguments, ``DOC()`` macro accepts more arguments)
* Cygwin support
* Documentation improvements (pickling support, ``keep_alive``, macro usage)

1.7 (April 30, 2016)
----------------------

* Added a new ``move`` return value policy that triggers C++11 move semantics.
  The automatic return value policy falls back to this case whenever an rvalue reference is encountered
* Significantly more general GIL state routines that are used instead of Python's troublesome ``PyGILState_Ensure`` and ``PyGILState_Release`` API
* Redesign of opaque types that drastically simplifies their usage
* Extended ability to pass values of type ``[const] void *``
* ``keep_alive`` fix: don't fail when there is no patient
* ``functional.h``: acquire the GIL before calling a Python function
* Added Python RAII type wrappers ``none`` and ``iterable``
* Added ``*args`` and ``*kwargs`` pass-through parameters to ``pybind11.get_include()`` function
* Iterator improvements and fixes
* Documentation on return value policies and opaque types improved

1.6 (April 30, 2016)
----------------------

* Skipped due to an upload to PyPI gone wrong and inability to recover (https://github.com/pypa/packaging-problems/issues/74)

1.5 (April 21, 2016)
----------------------

* For polymorphic types, use RTTI to try to return the closest type registered with pybind11
* Pickling support for serializing and unserializing C++ instances to a byte stream in Python
* Added a convenience routine ``make_iterator()`` which turns a range indicated by a pair of C++ iterators into an iterable Python object
* Added ``len()`` and a variadic ``make_tuple()`` function
* Addressed a rare issue that could confuse the current virtual function dispatcher and another that could lead to crashes in multi-threaded applications
* Added a ``get_include()`` function to the Python module that returns the path of the directory containing the installed pybind11 header files
* Documentation improvements: import issues, symbol visibility, pickling, limitations
* Added casting support for ``std::reference_wrapper<>``

1.4 (April 7, 2016)
--------------------------

* Transparent type conversion for ``std::wstring`` and ``wchar_t``
* Allow passing ``nullptr``-valued strings
* Transparent passing of ``void *`` pointers using capsules
* Transparent support for returning values wrapped in ``std::unique_ptr<>``
* Improved docstring generation for compatibility with Sphinx
* Nicer debug error message when default parameter construction fails
* Support for "opaque" types that bypass the transparent conversion layer for STL containers
* Redesigned type casting interface to avoid ambiguities that could occasionally cause compiler errors
* Redesigned property implementation; fixes crashes due to an unfortunate default return value policy
* Anaconda package generation support

1.3 (March 8, 2016)
--------------------------

* Added support for the Intel C++ compiler (v15+)
* Added support for the STL unordered set/map data structures
* Added support for the STL linked list data structure
* NumPy-style broadcasting support in ``pybind11::vectorize``
* pybind11 now displays more verbose error messages when ``arg::operator=()`` fails
* pybind11 internal data structures now live in a version-dependent namespace to avoid ABI issues
* Many, many bugfixes involving corner cases and advanced usage

1.2 (February 7, 2016)
--------------------------

* Optional: efficient generation of function signatures at compile time using C++14
* Switched to a simpler and more general way of dealing with function default arguments.
  Unused keyword arguments in function calls are now detected and cause errors as expected
* New ``keep_alive`` call policy analogous to Boost.Python's ``with_custodian_and_ward``
* New ``pybind11::base<>`` attribute to indicate a subclass relationship
* Improved interface for RAII type wrappers in ``pytypes.h``
* Use RAII type wrappers consistently within pybind11 itself. This fixes various potential refcount leaks when exceptions occur
* Added new ``bytes`` RAII type wrapper (maps to ``string`` in Python 2.7)
* Made handle and related RAII classes const-correct, using them more consistently everywhere now
* Got rid of the ugly ``__pybind11__`` attributes on the Python side---they are now stored in a C++ hash table that is not visible in Python
* Fixed refcount leaks involving NumPy arrays and bound functions
* Vastly improved handling of shared/smart pointers
* Removed an unnecessary copy operation in ``pybind11::vectorize``
* Fixed naming clashes when both pybind11 and NumPy headers are included
* Added conversions for additional exception types
* Documentation improvements (using multiple extension modules, smart pointers, other minor clarifications)
* Unified infrastructure for parsing variadic arguments in ``class_`` and ``cpp_function``
* Fixed license text (was: ZLIB, should have been: 3-clause BSD)
* Python 3.2 compatibility
* Fixed remaining issues when accessing types in another plugin module
* Added enum comparison and casting methods
* Improved SFINAE-based detection of whether types are copy-constructible
* Eliminated many warnings about unused variables and the use of ``offsetof()``
* Support for ``std::array<>`` conversions

1.1 (December 7, 2015)
--------------------------

* Documentation improvements (GIL, wrapping functions, casting, fixed many typos)
* Generalized conversion of integer types
* Improved support for casting function objects
* Improved support for ``std::shared_ptr<>`` conversions
* Initial support for ``std::set<>`` conversions
* Fixed type resolution issue for types defined in a separate plugin module
* CMake build system improvements
* Factored out generic functionality to non-templated code (smaller code size)
* Added a code size / compile time benchmark vs Boost.Python
* Added an appveyor CI script

1.0 (October 15, 2015)
------------------------

* Initial release
.. _compiling:

Build systems
#############

.. _build-setuptools:

Building with setuptools
========================

For projects on PyPI, building with setuptools is the way to go. Sylvain Corlay has kindly provided an example project which shows how to set up everything, including automatic generation of documentation using Sphinx. Please refer to the [python_example]_ repository.

.. [python_example] https://github.com/pybind/python_example

A helper file is provided with pybind11 that can simplify usage with setuptools. To use pybind11 inside your ``setup.py``, you have to have some system to ensure that ``pybind11`` is installed when you build your package. There are four possible ways to do this, and pybind11 supports all four: You can ask all users to install pybind11 beforehand (bad), you can use :ref:`setup_helpers-pep518` (good, but very new and requires Pip 10), :ref:`setup_helpers-setup_requires` (discouraged by Python packagers now that PEP 518 is available, but it still works everywhere), or you can :ref:`setup_helpers-copy-manually` (always works but you have to manually sync your copy to get updates).

An example of a ``setup.py`` using pybind11's helpers:

.. code-block:: python

    from glob import glob
    from setuptools import setup
    from pybind11.setup_helpers import Pybind11Extension

    ext_modules = [
        Pybind11Extension(
            "python_example",
            sorted(glob("src/*.cpp")),  # Sort source files for reproducibility
        ),
    ]

    setup(..., ext_modules=ext_modules)

If you want to do an automatic search for the highest supported C++ standard, that is supported via a ``build_ext`` command override; it will only affect ``Pybind11Extensions``:

.. code-block:: python

    from glob import glob
    from setuptools import setup
    from pybind11.setup_helpers import Pybind11Extension, build_ext

    ext_modules = [
        Pybind11Extension(
            "python_example",
            sorted(glob("src/*.cpp")),
        ),
    ]

    setup(..., cmdclass={"build_ext": build_ext}, ext_modules=ext_modules)

If you have single-file extension modules that are directly stored in the Python source tree (``foo.cpp`` in the same directory as where a ``foo.py`` would be located), you can also generate ``Pybind11Extensions`` using ``setup_helpers.intree_extensions``: ``intree_extensions(["path/to/foo.cpp", ...])`` returns a list of ``Pybind11Extensions`` which can be passed to ``ext_modules``, possibly after further customizing their attributes (``libraries``, ``include_dirs``, etc.). By doing so, a ``foo.*.so`` extension module will be generated and made available upon installation. ``intree_extensions`` will automatically detect if you are using a ``src``-style layout (as long as no namespace packages are involved), but you can also explicitly pass ``package_dir`` to it (as in ``setuptools.setup``).

Since pybind11 does not require NumPy when building, a light-weight replacement for NumPy's parallel compilation distutils tool is included. Use it like this:

.. code-block:: python

    from pybind11.setup_helpers import ParallelCompile

    # Optional multithreaded build
    ParallelCompile("NPY_NUM_BUILD_JOBS").install()

    setup(...)

The argument is the name of an environment variable to control the number of threads, such as ``NPY_NUM_BUILD_JOBS`` (as used by NumPy), though you can set something different if you want; ``CMAKE_BUILD_PARALLEL_LEVEL`` is another choice a user might expect.
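With that in place, a user building your package can then control the parallelism from the shell. As an illustrative sketch (assuming the ``setup.py`` above and an editable install; the job count is arbitrary):

.. code-block:: bash

    # Ask ParallelCompile for 4 parallel compile jobs during the build
    NPY_NUM_BUILD_JOBS=4 pip install -e .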
You can also pass ``default=N`` to set the default number of threads (0 will take the number of threads available) and ``max=N``, the maximum number of threads; if you have a large extension you may want to set this to a memory-dependent number.

If you are developing rapidly and have a lot of C++ files, you may want to avoid rebuilding files that have not changed. For simple cases where you are using ``pip install -e .`` and do not have local headers, you can skip the rebuild if an object file is newer than its source (headers are not checked!) with the following:

.. code-block:: python

    from pybind11.setup_helpers import ParallelCompile, naive_recompile

    ParallelCompile("NPY_NUM_BUILD_JOBS", needs_recompile=naive_recompile).install()

If you have a more complex build, you can implement a smarter function and pass it to ``needs_recompile``, or you can use [Ccache]_ instead. ``CXX="ccache g++" pip install -e .`` would be the way to use it with GCC, for example. Unlike the simple solution, this works even when not compiling in editable mode, but it does require Ccache to be installed.

Keep in mind that Pip will not even attempt to rebuild if it thinks it has already built a copy of your code, which it deduces from the version number. One way to avoid this is to use [setuptools_scm]_, which will generate a version number that includes the number of commits since your last tag and a hash for a dirty directory. Another way to force a rebuild is to purge your cache or use Pip's ``--no-cache-dir`` option.

.. [Ccache] https://ccache.dev

.. [setuptools_scm] https://github.com/pypa/setuptools_scm

.. _setup_helpers-pep518:

PEP 518 requirements (Pip 10+ required)
---------------------------------------

If you use `PEP 518's <https://www.python.org/dev/peps/pep-0518/>`_ ``pyproject.toml`` file, you can ensure that ``pybind11`` is available during the compilation of your project. When this file exists, Pip will make a new virtual environment, download just the packages listed here in ``requires=``, and build a wheel (binary Python package). It will then throw away the environment, and install your wheel.

Your ``pyproject.toml`` file will likely look something like this:

.. code-block:: toml

    [build-system]
    requires = ["setuptools>=42", "wheel", "pybind11~=2.6.1"]
    build-backend = "setuptools.build_meta"

.. note::

    The main drawback to this method is that a `PEP 517`_ compliant build tool, such as Pip 10+, is required for this approach to work; older versions of Pip completely ignore this file. If you distribute binaries (called wheels in Python) using something like `cibuildwheel`_, remember that ``setup.py`` and ``pyproject.toml`` are not even contained in the wheel, so this high Pip requirement is only for source builds, and will not affect users of your binary wheels. If you are building SDists and wheels, then `pypa-build`_ is the recommended official tool.

.. _PEP 517: https://www.python.org/dev/peps/pep-0517/
.. _cibuildwheel: https://cibuildwheel.readthedocs.io
.. _pypa-build: https://pypa-build.readthedocs.io/en/latest/

.. _setup_helpers-setup_requires:

Classic ``setup_requires``
--------------------------

If you want to support old versions of Pip with the classic ``setup_requires=["pybind11"]`` keyword argument to setup, which triggers a two-phase ``setup.py`` run, then you will need to use something like this to ensure the first pass works (which has not yet installed the ``setup_requires`` packages, since it can't install something it does not know about):
.. code-block:: python

    try:
        from pybind11.setup_helpers import Pybind11Extension
    except ImportError:
        from setuptools import Extension as Pybind11Extension

It doesn't matter that the Extension class is not the enhanced subclass on the first pass, since the second pass will have the ``setup_requires`` requirements available. This is obviously more of a hack than the PEP 518 method, but it supports ancient versions of Pip.

.. _setup_helpers-copy-manually:

Copy manually
-------------

You can also copy ``setup_helpers.py`` directly to your project; it was designed to be usable standalone, like the old example ``setup.py``. You can set ``include_pybind11=False`` to skip including the pybind11 package headers, so you can use it with git submodules and a specific git version. If you use this, you will need to import from a local file in ``setup.py`` and ensure the helper file is part of your MANIFEST.

Closely related, if you include pybind11 as a subproject, you can run the ``setup_helpers.py`` in place. If loaded correctly, this should even pick up the correct include for pybind11, though you can turn it off as shown above if you want to specify it manually. Suggested usage if you have pybind11 as a submodule in ``extern/pybind11``:

.. code-block:: python

    DIR = os.path.abspath(os.path.dirname(__file__))
    sys.path.append(os.path.join(DIR, "extern", "pybind11"))

    from pybind11.setup_helpers import Pybind11Extension  # noqa: E402

    del sys.path[-1]

.. versionchanged:: 2.6

    Added ``setup_helpers`` file.

Building with cppimport
========================

[cppimport]_ is a small Python import hook that determines whether there is a C++ source file whose name matches the requested module. If there is, the file is compiled as a Python extension using pybind11 and placed in the same folder as the C++ source file. Python is then able to find the module and load it.

.. [cppimport] https://github.com/tbenthompson/cppimport

.. _cmake:

Building with CMake
===================

For C++ codebases that have an existing CMake-based build system, a Python extension module can be created with just a few lines of code:

.. code-block:: cmake

    cmake_minimum_required(VERSION 3.4...3.18)
    project(example LANGUAGES CXX)

    add_subdirectory(pybind11)
    pybind11_add_module(example example.cpp)

This assumes that the pybind11 repository is located in a subdirectory named :file:`pybind11` and that the code is located in a file named :file:`example.cpp`. The CMake command ``add_subdirectory`` will import the pybind11 project which provides the ``pybind11_add_module`` function. It will take care of all the details needed to build a Python extension module on any platform.

A working sample project, including a way to invoke CMake from :file:`setup.py` for PyPI integration, can be found in the [cmake_example]_ repository.

.. [cmake_example] https://github.com/pybind/cmake_example

.. versionchanged:: 2.6

    CMake 3.4+ is required.

Further information can be found at :doc:`cmake/index`.

pybind11_add_module
-------------------

To ease the creation of Python extension modules, pybind11 provides a CMake function with the following signature:

.. code-block:: cmake

    pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]
                        [NO_EXTRAS] [THIN_LTO] [OPT_SIZE] source1 [source2 ...])

This function behaves very much like CMake's builtin ``add_library`` (in fact, it's a wrapper function around that command). It will add a library target called ``<name>`` to be built from the listed source files.
.. _cmake: Building with CMake =================== For C++ codebases that have an existing CMake-based build system, a Python extension module can be created with just a few lines of code: .. code-block:: cmake cmake_minimum_required(VERSION 3.4...3.18) project(example LANGUAGES CXX) add_subdirectory(pybind11) pybind11_add_module(example example.cpp) This assumes that the pybind11 repository is located in a subdirectory named :file:`pybind11` and that the code is located in a file named :file:`example.cpp`. The CMake command ``add_subdirectory`` will import the pybind11 project which provides the ``pybind11_add_module`` function. It will take care of all the details needed to build a Python extension module on any platform. A working sample project, including a way to invoke CMake from :file:`setup.py` for PyPI integration, can be found in the [cmake_example]_ repository. .. [cmake_example] https://github.com/pybind/cmake_example .. versionchanged:: 2.6 CMake 3.4+ is required. Further information can be found at :doc:`cmake/index`. pybind11_add_module ------------------- To ease the creation of Python extension modules, pybind11 provides a CMake function with the following signature: .. code-block:: cmake pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL] [NO_EXTRAS] [THIN_LTO] [OPT_SIZE] source1 [source2 ...]) This function behaves very much like CMake's builtin ``add_library`` (in fact, it's a wrapper function around that command). It will add a library target called ``<name>`` to be built from the listed source files. In addition, it will take care of all the Python-specific compiler and linker flags as well as the OS- and Python-version-specific file extension. The produced target ``<name>`` can be further manipulated with regular CMake commands. ``MODULE`` or ``SHARED`` may be given to specify the type of library. If no type is given, ``MODULE`` is used by default which ensures the creation of a Python-exclusive module. Specifying ``SHARED`` will create a more traditional dynamic library which can also be linked from elsewhere. ``EXCLUDE_FROM_ALL`` removes this target from the default build (see CMake docs for details). Since pybind11 is a template library, ``pybind11_add_module`` adds compiler flags to ensure high quality code generation without bloat arising from long symbol names and duplication of code in different translation units. It sets default visibility to *hidden*, which is required for some pybind11 features and functionality when attempting to load multiple pybind11 modules compiled under different pybind11 versions. It also adds additional flags enabling LTO (Link Time Optimization) and stripping unneeded symbols. See the :ref:`FAQ entry <faq:symhidden>` for a more detailed explanation. These latter optimizations are never applied in ``Debug`` mode. If ``NO_EXTRAS`` is given, they will always be disabled, even in ``Release`` mode. However, this will result in code bloat and is generally not recommended. As stated above, LTO is enabled by default. Some newer compilers also support different flavors of LTO such as `ThinLTO`_. Setting ``THIN_LTO`` will cause the function to prefer this flavor if available. The function falls back to regular LTO if ``-flto=thin`` is not available. If ``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` is set (either ``ON`` or ``OFF``), then that will be respected instead of the built-in flag search. .. note:: If you want to set the property form on targets or the ``CMAKE_INTERPROCEDURAL_OPTIMIZATION_<CONFIG>`` versions of this, you should still use ``set(CMAKE_INTERPROCEDURAL_OPTIMIZATION OFF)`` (otherwise a no-op) to disable pybind11's ipo flags. The ``OPT_SIZE`` flag enables size-based optimization equivalent to the standard ``/Os`` or ``-Os`` compiler flags and the ``MinSizeRel`` build type, which avoid optimizations that can substantially increase the size of the resulting binary. This flag is particularly useful in projects that are split into performance-critical parts and associated bindings. In this case, we can compile the project in release mode (and hence, optimize performance globally), and specify ``OPT_SIZE`` for the binding target, where size might be the main concern as performance is often less critical here. A ~25% size reduction has been observed in practice. This flag only changes the optimization behavior at a per-target level and takes precedence over the global CMake build type (``Release``, ``RelWithDebInfo``) except for ``Debug`` builds, where optimizations remain disabled. .. _ThinLTO: http://clang.llvm.org/docs/ThinLTO.html
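To make the optional flags concrete, a size-sensitive binding target could be declared as follows (a sketch only; the target and source names are placeholders):

.. code-block:: cmake

    # Prefer ThinLTO where the compiler supports it, and optimize the
    # bindings for size rather than speed.
    pybind11_add_module(example_bindings MODULE THIN_LTO OPT_SIZE bindings.cpp)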
Configuration variables ----------------------- By default, pybind11 will compile modules with the compiler default or the minimum standard required by pybind11, whichever is higher. You can set the standard explicitly with `CMAKE_CXX_STANDARD <https://cmake.org/cmake/help/latest/variable/CMAKE_CXX_STANDARD.html>`_: .. code-block:: cmake set(CMAKE_CXX_STANDARD 14 CACHE STRING "C++ version selection") # or 11, 14, 17, 20 set(CMAKE_CXX_STANDARD_REQUIRED ON) # optional, ensure standard is supported set(CMAKE_CXX_EXTENSIONS OFF) # optional, keep compiler extensions off The variables can also be set when calling CMake from the command line using the ``-D<variable>=<value>`` flag. You can also manually set ``CXX_STANDARD`` on a target or use ``target_compile_features`` on your targets - anything that CMake supports.
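For instance, either of the following per-target forms works (a sketch, assuming a target named ``example`` as in the earlier snippets):

.. code-block:: cmake

    # Option 1: set the property directly on a single target.
    set_target_properties(example PROPERTIES CXX_STANDARD 17)

    # Option 2: request the standard through compile features.
    target_compile_features(example PRIVATE cxx_std_17)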
Classic Python support: The target Python version can be selected by setting ``PYBIND11_PYTHON_VERSION`` or an exact Python installation can be specified with ``PYTHON_EXECUTABLE``. For example: .. code-block:: bash cmake -DPYBIND11_PYTHON_VERSION=3.6 .. # Another method: cmake -DPYTHON_EXECUTABLE=/path/to/python .. # This often is a good way to get the current Python, works in environments: cmake -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") ..
find_package vs. add_subdirectory --------------------------------- For CMake-based projects that don't include the pybind11 repository internally, an external installation can be detected through ``find_package(pybind11)``. See the `Config file`_ docstring for details of relevant CMake variables. .. code-block:: cmake cmake_minimum_required(VERSION 3.4...3.18) project(example LANGUAGES CXX) find_package(pybind11 REQUIRED) pybind11_add_module(example example.cpp) Note that ``find_package(pybind11)`` will only work correctly if pybind11 has been correctly installed on the system, e.g. after downloading or cloning the pybind11 repository: .. code-block:: bash # Classic CMake cd pybind11 mkdir build cd build cmake .. make install # CMake 3.15+ cd pybind11 cmake -S . -B build cmake --build build -j 2 # Build on 2 cores cmake --install build Once detected, the aforementioned ``pybind11_add_module`` can be employed as before. The function usage and configuration variables are identical no matter if pybind11 is added as a subdirectory or found as an installed package. You can refer to the same [cmake_example]_ repository for a full sample project -- just swap out ``add_subdirectory`` for ``find_package``. .. _Config file: https://github.com/pybind/pybind11/blob/master/tools/pybind11Config.cmake.in
.. _find-python-mode: FindPython mode --------------- CMake 3.12+ (3.15+ recommended, 3.18.2+ ideal) added a new module called FindPython that had a highly improved search algorithm and modern targets and tools. If you use FindPython, pybind11 will detect this and use the existing targets instead: .. code-block:: cmake cmake_minimum_required(VERSION 3.15...3.19) project(example LANGUAGES CXX) find_package(Python COMPONENTS Interpreter Development REQUIRED) find_package(pybind11 CONFIG REQUIRED) # or add_subdirectory(pybind11) pybind11_add_module(example example.cpp) You can also use the targets (as listed below) with FindPython. If you define ``PYBIND11_FINDPYTHON``, pybind11 will perform the FindPython step for you (mostly useful when building pybind11's own tests, or as a way to change search algorithms from the CMake invocation, with ``-DPYBIND11_FINDPYTHON=ON``). .. warning:: If you use FindPython2 and FindPython3 to dual-target Python, use the individual targets listed below, and avoid targets that directly include Python parts.
There are `many ways to hint or force a discovery of a specific Python installation <https://cmake.org/cmake/help/latest/module/FindPython.html>`_; setting ``Python_ROOT_DIR`` may be the most common one (though with virtualenv/venv support, and Conda support, this tends to find the correct Python version more often than the old system did). .. warning:: When the Python libraries (i.e. ``libpythonXX.a`` and ``libpythonXX.so`` on Unix) are not available, as is the case on a manylinux image, the ``Development`` component will not be resolved by ``FindPython``. When not using the embedding functionality, CMake 3.18+ allows you to specify ``Development.Module`` instead of ``Development`` to resolve this issue. .. versionadded:: 2.6 Advanced: interface library targets ----------------------------------- Pybind11 supports modern CMake usage patterns with a set of interface targets, available in all modes. The targets provided are: ``pybind11::headers`` Just the pybind11 headers and minimum compile requirements ``pybind11::python2_no_register`` Quiets the warning/error when mixing C++14 or higher and Python 2 ``pybind11::pybind11`` Python headers + ``pybind11::headers`` + ``pybind11::python2_no_register`` (Python 2 only) ``pybind11::python_link_helper`` Just the "linking" part of ``pybind11::module`` ``pybind11::module`` Everything for extension modules - ``pybind11::pybind11`` + ``Python::Module`` (FindPython CMake 3.15+) or ``pybind11::python_link_helper`` ``pybind11::embed`` Everything for embedding the Python interpreter - ``pybind11::pybind11`` + ``Python::Embed`` (FindPython) or Python libs ``pybind11::lto`` / ``pybind11::thin_lto`` An alternative to ``INTERPROCEDURAL_OPTIMIZATION`` for adding link-time optimization. ``pybind11::windows_extras`` ``/bigobj`` and ``/mp`` for MSVC. ``pybind11::opt_size`` ``/Os`` for MSVC, ``-Os`` for other compilers. Does nothing for debug builds. Two helper functions are also provided: ``pybind11_strip(target)`` Strips a target (uses ``CMAKE_STRIP`` after the target is built) ``pybind11_extension(target)`` Sets the correct extension (with SOABI) for a target. You can use these targets to build complex applications. For example, the ``pybind11_add_module`` function is identical to: .. code-block:: cmake cmake_minimum_required(VERSION 3.4) project(example LANGUAGES CXX) find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11) add_library(example MODULE main.cpp) target_link_libraries(example PRIVATE pybind11::module pybind11::lto pybind11::windows_extras) pybind11_extension(example) pybind11_strip(example) set_target_properties(example PROPERTIES CXX_VISIBILITY_PRESET "hidden" CUDA_VISIBILITY_PRESET "hidden") Instead of setting properties, you can set ``CMAKE_*`` variables to initialize these correctly. .. warning:: Since pybind11 is a metatemplate library, it is crucial that certain compiler flags are provided to ensure high quality code generation. In contrast to the ``pybind11_add_module()`` command, the CMake interface provides a *composable* set of targets to ensure that you retain flexibility. It can be especially important to provide or set these properties; the :ref:`FAQ <faq:symhidden>` contains an explanation on why these are needed. .. versionadded:: 2.6 .. _nopython-mode: Advanced: NOPYTHON mode ----------------------- If you want complete control, you can set ``PYBIND11_NOPYTHON`` to completely disable Python integration (this also happens if you run ``FindPython2`` and ``FindPython3`` without running ``FindPython``). This gives you complete freedom to integrate into an existing system (like `Scikit-Build's <https://scikit-build.readthedocs.io>`_ ``PythonExtensions``). ``pybind11_add_module`` and ``pybind11_extension`` will be unavailable, and the targets will be missing any Python-specific behavior. .. versionadded:: 2.6
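A minimal sketch of this mode (assuming pybind11 lives in a ``pybind11/`` subdirectory and that your own tooling supplies the Python include paths and module suffix):

.. code-block:: cmake

    set(PYBIND11_NOPYTHON ON)  # must be set before pybind11 is added
    add_subdirectory(pybind11)

    add_library(example MODULE main.cpp)
    # Only the interface targets remain; no Python-specific behavior is attached.
    target_link_libraries(example PRIVATE pybind11::headers)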
Embedding the Python interpreter -------------------------------- In addition to extension modules, pybind11 also supports embedding Python into a C++ executable or library. In CMake, simply link with the ``pybind11::embed`` target. It provides everything needed to get the interpreter running. The Python headers and libraries are attached to the target. Unlike ``pybind11::module``, there is no need to manually set any additional properties here. For more information about usage in C++, see :doc:`/advanced/embedding`. .. code-block:: cmake cmake_minimum_required(VERSION 3.4...3.18) project(example LANGUAGES CXX) find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11) add_executable(example main.cpp) target_link_libraries(example PRIVATE pybind11::embed) .. _building_manually: Building manually ================= pybind11 is a header-only library, hence it is not necessary to link against any special libraries and there are no intermediate (magic) translation steps. On Linux, you can compile an example such as the one given in :ref:`simple_example` using the following command: .. code-block:: bash $ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix) The flags given here assume that you're using Python 3. For Python 2, just change the executable appropriately (to ``python`` or ``python2``). The ``python3 -m pybind11 --includes`` command fetches the include paths for both pybind11 and Python headers. This assumes that pybind11 has been installed using ``pip`` or ``conda``. If it hasn't, you can also manually specify ``-I <path-to-pybind11>/include`` together with the Python includes path ``python3-config --includes``. Note that Python 2.7 modules don't use a special suffix, so you should simply use ``example.so`` instead of ``example$(python3-config --extension-suffix)``. Also, the ``--extension-suffix`` option may or may not be available, depending on the distribution; if it is not, the module extension can be manually set to ``.so``. On macOS: the build command is almost the same but it also requires passing the ``-undefined dynamic_lookup`` flag so as to ignore missing symbols when building the module: .. code-block:: bash $ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix) In general, it is advisable to include several additional build parameters that can considerably reduce the size of the created binary. Refer to section :ref:`cmake` for a detailed example of a suitable cross-platform CMake-based build system that works on all platforms including Windows. .. note:: On Linux and macOS, it's better to (intentionally) not link against ``libpython``. The symbols will be resolved when the extension library is loaded into a Python binary. This is preferable because you might have several different installations of a given Python version (e.g. the system-provided Python, and one that ships with a piece of commercial software).
In this way, the plugin will work with both versions, instead of possibly importing a second Python library into a process that already contains one (which will lead to a segfault). Building with Bazel =================== You can build with the Bazel build system using the `pybind11_bazel <https://github.com/pybind/pybind11_bazel>`_ repository. Generating binding code automatically ===================================== The ``Binder`` project is a tool for automatic generation of pybind11 binding code by introspecting existing C++ codebases using LLVM/Clang. See the [binder]_ documentation for details. .. [binder] http://cppbinder.readthedocs.io/en/latest/about.html [AutoWIG]_ is a Python library that wraps automatically compiled libraries into high-level languages. It parses C++ code using LLVM/Clang technologies and generates the wrappers using the Mako templating engine. The approach is automatic, extensible, and applies to very complex C++ libraries, composed of thousands of classes or incorporating modern meta-programming constructs. .. [AutoWIG] https://github.com/StatisKit/AutoWIG [robotpy-build]_ is a pure Python, cross-platform build tool that aims to simplify creation of Python wheels for pybind11 projects, and provide cross-project dependency management. Additionally, it is able to autogenerate customizable pybind11-based wrappers by parsing C++ header files. .. [robotpy-build] https://robotpy-build.readthedocs.io
Benchmark ========= The following is the result of a synthetic benchmark comparing both compilation time and module size of pybind11 against Boost.Python. A detailed report about a Boost.Python to pybind11 conversion of a real project is available here: [#f1]_. .. [#f1] http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf Setup ----- A Python script (see the ``docs/benchmark.py`` file) was used to generate a set of files with dummy classes whose count increases for each successive benchmark (between 1 and 2048 classes in powers of two). Each class has four methods with a randomly generated signature with a return value and four arguments. (There was no particular reason for this setup other than the desire to generate many unique function signatures whose count could be controlled in a simple way.) Here is an example of the binding code for one class: .. code-block:: cpp ... class cl034 { public: cl279 *fn_000(cl084 *, cl057 *, cl065 *, cl042 *); cl025 *fn_001(cl098 *, cl262 *, cl414 *, cl121 *); cl085 *fn_002(cl445 *, cl297 *, cl145 *, cl421 *); cl470 *fn_003(cl200 *, cl323 *, cl332 *, cl492 *); }; ... PYBIND11_MODULE(example, m) { ... py::class_<cl034>(m, "cl034") .def("fn_000", &cl034::fn_000) .def("fn_001", &cl034::fn_001) .def("fn_002", &cl034::fn_002) .def("fn_003", &cl034::fn_003) ... } The Boost.Python version looks almost identical except that a return value policy had to be specified as an argument to ``def()``. For both libraries, compilation was done with .. code-block:: bash Apple LLVM version 7.0.2 (clang-700.1.81) and the following compilation flags .. code-block:: bash g++ -Os -shared -rdynamic -undefined dynamic_lookup -fvisibility=hidden -std=c++14 Compilation time ---------------- The following log-log plot shows how the compilation time grows for an increasing number of class and function declarations. pybind11 includes many fewer headers, which initially leads to shorter compilation times, but the performance is ultimately fairly similar (pybind11 is 19.8 seconds faster for the largest file with 2048 classes and a total of 8192 methods -- a modest **1.2x** speedup relative to Boost.Python, which required 116.35 seconds). .. only:: not latex .. image:: pybind11_vs_boost_python1.svg .. only:: latex .. image:: pybind11_vs_boost_python1.png Module size ----------- Differences between the two libraries become much more pronounced when considering the file size of the generated Python plugin: for the largest file, the binary generated by Boost.Python required 16.8 MiB, which was **2.17 times** / **9.1 megabytes** larger than the output generated by pybind11. For very small inputs, Boost.Python has an edge in the plot below -- however, note that it stores many definitions in an external library, whose size was not included here, hence the comparison is slightly shifted in Boost.Python's favor. .. only:: not latex .. image:: pybind11_vs_boost_python2.svg .. only:: latex .. image:: pybind11_vs_boost_python2.png
.. _reference: .. warning:: Please be advised that the reference documentation discussing pybind11 internals is currently incomplete. Please refer to the previous sections and the pybind11 header files for the nitty gritty details. Reference ######### .. _macros: Macros ====== .. doxygendefine:: PYBIND11_MODULE .. _core_types: Convenience classes for arbitrary Python types ============================================== Common member functions ----------------------- .. doxygenclass:: object_api :members: Without reference counting -------------------------- .. doxygenclass:: handle :members: With reference counting ----------------------- .. doxygenclass:: object :members: .. doxygenfunction:: reinterpret_borrow .. doxygenfunction:: reinterpret_steal Convenience classes for specific Python types ============================================= .. doxygenclass:: module_ :members: .. doxygengroup:: pytypes :members: Convenience functions converting to Python types ================================================ .. doxygenfunction:: make_tuple(Args&&...) .. doxygenfunction:: make_iterator(Iterator, Sentinel, Extra &&...) .. doxygenfunction:: make_iterator(Type &, Extra&&...) .. doxygenfunction:: make_key_iterator(Iterator, Sentinel, Extra &&...) .. doxygenfunction:: make_key_iterator(Type &, Extra&&...) .. doxygenfunction:: make_value_iterator(Iterator, Sentinel, Extra &&...) .. doxygenfunction:: make_value_iterator(Type &, Extra&&...) .. _extras: Passing extra arguments to ``def`` or ``class_`` ================================================ .. doxygengroup:: annotations :members: Embedding the interpreter ========================= .. doxygendefine:: PYBIND11_EMBEDDED_MODULE .. doxygenfunction:: initialize_interpreter .. doxygenfunction:: finalize_interpreter .. doxygenclass:: scoped_interpreter Redirecting C++ streams ======================= .. doxygenclass:: scoped_ostream_redirect .. doxygenclass:: scoped_estream_redirect .. doxygenfunction:: add_ostream_redirect Python built-in functions ========================= .. doxygengroup:: python_builtins :members: Inheritance =========== See :doc:`/classes` and :doc:`/advanced/classes` for more detail. .. doxygendefine:: PYBIND11_OVERRIDE .. doxygendefine:: PYBIND11_OVERRIDE_PURE .. doxygendefine:: PYBIND11_OVERRIDE_NAME .. doxygendefine:: PYBIND11_OVERRIDE_PURE_NAME .. doxygenfunction:: get_override Exceptions ========== .. doxygenclass:: error_already_set :members: .. doxygenclass:: builtin_exception :members: Literals ======== .. doxygennamespace:: literals
import datetime as dt
import os
import random

nfns = 4  # Functions per class
nargs = 4  # Arguments per function


def generate_dummy_code_pybind11(nclasses=10):
    decl = ""
    bindings = ""

    # Forward-declare all classes first.
    for cl in range(nclasses):
        decl += "class cl%03i;\n" % cl
    decl += "\n"

    # Emit each class with randomly typed methods, plus its binding code.
    for cl in range(nclasses):
        decl += "class cl%03i {\n" % cl
        decl += "public:\n"
        bindings += '    py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
        for fn in range(nfns):
            ret = random.randint(0, nclasses - 1)
            params = [random.randint(0, nclasses - 1) for i in range(nargs)]
            decl += "    cl%03i *fn_%03i(" % (ret, fn)
            decl += ", ".join("cl%03i *" % p for p in params)
            decl += ");\n"
            bindings += '        .def("fn_%03i", &cl%03i::fn_%03i)\n' % (fn, cl, fn)
        decl += "};\n\n"
        bindings += "        ;\n"

    result = "#include <pybind11/pybind11.h>\n\n"
    result += "namespace py = pybind11;\n\n"
    result += decl + "\n"
    result += "PYBIND11_MODULE(example, m) {\n"
    result += bindings
    result += "}"
    return result


def generate_dummy_code_boost(nclasses=10):
    decl = ""
    bindings = ""

    for cl in range(nclasses):
        decl += "class cl%03i;\n" % cl
    decl += "\n"

    for cl in range(nclasses):
        decl += "class cl%03i {\n" % cl
        decl += "public:\n"
        bindings += '    py::class_<cl%03i>("cl%03i")\n' % (cl, cl)
        for fn in range(nfns):
            ret = random.randint(0, nclasses - 1)
            params = [random.randint(0, nclasses - 1) for i in range(nargs)]
            decl += "    cl%03i *fn_%03i(" % (ret, fn)
            decl += ", ".join("cl%03i *" % p for p in params)
            decl += ");\n"
            bindings += (
                '        .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n'
                % (fn, cl, fn)
            )
        decl += "};\n\n"
        bindings += "        ;\n"

    result = "#include <boost/python.hpp>\n\n"
    result += "namespace py = boost::python;\n\n"
    result += decl + "\n"
    result += "BOOST_PYTHON_MODULE(example) {\n"
    result += bindings
    result += "}"
    return result


# Compile the generated file for 1, 2, 4, ... classes with both libraries,
# recording the elapsed compilation time and the size of the produced module.
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
    print("{")
    for i in range(0, 10):
        nclasses = 2 ** i
        with open("test.cpp", "w") as f:
            f.write(codegen(nclasses))
        n1 = dt.datetime.now()
        os.system(
            "g++ -Os -shared -rdynamic -undefined dynamic_lookup "
            "-fvisibility=hidden -std=c++14 test.cpp -I include "
            "-I /System/Library/Frameworks/Python.framework/Headers -o test.so"
        )
        n2 = dt.datetime.now()
        elapsed = (n2 - n1).total_seconds()
        size = os.stat("test.so").st_size
        print("    {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
    print("}")
.. _basics: First steps ########### This section demonstrates the basic features of pybind11. Before getting started, make sure that your development environment is set up to compile the included set of test cases. Compiling the test cases ======================== Linux/macOS ----------- On Linux you'll need to install the **python-dev** or **python3-dev** packages as well as **cmake**. On macOS, the included Python version works out of the box, but **cmake** must still be installed. After installing the prerequisites, run .. code-block:: bash mkdir build cd build cmake .. make check -j 4 The last line will both compile and run the tests. Windows ------- On Windows, only **Visual Studio 2015** and newer are supported since pybind11 relies on various C++11 language features that break older versions of Visual Studio. .. Note:: To use C++17 in Visual Studio 2017 (MSVC 14.1), pybind11 requires the flag ``/permissive-`` to be passed to the compiler `to enforce standard conformance`_. When building with Visual Studio 2019, this is not strictly necessary, but still advised. .. _`to enforce standard conformance`: https://docs.microsoft.com/en-us/cpp/build/reference/permissive-standards-conformance?view=vs-2017 To compile and run the tests: .. code-block:: batch mkdir build cd build cmake .. cmake --build . --config Release --target check This will create a Visual Studio project, compile and run the target, all from the command line. .. Note:: If all tests fail, make sure that the Python binary and the testcases are compiled for the same processor type and bitness (i.e. either **i386** or **x86_64**). You can specify **x86_64** as the target architecture for the generated Visual Studio project using ``cmake -A x64 ..``. .. seealso:: Advanced users who are already familiar with Boost.Python may want to skip the tutorial and look at the test cases in the :file:`tests` directory, which exercise all features of pybind11. Header and namespace conventions ================================ For brevity, all code examples assume that the following two lines are present: .. code-block:: cpp #include <pybind11/pybind11.h> namespace py = pybind11; Some features may require additional headers, but those will be specified as needed. .. _simple_example: Creating bindings for a simple function ======================================= Let's start by creating Python bindings for an extremely simple function, which adds two numbers and returns their result: .. code-block:: cpp int add(int i, int j) { return i + j; } For simplicity [#f1]_, we'll put both this function and the binding code into a file named :file:`example.cpp` with the following contents: .. code-block:: cpp #include <pybind11/pybind11.h> int add(int i, int j) { return i + j; } PYBIND11_MODULE(example, m) { m.doc() = "pybind11 example plugin"; // optional module docstring m.def("add", &add, "A function that adds two numbers"); } .. [#f1] In practice, implementation and binding code will generally be located in separate files. The :func:`PYBIND11_MODULE` macro creates a function that will be called when an ``import`` statement is issued from within Python. The module name (``example``) is given as the first macro argument (it should not be in quotes). The second argument (``m``) defines a variable of type :class:`py::module_ <module>` which is the main interface for creating bindings. The method :func:`module_::def` generates binding code that exposes the ``add()`` function to Python. ..
note:: Notice how little code was needed to expose our function to Python: all details regarding the function's parameters and return value were automatically inferred using template metaprogramming. This overall approach and the used syntax are borrowed from Boost.Python, though the underlying implementation is very different. pybind11 is a header-only library, hence it is not necessary to link against any special libraries and there are no intermediate (magic) translation steps. On Linux, the above example can be compiled using the following command: .. code-block:: bash $ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix) .. note:: If you used :ref:`include_as_a_submodule` to get the pybind11 source, then use ``$(python3-config --includes) -Iextern/pybind11/include`` instead of ``$(python3 -m pybind11 --includes)`` in the above compilation, as explained in :ref:`building_manually`. For more details on the required compiler flags on Linux and macOS, see :ref:`building_manually`. For complete cross-platform compilation instructions, refer to the :ref:`compiling` page. The `python_example`_ and `cmake_example`_ repositories are also a good place to start. They are both complete project examples with cross-platform build systems. The only difference between the two is that `python_example`_ uses Python's ``setuptools`` to build the module, while `cmake_example`_ uses CMake (which may be preferable for existing C++ projects). .. _python_example: https://github.com/pybind/python_example .. _cmake_example: https://github.com/pybind/cmake_example Building the above C++ code will produce a binary module file that can be imported to Python. Assuming that the compiled module is located in the current directory, the following interactive Python session shows how to load and execute the example: .. code-block:: pycon $ python Python 2.7.10 (default, Aug 22 2015, 20:33:39) [GCC 4.2.1 Compatible Apple LLVM 7.0.0 (clang-700.0.59.1)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import example >>> example.add(1, 2) 3L >>> .. _keyword_args: Keyword arguments ================= With a simple code modification, it is possible to inform Python about the names of the arguments ("i" and "j" in this case). .. code-block:: cpp m.def("add", &add, "A function which adds two numbers", py::arg("i"), py::arg("j")); :class:`arg` is one of several special tag classes which can be used to pass metadata into :func:`module_::def`. With this modified binding code, we can now call the function using keyword arguments, which is a more readable alternative particularly for functions taking many parameters: .. code-block:: pycon >>> import example >>> example.add(i=1, j=2) 3L The keyword names also appear in the function signatures within the documentation. .. code-block:: pycon >>> help(example) .... FUNCTIONS add(...) Signature : (i: int, j: int) -> int A function which adds two numbers A shorter notation for named arguments is also available: .. code-block:: cpp // regular notation m.def("add1", &add, py::arg("i"), py::arg("j")); // shorthand using namespace pybind11::literals; m.def("add2", &add, "i"_a, "j"_a); The :var:`_a` suffix forms a C++11 literal which is equivalent to :class:`arg`. Note that the literal operator must first be made visible with the directive ``using namespace pybind11::literals``. This does not bring in anything else from the ``pybind11`` namespace except for literals. .. 
_default_args: Default arguments ================= Suppose now that the function to be bound has default arguments, e.g.: .. code-block:: cpp int add(int i = 1, int j = 2) { return i + j; } Unfortunately, pybind11 cannot automatically extract these parameters, since they are not part of the function's type information. However, they are simple to specify using an extension of :class:`arg`: .. code-block:: cpp m.def("add", &add, "A function which adds two numbers", py::arg("i") = 1, py::arg("j") = 2); The default values also appear within the documentation. .. code-block:: pycon >>> help(example) .... FUNCTIONS add(...) Signature : (i: int = 1, j: int = 2) -> int A function which adds two numbers The shorthand notation is also available for default arguments: .. code-block:: cpp // regular notation m.def("add1", &add, py::arg("i") = 1, py::arg("j") = 2); // shorthand m.def("add2", &add, "i"_a=1, "j"_a=2); Exporting variables =================== To expose a value from C++, use the ``attr`` function to register it in a module as shown below. Built-in types and general objects (more on that later) are automatically converted when assigned as attributes, and can be explicitly converted using the function ``py::cast``. .. code-block:: cpp PYBIND11_MODULE(example, m) { m.attr("the_answer") = 42; py::object world = py::cast("World"); m.attr("what") = world; } These are then accessible from Python: .. code-block:: pycon >>> import example >>> example.the_answer 42 >>> example.what 'World' .. _supported_types: Supported data types ==================== A large number of data types are supported out of the box and can be used seamlessly as function arguments, return values or with ``py::cast`` in general. For a full overview, see the :doc:`advanced/cast/index` section.
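To give a flavor of what works out of the box, STL containers convert automatically once the optional ``pybind11/stl.h`` header is included (a brief sketch; the function and module names are purely illustrative):

.. code-block:: cpp

    #include <pybind11/pybind11.h>
    #include <pybind11/stl.h>  // enables std::vector <-> Python list conversion

    #include <vector>

    std::vector<int> double_all(std::vector<int> v) {
        for (auto &x : v)
            x *= 2;
        return v;
    }

    PYBIND11_MODULE(example, m) {
        // A Python list of ints arrives as std::vector<int> and is
        // converted back to a list on return.
        m.def("double_all", &double_all);
    }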
.. _classes: Object-oriented code #################### Creating bindings for a custom type =================================== Let's now look at a more complex example where we'll create bindings for a custom C++ data structure named ``Pet``. Its definition is given below: .. code-block:: cpp struct Pet { Pet(const std::string &name) : name(name) { } void setName(const std::string &name_) { name = name_; } const std::string &getName() const { return name; } std::string name; }; The binding code for ``Pet`` looks as follows: .. code-block:: cpp #include <pybind11/pybind11.h> namespace py = pybind11; PYBIND11_MODULE(example, m) { py::class_<Pet>(m, "Pet") .def(py::init<const std::string &>()) .def("setName", &Pet::setName) .def("getName", &Pet::getName); } :class:`class_` creates bindings for a C++ *class* or *struct*-style data structure. :func:`init` is a convenience function that takes the types of a constructor's parameters as template arguments and wraps the corresponding constructor (see the :ref:`custom_constructors` section for details). An interactive Python session demonstrating this example is shown below: .. code-block:: pycon % python >>> import example >>> p = example.Pet("Molly") >>> print(p) <example.Pet object at 0x10cd98060> >>> p.getName() u'Molly' >>> p.setName("Charly") >>> p.getName() u'Charly' .. seealso:: Static member functions can be bound in the same way using :func:`class_::def_static`. Keyword and default arguments ============================= It is possible to specify keyword and default arguments using the syntax discussed in the previous chapter. Refer to the sections :ref:`keyword_args` and :ref:`default_args` for details. Binding lambda functions ======================== Note how ``print(p)`` produced a rather useless summary of our data structure in the example above: .. code-block:: pycon >>> print(p) <example.Pet object at 0x10cd98060> To address this, we could bind a utility function that returns a human-readable summary to the special method slot named ``__repr__``. Unfortunately, there is no suitable functionality in the ``Pet`` data structure, and it would be nice if we did not have to change it. This can easily be accomplished by binding a Lambda function instead: .. code-block:: cpp py::class_<Pet>(m, "Pet") .def(py::init<const std::string &>()) .def("setName", &Pet::setName) .def("getName", &Pet::getName) .def("__repr__", [](const Pet &a) { return "<example.Pet named '" + a.name + "'>"; } ); Both stateless [#f1]_ and stateful lambda closures are supported by pybind11. With the above change, the same Python code now produces the following output: .. code-block:: pycon >>> print(p) <example.Pet named 'Molly'> .. [#f1] Stateless closures are those with an empty pair of brackets ``[]`` as the capture object. .. _properties: Instance and static fields ========================== We can also directly expose the ``name`` field using the :func:`class_::def_readwrite` method. A similar :func:`class_::def_readonly` method also exists for ``const`` fields. .. code-block:: cpp py::class_<Pet>(m, "Pet") .def(py::init<const std::string &>()) .def_readwrite("name", &Pet::name) // ... remainder ... This makes it possible to write .. code-block:: pycon >>> p = example.Pet("Molly") >>> p.name u'Molly' >>> p.name = "Charly" >>> p.name u'Charly' Now suppose that ``Pet::name`` was a private internal variable that can only be accessed via setters and getters. .. 
code-block:: cpp class Pet { public: Pet(const std::string &name) : name(name) { } void setName(const std::string &name_) { name = name_; } const std::string &getName() const { return name; } private: std::string name; }; In this case, the method :func:`class_::def_property` (:func:`class_::def_property_readonly` for read-only data) can be used to provide a field-like interface within Python that will transparently call the setter and getter functions: .. code-block:: cpp py::class_<Pet>(m, "Pet") .def(py::init<const std::string &>()) .def_property("name", &Pet::getName, &Pet::setName) // ... remainder ... Write-only properties can be defined by passing ``nullptr`` as the input for the read function. .. seealso:: Similar functions :func:`class_::def_readwrite_static`, :func:`class_::def_readonly_static`, :func:`class_::def_property_static`, and :func:`class_::def_property_readonly_static` are provided for binding static variables and properties. Please also see the section on :ref:`static_properties` in the advanced part of the documentation. Dynamic attributes ================== Native Python classes can pick up new attributes dynamically: .. code-block:: pycon >>> class Pet: ... name = "Molly" ... >>> p = Pet() >>> p.name = "Charly" # overwrite existing >>> p.age = 2 # dynamically add a new attribute By default, classes exported from C++ do not support this and the only writable attributes are the ones explicitly defined using :func:`class_::def_readwrite` or :func:`class_::def_property`. .. code-block:: cpp py::class_<Pet>(m, "Pet") .def(py::init<>()) .def_readwrite("name", &Pet::name); Trying to set any other attribute results in an error: .. code-block:: pycon >>> p = example.Pet() >>> p.name = "Charly" # OK, attribute defined in C++ >>> p.age = 2 # fail AttributeError: 'Pet' object has no attribute 'age' To enable dynamic attributes for C++ classes, the :class:`py::dynamic_attr` tag must be added to the :class:`py::class_` constructor: .. code-block:: cpp py::class_<Pet>(m, "Pet", py::dynamic_attr()) .def(py::init<>()) .def_readwrite("name", &Pet::name); Now everything works as expected: .. code-block:: pycon >>> p = example.Pet() >>> p.name = "Charly" # OK, overwrite value in C++ >>> p.age = 2 # OK, dynamically add a new attribute >>> p.__dict__ # just like a native Python class {'age': 2} Note that there is a small runtime cost for a class with dynamic attributes. Not only because of the addition of a ``__dict__``, but also because of more expensive garbage collection tracking which must be activated to resolve possible circular references. Native Python classes incur this same cost by default, so this is not anything to worry about. By default, pybind11 classes are more efficient than native Python classes. Enabling dynamic attributes just brings them on par. .. _inheritance: Inheritance and automatic downcasting ===================================== Suppose now that the example consists of two data structures with an inheritance relationship: .. code-block:: cpp struct Pet { Pet(const std::string &name) : name(name) { } std::string name; }; struct Dog : Pet { Dog(const std::string &name) : Pet(name) { } std::string bark() const { return "woof!"; } }; There are two different ways of indicating a hierarchical relationship to pybind11: the first specifies the C++ base class as an extra template parameter of the :class:`class_`: ..
code-block:: cpp py::class_<Pet>(m, "Pet") .def(py::init<const std::string &>()) .def_readwrite("name", &Pet::name); // Method 1: template parameter: py::class_<Dog, Pet /* <- specify C++ parent type */>(m, "Dog") .def(py::init<const std::string &>()) .def("bark", &Dog::bark); Alternatively, we can also assign a name to the previously bound ``Pet`` :class:`class_` object and reference it when binding the ``Dog`` class: .. code-block:: cpp py::class_<Pet> pet(m, "Pet"); pet.def(py::init<const std::string &>()) .def_readwrite("name", &Pet::name); // Method 2: pass parent class_ object: py::class_<Dog>(m, "Dog", pet /* <- specify Python parent type */) .def(py::init<const std::string &>()) .def("bark", &Dog::bark); Functionality-wise, both approaches are equivalent. Afterwards, instances will expose fields and methods of both types: .. code-block:: pycon >>> p = example.Dog("Molly") >>> p.name u'Molly' >>> p.bark() u'woof!' The C++ classes defined above are regular non-polymorphic types with an inheritance relationship. This is reflected in Python: .. code-block:: cpp // Return a base pointer to a derived instance m.def("pet_store", []() { return std::unique_ptr<Pet>(new Dog("Molly")); }); .. code-block:: pycon >>> p = example.pet_store() >>> type(p) # `Dog` instance behind `Pet` pointer Pet # no pointer downcasting for regular non-polymorphic types >>> p.bark() AttributeError: 'Pet' object has no attribute 'bark' The function returned a ``Dog`` instance, but because it's a non-polymorphic type behind a base pointer, Python only sees a ``Pet``. In C++, a type is only considered polymorphic if it has at least one virtual function and pybind11 will automatically recognize this: .. code-block:: cpp struct PolymorphicPet { virtual ~PolymorphicPet() = default; }; struct PolymorphicDog : PolymorphicPet { std::string bark() const { return "woof!"; } }; // Same binding code py::class_<PolymorphicPet>(m, "PolymorphicPet"); py::class_<PolymorphicDog, PolymorphicPet>(m, "PolymorphicDog") .def(py::init<>()) .def("bark", &PolymorphicDog::bark); // Again, return a base pointer to a derived instance m.def("pet_store2", []() { return std::unique_ptr<PolymorphicPet>(new PolymorphicDog); }); .. code-block:: pycon >>> p = example.pet_store2() >>> type(p) PolymorphicDog # automatically downcast >>> p.bark() u'woof!' Given a pointer to a polymorphic base, pybind11 performs automatic downcasting to the actual derived type. Note that this goes beyond the usual situation in C++: we don't just get access to the virtual functions of the base, we get the concrete derived type including functions and attributes that the base type may not even be aware of. .. seealso:: For more information about polymorphic behavior see :ref:`overriding_virtuals`. Overloaded methods ================== Sometimes there are several overloaded C++ methods with the same name taking different kinds of input arguments: .. code-block:: cpp struct Pet { Pet(const std::string &name, int age) : name(name), age(age) { } void set(int age_) { age = age_; } void set(const std::string &name_) { name = name_; } std::string name; int age; }; Attempting to bind ``Pet::set`` will cause an error since the compiler does not know which method the user intended to select. We can disambiguate by casting them to function pointers. Binding multiple functions to the same Python name automatically creates a chain of function overloads that will be tried in sequence. .. 
code-block:: cpp py::class_<Pet>(m, "Pet") .def(py::init<const std::string &, int>()) .def("set", static_cast<void (Pet::*)(int)>(&Pet::set), "Set the pet's age") .def("set", static_cast<void (Pet::*)(const std::string &)>(&Pet::set), "Set the pet's name"); The overload signatures are also visible in the method's docstring: .. code-block:: pycon >>> help(example.Pet) class Pet(__builtin__.object) | Methods defined here: | | __init__(...) | Signature : (Pet, str, int) -> NoneType | | set(...) | 1. Signature : (Pet, int) -> NoneType | | Set the pet's age | | 2. Signature : (Pet, str) -> NoneType | | Set the pet's name If you have a C++14 compatible compiler [#cpp14]_, you can use an alternative syntax to cast the overloaded function: .. code-block:: cpp py::class_<Pet>(m, "Pet") .def("set", py::overload_cast<int>(&Pet::set), "Set the pet's age") .def("set", py::overload_cast<const std::string &>(&Pet::set), "Set the pet's name"); Here, ``py::overload_cast`` only requires the parameter types to be specified. The return type and class are deduced. This avoids the additional noise of ``void (Pet::*)()`` as seen in the raw cast. If a function is overloaded based on constness, the ``py::const_`` tag should be used: .. code-block:: cpp struct Widget { int foo(int x, float y); int foo(int x, float y) const; }; py::class_<Widget>(m, "Widget") .def("foo_mutable", py::overload_cast<int, float>(&Widget::foo)) .def("foo_const", py::overload_cast<int, float>(&Widget::foo, py::const_)); If you prefer the ``py::overload_cast`` syntax but have a C++11 compatible compiler only, you can use ``py::detail::overload_cast_impl`` with an additional set of parentheses: .. code-block:: cpp template <typename... Args> using overload_cast_ = pybind11::detail::overload_cast_impl<Args...>; py::class_<Pet>(m, "Pet") .def("set", overload_cast_<int>()(&Pet::set), "Set the pet's age") .def("set", overload_cast_<const std::string &>()(&Pet::set), "Set the pet's name"); .. [#cpp14] A compiler which supports the ``-std=c++14`` flag or Visual Studio 2015 Update 2 and newer. .. note:: To define multiple overloaded constructors, simply declare one after the other using the ``.def(py::init<...>())`` syntax. The existing machinery for specifying keyword and default arguments also works. Enumerations and internal types =============================== Let's now suppose that the example class contains internal types like enumerations, e.g.: .. code-block:: cpp struct Pet { enum Kind { Dog = 0, Cat }; struct Attributes { float age = 0; }; Pet(const std::string &name, Kind type) : name(name), type(type) { } std::string name; Kind type; Attributes attr; }; The binding code for this example looks as follows: .. code-block:: cpp py::class_<Pet> pet(m, "Pet"); pet.def(py::init<const std::string &, Pet::Kind>()) .def_readwrite("name", &Pet::name) .def_readwrite("type", &Pet::type) .def_readwrite("attr", &Pet::attr); py::enum_<Pet::Kind>(pet, "Kind") .value("Dog", Pet::Kind::Dog) .value("Cat", Pet::Kind::Cat) .export_values(); py::class_<Pet::Attributes> attributes(pet, "Attributes") .def(py::init<>()) .def_readwrite("age", &Pet::Attributes::age); To ensure that the nested types ``Kind`` and ``Attributes`` are created within the scope of ``Pet``, the ``pet`` :class:`class_` instance must be supplied to the :class:`enum_` and :class:`class_` constructor. The :func:`enum_::export_values` function exports the enum entries into the parent scope, which should be skipped for newer C++11-style strongly typed enums. .. 
code-block:: pycon >>> p = Pet("Lucy", Pet.Cat) >>> p.type Kind.Cat >>> int(p.type) 1L The entries defined by the enumeration type are exposed in the ``__members__`` property: .. code-block:: pycon >>> Pet.Kind.__members__ {'Dog': Kind.Dog, 'Cat': Kind.Cat} The ``name`` property returns the name of the enum value as a unicode string. .. note:: It is also possible to use ``str(enum)``, however these accomplish different goals. The following shows how these two approaches differ. .. code-block:: pycon >>> p = Pet("Lucy", Pet.Cat) >>> pet_type = p.type >>> pet_type Pet.Cat >>> str(pet_type) 'Pet.Cat' >>> pet_type.name 'Cat' .. note:: When the special tag ``py::arithmetic()`` is specified to the ``enum_`` constructor, pybind11 creates an enumeration that also supports rudimentary arithmetic and bit-level operations like comparisons, and, or, xor, negation, etc. .. code-block:: cpp py::enum_<Pet::Kind>(pet, "Kind", py::arithmetic()) ... By default, these are omitted to conserve space.
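As noted above, ``export_values()`` should be omitted for C++11 scoped enums, so the enumerators stay inside the enum's scope on the Python side. A sketch of such a binding (``Color`` is a made-up type used purely for illustration):

.. code-block:: cpp

    enum class Color { Red, Green, Blue };

    py::enum_<Color>(m, "Color")
        .value("Red", Color::Red)
        .value("Green", Color::Green)
        .value("Blue", Color::Blue);
    // No .export_values(): access the entries as Color.Red, Color.Green,
    // Color.Blue rather than as top-level names.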
Functions ######### Before proceeding with this section, make sure that you are already familiar with the basics of binding functions and classes, as explained in :doc:`/basics` and :doc:`/classes`. The following guide is applicable to both free and member functions, i.e. *methods* in Python. .. _return_value_policies: Return value policies ===================== Python and C++ use fundamentally different ways of managing the memory and lifetime of objects managed by them. This can lead to issues when creating bindings for functions that return a non-trivial type. Just by looking at the type information, it is not clear whether Python should take charge of the returned value and eventually free its resources, or if this is handled on the C++ side. For this reason, pybind11 provides several *return value policy* annotations that can be passed to the :func:`module_::def` and :func:`class_::def` functions. The default policy is :enum:`return_value_policy::automatic`. Return value policies are tricky, and it's very important to get them right. Just to illustrate what can go wrong, consider the following simple example: .. code-block:: cpp /* Function declaration */ Data *get_data() { return _data; /* (pointer to a static data structure) */ } ... /* Binding code */ m.def("get_data", &get_data); // <-- KABOOM, will cause crash when called from Python What's going on here? When ``get_data()`` is called from Python, the return value (a native C++ type) must be wrapped to turn it into a usable Python type. In this case, the default return value policy (:enum:`return_value_policy::automatic`) causes pybind11 to assume ownership of the static ``_data`` instance. When Python's garbage collector eventually deletes the Python wrapper, pybind11 will also attempt to delete the C++ instance (via ``operator delete()``) due to the implied ownership. At this point, the entire application will come crashing down, though errors could also be more subtle and involve silent data corruption. In the above example, the policy :enum:`return_value_policy::reference` should have been specified so that the global data instance is only *referenced* without any implied transfer of ownership, i.e.: .. code-block:: cpp m.def("get_data", &get_data, py::return_value_policy::reference); On the other hand, this is not the right policy for many other situations, where ignoring ownership could lead to resource leaks. As a developer using pybind11, it's important to be familiar with the different return value policies, including which situation calls for which one of them. The following table provides an overview of available policies:

.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|

.. list-table::
   :header-rows: 1
   :widths: 35 65

   * - Return value policy
     - Description
   * - :enum:`return_value_policy::take_ownership`
     - Reference an existing object (i.e. do not create a new copy) and take ownership. Python will call the destructor and delete operator when the object's reference count reaches zero. Undefined behavior ensues when the C++ side does the same, or when the data was not dynamically allocated.
   * - :enum:`return_value_policy::copy`
     - Create a new copy of the returned object, which will be owned by Python. This policy is comparably safe because the lifetimes of the two instances are decoupled.
   * - :enum:`return_value_policy::move`
     - Use ``std::move`` to move the return value contents into a new instance that will be owned by Python. This policy is comparably safe because the lifetimes of the two instances (move source and destination) are decoupled.
   * - :enum:`return_value_policy::reference`
     - Reference an existing object, but do not take ownership. The C++ side is responsible for managing the object's lifetime and deallocating it when it is no longer used. Warning: undefined behavior will ensue when the C++ side deletes an object that is still referenced and used by Python.
   * - :enum:`return_value_policy::reference_internal`
     - Indicates that the lifetime of the return value is tied to the lifetime of a parent object, namely the implicit ``this``, or ``self`` argument of the called method or property. Internally, this policy works just like :enum:`return_value_policy::reference` but additionally applies a ``keep_alive<0, 1>`` *call policy* (described in the next section) that prevents the parent object from being garbage collected as long as the return value is referenced by Python. This is the default policy for property getters created via ``def_property``, ``def_readwrite``, etc.
   * - :enum:`return_value_policy::automatic`
     - This policy falls back to the policy :enum:`return_value_policy::take_ownership` when the return value is a pointer. Otherwise, it uses :enum:`return_value_policy::move` or :enum:`return_value_policy::copy` for rvalue and lvalue references, respectively. See above for a description of what all of these different policies do. This is the default policy for ``py::class_``-wrapped types.
   * - :enum:`return_value_policy::automatic_reference`
     - As above, but use policy :enum:`return_value_policy::reference` when the return value is a pointer. This is the default conversion policy for function arguments when calling Python functions manually from C++ code (i.e. via ``handle::operator()``) and the casters in ``pybind11/stl.h``. You probably won't need to use this explicitly.

Return value policies can also be applied to properties:
.. code-block:: cpp class_<MyClass>(m, "MyClass") .def_property("data", &MyClass::getData, &MyClass::setData, py::return_value_policy::copy); Technically, the code above applies the policy to both the getter and the setter function; however, the setter doesn't really care about *return* value policies, which makes this a convenient terse syntax. Alternatively, targeted arguments can be passed through the :class:`cpp_function` constructor: .. code-block:: cpp class_<MyClass>(m, "MyClass") .def_property("data", py::cpp_function(&MyClass::getData, py::return_value_policy::copy), py::cpp_function(&MyClass::setData) ); .. warning:: Code with invalid return value policies might access uninitialized memory or free data structures multiple times, which can lead to hard-to-debug non-determinism and segmentation faults, hence it is worth spending the time to understand all the different options in the table above. .. note:: One important aspect of the above policies is that they only apply to instances which pybind11 has *not* seen before, in which case the policy clarifies essential questions about the return value's lifetime and ownership. When pybind11 knows the instance already (as identified by its type and address in memory), it will return the existing Python object wrapper rather than creating a new copy. .. note:: The next section on :ref:`call_policies` discusses *call policies* that can be specified *in addition* to a return value policy from the list above. Call policies indicate reference relationships that can involve both return values and parameters of functions. .. note:: As an alternative to elaborate call policies and lifetime management logic, consider using smart pointers (see the section on :ref:`smart_pointers` for details). Smart pointers can tell whether an object is still referenced from C++ or Python, which generally eliminates the kinds of inconsistencies that can lead to crashes or undefined behavior. For functions returning smart pointers, it is not necessary to specify a return value policy. .. _call_policies: Additional call policies ======================== In addition to the above return value policies, further *call policies* can be specified to indicate dependencies between parameters or ensure a certain state for the function call. Keep alive ---------- In general, this policy is required when the C++ object is any kind of container and another object is being added to the container. ``keep_alive<Nurse, Patient>`` indicates that the argument with index ``Patient`` should be kept alive at least until the argument with index ``Nurse`` is freed by the garbage collector. Argument indices start at one, while zero refers to the return value. For methods, index ``1`` refers to the implicit ``this`` pointer, while regular arguments begin at index ``2``. Arbitrarily many call policies can be specified. When a ``Nurse`` with value ``None`` is detected at runtime, the call policy does nothing. When the nurse is not a pybind11-registered type, the implementation internally relies on the ability to create a *weak reference* to the nurse object. When the nurse object is not a pybind11-registered type and does not support weak references, an exception will be thrown. If you use an incorrect argument index, you will get a ``RuntimeError`` saying ``Could not activate keep_alive!``. You should review the indices you're using. Consider the following example: here, the binding code for a list append operation ties the lifetime of the newly added element to the underlying container: ..
code-block:: cpp py::class_<List>(m, "List") .def("append", &List::append, py::keep_alive<1, 2>()); For consistency, the argument indexing is identical for constructors. Index ``1`` still refers to the implicit ``this`` pointer, i.e. the object which is being constructed. Index ``0`` refers to the return type which is presumed to be ``void`` when a constructor is viewed like a function. The following example ties the lifetime of the constructor element to the constructed object: .. code-block:: cpp py::class_<Nurse>(m, "Nurse") .def(py::init<Patient &>(), py::keep_alive<1, 2>()); .. note:: ``keep_alive`` is analogous to the ``with_custodian_and_ward`` (if Nurse, Patient != 0) and ``with_custodian_and_ward_postcall`` (if Nurse/Patient == 0) policies from Boost.Python. Call guard ---------- The ``call_guard<T>`` policy allows any scope guard type ``T`` to be placed around the function call. For example, this definition: .. code-block:: cpp m.def("foo", foo, py::call_guard<T>()); is equivalent to the following pseudocode: .. code-block:: cpp m.def("foo", [](args...) { T scope_guard; return foo(args...); // forwarded arguments }); The only requirement is that ``T`` is default-constructible, but otherwise any scope guard will work. This is very useful in combination with ``gil_scoped_release``. See :ref:`gil`. Multiple guards can also be specified as ``py::call_guard<T1, T2, T3...>``. The constructor order is left to right and destruction happens in reverse. .. seealso:: The file :file:`tests/test_call_policies.cpp` contains a complete example that demonstrates using `keep_alive` and `call_guard` in more detail. .. _python_objects_as_args: Python objects as arguments =========================== pybind11 exposes all major Python types using thin C++ wrapper classes. These wrapper classes can also be used as parameters of functions in bindings, which makes it possible to directly work with native Python types on the C++ side. For instance, the following statement iterates over a Python ``dict``: .. code-block:: cpp void print_dict(const py::dict& dict) { /* Easily interact with Python types */ for (auto item : dict) std::cout << "key=" << std::string(py::str(item.first)) << ", " << "value=" << std::string(py::str(item.second)) << std::endl; } It can be exported: .. code-block:: cpp m.def("print_dict", &print_dict); And used in Python as usual: .. code-block:: pycon >>> print_dict({"foo": 123, "bar": "hello"}) key=foo, value=123 key=bar, value=hello For more information on using Python objects in C++, see :doc:`/advanced/pycpp/index`. Accepting \*args and \*\*kwargs =============================== Python provides a useful mechanism to define functions that accept arbitrary numbers of arguments and keyword arguments: .. code-block:: python def generic(*args, **kwargs): ... # do something with args and kwargs Such functions can also be created using pybind11: .. code-block:: cpp void generic(py::args args, const py::kwargs& kwargs) { /// .. do something with args if (kwargs) /// .. do something with kwargs } /// Binding code m.def("generic", &generic); The class ``py::args`` derives from ``py::tuple`` and ``py::kwargs`` derives from ``py::dict``. You may also use just one or the other, and may combine these with other arguments. Note, however, that ``py::kwargs`` must always be the last argument of the function, and ``py::args`` implies that any further arguments are keyword-only (see :ref:`keyword_only_arguments`). 
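For illustration, here is a minimal sketch (the function name ``inspect`` is hypothetical) of what "doing something" with these objects might look like, using the same iteration patterns shown earlier for ``py::dict``:

.. code-block:: cpp

    // Hypothetical sketch: print every positional and keyword argument.
    void inspect(const py::args &args, const py::kwargs &kwargs) {
        for (size_t i = 0; i < args.size(); ++i)
            py::print("positional", i, "=", args[i]);
        for (auto item : kwargs)
            py::print("keyword", item.first, "=", item.second);
    }

    /// Binding code
    m.def("inspect", &inspect);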
Please refer to the other examples for details on how to iterate over these, and on how to cast their entries into C++ objects. A demonstration is also available in ``tests/test_kwargs_and_defaults.cpp``. .. note:: When combining \*args or \*\*kwargs with :ref:`keyword_args` you should *not* include ``py::arg`` tags for the ``py::args`` and ``py::kwargs`` arguments. Default arguments revisited =========================== The section on :ref:`default_args` previously discussed basic usage of default arguments using pybind11. One noteworthy aspect of their implementation is that default arguments are converted to Python objects right at declaration time. Consider the following example: .. code-block:: cpp py::class_<MyClass>("MyClass") .def("myFunction", py::arg("arg") = SomeType(123)); In this case, pybind11 must already be set up to deal with values of the type ``SomeType`` (via a prior instantiation of ``py::class_<SomeType>``), or an exception will be thrown. Another aspect worth highlighting is that the "preview" of the default argument in the function signature is generated using the object's ``__repr__`` method. If not available, the signature may not be very helpful, e.g.: .. code-block:: pycon FUNCTIONS ... | myFunction(...) | Signature : (MyClass, arg : SomeType = <SomeType object at 0x101b7b080>) -> NoneType ... The first way of addressing this is by defining ``SomeType.__repr__``. Alternatively, it is possible to specify the human-readable preview of the default argument manually using the ``arg_v`` notation: .. code-block:: cpp py::class_<MyClass>("MyClass") .def("myFunction", py::arg_v("arg", SomeType(123), "SomeType(123)")); Sometimes it may be necessary to pass a null pointer value as a default argument. In this case, remember to cast it to the underlying type in question, like so: .. code-block:: cpp py::class_<MyClass>("MyClass") .def("myFunction", py::arg("arg") = static_cast<SomeType *>(nullptr)); .. _keyword_only_arguments: Keyword-only arguments ====================== Python 3 introduced keyword-only arguments by specifying an unnamed ``*`` argument in a function definition: .. code-block:: python def f(a, *, b): # a can be positional or via keyword; b must be via keyword pass f(a=1, b=2) # good f(b=2, a=1) # good f(1, b=2) # good f(1, 2) # TypeError: f() takes 1 positional argument but 2 were given Pybind11 provides a ``py::kw_only`` object that allows you to implement the same behaviour by specifying the object between positional and keyword-only argument annotations when registering the function: .. code-block:: cpp m.def("f", [](int a, int b) { /* ... */ }, py::arg("a"), py::kw_only(), py::arg("b")); Note that you currently cannot combine this with a ``py::args`` argument. This feature does *not* require Python 3 to work. .. versionadded:: 2.6 As of pybind11 2.9, a ``py::args`` argument implies that any following arguments are keyword-only, as if ``py::kw_only()`` had been specified in the same relative location of the argument list as the ``py::args`` argument. The ``py::kw_only()`` may be included to be explicit about this, but is not required. (Prior to 2.9 ``py::args`` may only occur at the end of the argument list, or immediately before a ``py::kwargs`` argument at the end). .. versionadded:: 2.9 Positional-only arguments ========================= Python 3.8 introduced a new positional-only argument syntax, using ``/`` in the function definition (note that this has been a convention for CPython positional arguments, such as in ``pow()``, since Python 2). 
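For reference, this is what the pure-Python form looks like (it requires Python 3.8 or newer):

.. code-block:: python

    def f(a, /, b):
        # a must be given positionally; b may be given either way
        pass


    f(1, 2)  # good
    f(1, b=2)  # good
    f(a=1, b=2)  # TypeError: f() got some positional-only arguments passed as keyword arguments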
You can do the same thing in any version of Python using ``py::pos_only()``:

.. code-block:: cpp

    m.def("f", [](int a, int b) { /* ... */ }, py::arg("a"), py::pos_only(), py::arg("b"));

You now cannot give argument ``a`` by keyword. This can be combined with
keyword-only arguments, as well.

.. versionadded:: 2.6

.. _nonconverting_arguments:

Non-converting arguments
========================

Certain argument types may support conversion from one type to another. Some
examples of conversions are:

* :ref:`implicit_conversions` declared using ``py::implicitly_convertible<A,B>()``
* Calling a method accepting a double with an integer argument
* Calling a ``std::complex<float>`` argument with a non-complex python type
  (for example, with a float). (Requires the optional ``pybind11/complex.h`` header).
* Calling a function taking an Eigen matrix reference with a numpy array of the
  wrong type or of an incompatible data layout. (Requires the optional
  ``pybind11/eigen.h`` header).

This behaviour is sometimes undesirable: the binding code may prefer to raise
an error rather than convert the argument. This behaviour can be obtained
through ``py::arg`` by calling the ``.noconvert()`` method of the ``py::arg``
object, such as:

.. code-block:: cpp

    m.def("floats_only", [](double f) { return 0.5 * f; }, py::arg("f").noconvert());
    m.def("floats_preferred", [](double f) { return 0.5 * f; }, py::arg("f"));

Attempting to call the second function (the one without ``.noconvert()``) with
an integer will succeed, but attempting to call the ``.noconvert()`` version
will fail with a ``TypeError``:

.. code-block:: pycon

    >>> floats_preferred(4)
    2.0
    >>> floats_only(4)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: floats_only(): incompatible function arguments. The following argument types are supported:
        1. (f: float) -> float

    Invoked with: 4

You may, of course, combine this with the :var:`_a` shorthand notation (see
:ref:`keyword_args`) and/or :ref:`default_args`. It is also permitted to omit
the argument name by using the ``py::arg()`` constructor without an argument
name, i.e. by specifying ``py::arg().noconvert()``.

.. note::

    When specifying ``py::arg`` options it is necessary to provide the same
    number of options as the bound function has arguments. Thus if you want to
    enable no-convert behaviour for just one of several arguments, you will
    need to specify a ``py::arg()`` annotation for each argument with the
    no-convert argument modified to ``py::arg().noconvert()``.
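Building on the note above, a minimal sketch (the function and argument names are hypothetical): only ``x`` rejects conversions, but an annotation must still be supplied for ``y`` as well:

.. code-block:: cpp

    // Hypothetical example: `x` must be a genuine float, while `y` still
    // accepts convertible values. Both arguments need an annotation.
    m.def("scale", [](double x, int y) { return x * y; },
          py::arg("x").noconvert(), py::arg("y"));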
.. _none_arguments:

Allowing/Prohibiting None arguments
===================================

When a C++ type registered with :class:`py::class_` is passed as an argument to
a function taking the instance as pointer or shared holder (e.g. ``shared_ptr``
or a custom, copyable holder as described in :ref:`smart_pointers`), pybind
allows ``None`` to be passed from Python which results in calling the C++
function with ``nullptr`` (or an empty holder) for the argument. To explicitly
enable or disable this behaviour, use the ``.none`` method of the
:class:`py::arg` object:

.. code-block:: cpp

    py::class_<Dog>(m, "Dog").def(py::init<>());
    py::class_<Cat>(m, "Cat").def(py::init<>());
    m.def("bark", [](Dog *dog) -> std::string {
        if (dog) return "woof!"; /* Called with a Dog instance */
        else return "(no dog)"; /* Called with None, dog == nullptr */
    }, py::arg("dog").none(true));
    m.def("meow", [](Cat *cat) -> std::string {
        // Can't be called with None argument
        return "meow";
    }, py::arg("cat").none(false));

With the above, the Python call ``bark(None)`` will return the string
``"(no dog)"``, while attempting to call ``meow(None)`` will raise a
``TypeError``:

.. code-block:: pycon

    >>> from animals import Dog, Cat, bark, meow
    >>> bark(Dog())
    'woof!'
    >>> meow(Cat())
    'meow'
    >>> bark(None)
    '(no dog)'
    >>> meow(None)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: meow(): incompatible function arguments. The following argument types are supported:
        1. (cat: animals.Cat) -> str

    Invoked with: None

The default behaviour when the tag is unspecified is to allow ``None``.

.. note::

    Even when ``.none(true)`` is specified for an argument, ``None`` will be
    converted to a ``nullptr`` *only* for custom and :ref:`opaque <opaque>`
    types. Pointers to built-in types (``double *``, ``int *``, ...) and STL
    types (``std::vector<T> *``, ...; if ``pybind11/stl.h`` is included) are
    copied when converted to C++ (see :doc:`/advanced/cast/overview`) and will
    not allow ``None`` as argument. To pass an optional argument of these
    copied types, consider using ``std::optional<T>``.

.. _overload_resolution:

Overload resolution order
=========================

When a function or method with multiple overloads is called from Python,
pybind11 determines which overload to call in two passes. The first pass
attempts to call each overload without allowing argument conversion (as if
every argument had been specified as ``py::arg().noconvert()`` as described
above).

If no overload succeeds in the no-conversion first pass, a second pass is
attempted in which argument conversion is allowed (except where prohibited via
an explicit ``py::arg().noconvert()`` attribute in the function definition).

If the second pass also fails, a ``TypeError`` is raised.

Within each pass, overloads are tried in the order they were registered with
pybind11. If the ``py::prepend()`` tag is added to the definition, a function
can be placed at the beginning of the overload sequence instead, allowing user
overloads to precede built-in functions.

What this means in practice is that pybind11 will prefer any overload that
does not require conversion of arguments to an overload that does, but
otherwise prefers earlier-defined overloads to later-defined ones.
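As an illustration, here is a minimal sketch (the function name ``resolve`` and its overloads are hypothetical) of the ``py::prepend()`` tag mentioned above:

.. code-block:: cpp

    m.def("resolve", [](int x) { return "int overload"; });
    // Registered later, but tried *first* in each pass because of py::prepend():
    m.def("resolve", [](double x) { return "double overload"; }, py::prepend());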
.. note::

    pybind11 does *not* further prioritize based on the number/pattern of
    overloaded arguments. That is, pybind11 does not prioritize a function
    requiring one conversion over one requiring three, but only prioritizes
    overloads requiring no conversion at all to overloads that require
    conversion of at least one argument.

.. versionadded:: 2.6

    The ``py::prepend()`` tag.

Binding functions with template parameters
==========================================

You can bind functions that have template parameters. Here's a function:

.. code-block:: cpp

    template <typename T>
    void set(T t);

C++ templates cannot be instantiated at runtime, so you cannot bind the
non-instantiated function:

.. code-block:: cpp

    // BROKEN (this will not compile)
    m.def("set", &set);

You must bind each instantiated function template separately. You may bind
each instantiation with the same name, which will be treated the same as an
overloaded function:

.. code-block:: cpp

    m.def("set", &set<int>);
    m.def("set", &set<std::string>);

Sometimes it's clearer to bind them with separate names, which is also an
option:

.. code-block:: cpp

    m.def("setInt", &set<int>);
    m.def("setString", &set<std::string>);
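Assuming the ``set`` bindings above, dispatch from Python then follows the two-pass overload resolution described earlier (a hypothetical session):

.. code-block:: pycon

    >>> set(123)        # resolves to set<int>
    >>> set("hello")    # resolves to set<std::string>
    >>> setInt(42)      # explicitly named instantiation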
Smart pointers ############## std::unique_ptr =============== Given a class ``Example`` with Python bindings, it's possible to return instances wrapped in C++11 unique pointers, like so .. code-block:: cpp std::unique_ptr<Example> create_example() { return std::unique_ptr<Example>(new Example()); } .. code-block:: cpp m.def("create_example", &create_example); In other words, there is nothing special that needs to be done. While returning unique pointers in this way is allowed, it is *illegal* to use them as function arguments. For instance, the following function signature cannot be processed by pybind11. .. code-block:: cpp void do_something_with_example(std::unique_ptr<Example> ex) { ... } The above signature would imply that Python needs to give up ownership of an object that is passed to this function, which is generally not possible (for instance, the object might be referenced elsewhere). std::shared_ptr =============== The binding generator for classes, :class:`class_`, can be passed a template type that denotes a special *holder* type that is used to manage references to the object. If no such holder type template argument is given, the default for a type named ``Type`` is ``std::unique_ptr<Type>``, which means that the object is deallocated when Python's reference count goes to zero. It is possible to switch to other types of reference counting wrappers or smart pointers, which is useful in codebases that rely on them. For instance, the following snippet causes ``std::shared_ptr`` to be used instead. .. code-block:: cpp py::class_<Example, std::shared_ptr<Example> /* <- holder type */> obj(m, "Example"); Note that any particular class can only be associated with a single holder type. One potential stumbling block when using holder types is that they need to be applied consistently. Can you guess what's broken about the following binding code? .. code-block:: cpp class Child { }; class Parent { public: Parent() : child(std::make_shared<Child>()) { } Child *get_child() { return child.get(); } /* Hint: ** DON'T DO THIS ** */ private: std::shared_ptr<Child> child; }; PYBIND11_MODULE(example, m) { py::class_<Child, std::shared_ptr<Child>>(m, "Child"); py::class_<Parent, std::shared_ptr<Parent>>(m, "Parent") .def(py::init<>()) .def("get_child", &Parent::get_child); } The following Python code will cause undefined behavior (and likely a segmentation fault). .. code-block:: python from example import Parent print(Parent().get_child()) The problem is that ``Parent::get_child()`` returns a pointer to an instance of ``Child``, but the fact that this instance is already managed by ``std::shared_ptr<...>`` is lost when passing raw pointers. In this case, pybind11 will create a second independent ``std::shared_ptr<...>`` that also claims ownership of the pointer. In the end, the object will be freed **twice** since these shared pointers have no way of knowing about each other. There are two ways to resolve this issue: 1. For types that are managed by a smart pointer class, never use raw pointers in function arguments or return values. In other words: always consistently wrap pointers into their designated holder types (such as ``std::shared_ptr<...>``). In this case, the signature of ``get_child()`` should be modified as follows: .. code-block:: cpp std::shared_ptr<Child> get_child() { return child; } 2. Adjust the definition of ``Child`` by specifying ``std::enable_shared_from_this<T>`` (see cppreference_ for details) as a base class. 
This adds a small bit of information to ``Child`` that allows pybind11 to realize that there is already an existing ``std::shared_ptr<...>`` and communicate with it. In this case, the declaration of ``Child`` should look as follows: .. _cppreference: http://en.cppreference.com/w/cpp/memory/enable_shared_from_this .. code-block:: cpp class Child : public std::enable_shared_from_this<Child> { }; .. _smart_pointers: Custom smart pointers ===================== pybind11 supports ``std::unique_ptr`` and ``std::shared_ptr`` right out of the box. For any other custom smart pointer, transparent conversions can be enabled using a macro invocation similar to the following. It must be declared at the top namespace level before any binding code: .. code-block:: cpp PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>); The first argument of :func:`PYBIND11_DECLARE_HOLDER_TYPE` should be a placeholder name that is used as a template parameter of the second argument. Thus, feel free to use any identifier, but use it consistently on both sides; also, don't use the name of a type that already exists in your codebase. The macro also accepts a third optional boolean parameter that is set to false by default. Specify .. code-block:: cpp PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>, true); if ``SmartPtr<T>`` can always be initialized from a ``T*`` pointer without the risk of inconsistencies (such as multiple independent ``SmartPtr`` instances believing that they are the sole owner of the ``T*`` pointer). A common situation where ``true`` should be passed is when the ``T`` instances use *intrusive* reference counting. Please take a look at the :ref:`macro_notes` before using this feature. By default, pybind11 assumes that your custom smart pointer has a standard interface, i.e. provides a ``.get()`` member function to access the underlying raw pointer. If this is not the case, pybind11's ``holder_helper`` must be specialized: .. code-block:: cpp // Always needed for custom holder types PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>); // Only needed if the type's `.get()` goes by another name namespace pybind11 { namespace detail { template <typename T> struct holder_helper<SmartPtr<T>> { // <-- specialization static const T *get(const SmartPtr<T> &p) { return p.getPointer(); } }; }} The above specialization informs pybind11 that the custom ``SmartPtr`` class provides ``.get()`` functionality via ``.getPointer()``. .. seealso:: The file :file:`tests/test_smart_ptr.cpp` contains a complete example that demonstrates how to work with custom reference-counting holder types in more detail.
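To tie these pieces together, here is a minimal sketch of how a custom holder is actually used when binding a class (``SmartPtr`` and ``Example`` are hypothetical names):

.. code-block:: cpp

    // Declare the holder at the top namespace level, before any binding code.
    PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);

    PYBIND11_MODULE(example, m) {
        // The holder is passed as an extra template argument of class_,
        // exactly as with std::shared_ptr above.
        py::class_<Example, SmartPtr<Example>>(m, "Example")
            .def(py::init<>());
    }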
Exceptions
##########

Built-in C++ to Python exception translation
============================================

When Python calls C++ code through pybind11, pybind11 provides a C++ exception
handler that will trap C++ exceptions, translate them to the corresponding
Python exception, and raise them so that Python code can handle them.

pybind11 defines translations for ``std::exception`` and its standard
subclasses, and several special exception classes that translate to specific
Python exceptions. Note that these are not actually Python exceptions, so they
cannot be examined using the Python C API. Instead, they are pure C++ objects
that pybind11 will translate into the corresponding Python exception when they
arrive at its exception handler.

.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|

+--------------------------------------+--------------------------------------+
| Exception thrown by C++              | Translated to Python exception type  |
+======================================+======================================+
| :class:`std::exception`              | ``RuntimeError``                     |
+--------------------------------------+--------------------------------------+
| :class:`std::bad_alloc`              | ``MemoryError``                      |
+--------------------------------------+--------------------------------------+
| :class:`std::domain_error`           | ``ValueError``                       |
+--------------------------------------+--------------------------------------+
| :class:`std::invalid_argument`       | ``ValueError``                       |
+--------------------------------------+--------------------------------------+
| :class:`std::length_error`           | ``ValueError``                       |
+--------------------------------------+--------------------------------------+
| :class:`std::out_of_range`           | ``IndexError``                       |
+--------------------------------------+--------------------------------------+
| :class:`std::range_error`            | ``ValueError``                       |
+--------------------------------------+--------------------------------------+
| :class:`std::overflow_error`         | ``OverflowError``                    |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::stop_iteration`    | ``StopIteration`` (used to implement |
|                                      | custom iterators)                    |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::index_error`       | ``IndexError`` (used to indicate out |
|                                      | of bounds access in ``__getitem__``, |
|                                      | ``__setitem__``, etc.)               |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::key_error`         | ``KeyError`` (used to indicate out   |
|                                      | of bounds access in ``__getitem__``, |
|                                      | ``__setitem__`` in dict-like         |
|                                      | objects, etc.)                       |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::value_error`       | ``ValueError`` (used to indicate     |
|                                      | wrong value passed in                |
|                                      | ``container.remove(...)``)           |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::type_error`        | ``TypeError``                        |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::buffer_error`      | ``BufferError``                      |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::import_error`      | ``ImportError``                      |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::attribute_error`   | ``AttributeError``                   |
+--------------------------------------+--------------------------------------+
| Any other exception                  | ``RuntimeError``                     |
+--------------------------------------+--------------------------------------+
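For instance, given the table above, a bound function that lets ``std::out_of_range`` escape will surface in Python as an ``IndexError``. A minimal sketch (the name ``fetch`` is hypothetical, and ``pybind11/stl.h`` is assumed to be included for the ``std::vector`` conversion):

.. code-block:: cpp

    m.def("fetch", [](const std::vector<int> &v, size_t i) {
        return v.at(i);  // throws std::out_of_range for a bad index
    });

Calling ``fetch([1, 2, 3], 10)`` from Python then raises ``IndexError`` rather than terminating the process.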
Exception translation is not bidirectional. That is, *catching* the C++
exceptions defined above will not trap exceptions that originate from Python.
For that, catch :class:`pybind11::error_already_set`. See
:ref:`below <handling_python_exceptions_cpp>` for further details.

There is also a special exception :class:`cast_error` that is thrown by
:func:`handle::call` when the input arguments cannot be converted to Python
objects.

Registering custom translators
==============================

If the default exception conversion policy described above is insufficient,
pybind11 also provides support for registering custom exception translators.
Similar to pybind11 classes, exception translators can be local to the module
they are defined in or global to the entire python session. To register a
simple exception conversion that translates a C++ exception into a new Python
exception using the C++ exception's ``what()`` method, a helper function is
available:

.. code-block:: cpp

    py::register_exception<CppExp>(module, "PyExp");

This call creates a Python exception class with the name ``PyExp`` in the given
module and automatically converts any encountered exceptions of type ``CppExp``
into Python exceptions of type ``PyExp``.

A matching function is available for registering a local exception translator:

.. code-block:: cpp

    py::register_local_exception<CppExp>(module, "PyExp");

It is possible to specify a base class for the exception using the third
parameter, a ``handle``:

.. code-block:: cpp

    py::register_exception<CppExp>(module, "PyExp", PyExc_RuntimeError);
    py::register_local_exception<CppExp>(module, "PyExp", PyExc_RuntimeError);

Then ``PyExp`` can be caught both as ``PyExp`` and ``RuntimeError``.
The class objects of the built-in Python exceptions are listed in the Python
documentation on `Standard Exceptions <https://docs.python.org/3/c-api/exceptions.html#standard-exceptions>`_.
The default base class is ``PyExc_Exception``.

When more advanced exception translation is needed, the functions
``py::register_exception_translator(translator)`` and
``py::register_local_exception_translator(translator)`` can be used to register
functions that can translate arbitrary exception types (and which may include
additional logic to do so). These functions take a stateless callable (e.g. a
function pointer or a lambda function without captured variables) with the call
signature ``void(std::exception_ptr)``.

When a C++ exception is thrown, the registered exception translators are tried
in reverse order of registration (i.e. the last registered translator gets the
first shot at handling the exception).
All local translators will be tried before a global translator is tried.

Inside the translator, ``std::rethrow_exception`` should be used within
a try block to re-throw the exception. One or more catch clauses to catch
the appropriate exceptions should then be used with each clause using
``PyErr_SetString`` to set a Python exception or ``ex(string)`` to set
the python exception to a custom exception type (see below).

To declare a custom Python exception type, declare a ``py::exception`` variable
and use this in the associated exception translator (note: it is often useful
to make this a static declaration when using it inside a lambda expression
without requiring capturing).

The following example demonstrates this for two hypothetical exception classes
``MyCustomException`` and ``OtherException``: the first is translated to a
custom python exception ``MyCustomError``, while the second is translated to a
standard Python ``RuntimeError``:

.. code-block:: cpp

    static py::exception<MyCustomException> exc(m, "MyCustomError");
    py::register_exception_translator([](std::exception_ptr p) {
        try {
            if (p) std::rethrow_exception(p);
        } catch (const MyCustomException &e) {
            exc(e.what());
        } catch (const OtherException &e) {
            PyErr_SetString(PyExc_RuntimeError, e.what());
        }
    });

Multiple exceptions can be handled by a single translator, as shown in the
example above. If the exception is not caught by the current translator, the
previously registered one gets a chance.

If none of the registered exception translators is able to handle the
exception, it is handled by the default converter as described in the previous
section.

.. seealso::

    The file :file:`tests/test_exceptions.cpp` contains examples
    of various custom exception translators and custom exception types.

.. note::

    Call either ``PyErr_SetString`` or a custom exception's call
    operator (``exc(string)``) for every exception caught in a custom exception
    translator. Failure to do so will cause Python to crash with ``SystemError:
    error return without exception set``.

    Exceptions that you do not plan to handle should simply not be caught, or
    may be explicitly (re-)thrown to delegate them to the other,
    previously-declared existing exception translators.

Note that ``libc++`` and ``libstdc++`` `behave differently <https://stackoverflow.com/questions/19496643/using-clang-fvisibility-hidden-and-typeinfo-and-type-erasure/28827430>`_
with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI
boundaries need to be explicitly exported, as exercised in
``tests/test_exceptions.h``. See also: "Problems with C++ exceptions" under
`GCC Wiki <https://gcc.gnu.org/wiki/Visibility>`_.

Local vs Global Exception Translators
=====================================

When a global exception translator is registered, it will be applied across all
modules in the reverse order of registration. This can create behavior where
the order of module import influences how exceptions are translated.

If module1 has the following translator:

.. code-block:: cpp

    py::register_exception_translator([](std::exception_ptr p) {
        try {
            if (p) std::rethrow_exception(p);
        } catch (const std::invalid_argument &e) {
            PyErr_SetString(PyExc_ValueError, "module1 handled this");
        }
    });

and module2 has the following similar translator:
.. code-block:: cpp

    py::register_exception_translator([](std::exception_ptr p) {
        try {
            if (p) std::rethrow_exception(p);
        } catch (const std::invalid_argument &e) {
            PyErr_SetString(PyExc_ValueError, "module2 handled this");
        }
    });

then which translator handles the invalid_argument will be determined by the
order that module1 and module2 are imported. Since exception translators are
applied in the reverse order of registration, whichever module was imported
last will "win" and that translator will be applied.

If there are multiple pybind11 modules that share exception types (either
standard built-in or custom) loaded into a single Python instance and
consistent error handling behavior is needed, then local translators should be
used.

Changing the previous example to use ``register_local_exception_translator``
would mean that when invalid_argument is thrown in the module2 code, the
module2 translator will always handle it, while in module1, the module1
translator will do the same.

.. _handling_python_exceptions_cpp:

Handling exceptions from Python in C++
======================================

When C++ calls Python functions, such as in a callback function or when
manipulating Python objects, and Python raises an ``Exception``, pybind11
converts the Python exception into a C++ exception of type
:class:`pybind11::error_already_set` whose payload contains a C++ string
textual summary and the actual Python exception. ``error_already_set`` is used
to propagate Python exceptions back to Python (or possibly, handle them in
C++).

.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|

+--------------------------------------+--------------------------------------+
| Exception raised in Python           | Thrown as C++ exception type         |
+======================================+======================================+
| Any Python ``Exception``             | :class:`pybind11::error_already_set` |
+--------------------------------------+--------------------------------------+

For example:

.. code-block:: cpp

    try {
        // open("missing.txt", "r")
        auto file = py::module_::import("io").attr("open")("missing.txt", "r");
        auto text = file.attr("read")();
        file.attr("close")();
    } catch (py::error_already_set &e) {
        if (e.matches(PyExc_FileNotFoundError)) {
            py::print("missing.txt not found");
        } else if (e.matches(PyExc_PermissionError)) {
            py::print("missing.txt found but not accessible");
        } else {
            throw;
        }
    }

Note that C++ to Python exception translation does not apply here, since that
is a method for translating C++ exceptions to Python, not vice versa. The error
raised from Python is always ``error_already_set``.

This example illustrates this behavior:

.. code-block:: cpp

    try {
        py::eval("raise ValueError('The Ring')");
    } catch (py::value_error &boromir) {
        // Boromir never gets the ring
        assert(false);
    } catch (py::error_already_set &frodo) {
        // Frodo gets the ring
        py::print("I will take the ring");
    }

    try {
        // py::value_error is a request for pybind11 to raise a Python exception
        throw py::value_error("The ball");
    } catch (py::error_already_set &cat) {
        // cat won't catch the ball since
        // py::value_error is not a Python exception
        assert(false);
    } catch (py::value_error &dog) {
        // dog will catch the ball
        py::print("Run Spot run");
        throw;  // Throw it again (pybind11 will raise ValueError)
    }

Handling errors from the Python C API
=====================================

Where possible, use :ref:`pybind11 wrappers <wrappers>` instead of calling
the Python C API directly.
When calling the Python C API directly, in addition to manually managing
reference counts, one must follow the pybind11 error protocol, which is
outlined here.

After calling the Python C API, if Python returns an error,
``throw py::error_already_set();``, which allows pybind11 to deal with the
exception and pass it back to the Python interpreter. This includes calls to
the error setting functions such as ``PyErr_SetString``.

.. code-block:: cpp

    PyErr_SetString(PyExc_TypeError, "C API type error demo");
    throw py::error_already_set();

    // But it would be easier to simply...
    throw py::type_error("pybind11 wrapper type error");

Alternately, to ignore the error, call `PyErr_Clear
<https://docs.python.org/3/c-api/exceptions.html#c.PyErr_Clear>`_.

Any Python error must be thrown or cleared, or Python/pybind11 will be left in
an invalid state.

Chaining exceptions ('raise from')
==================================

In Python 3.3 a mechanism for indicating that exceptions were caused by other
exceptions was introduced:

.. code-block:: py

    try:
        print(1 / 0)
    except Exception as exc:
        raise RuntimeError("could not divide by zero") from exc

To do a similar thing in pybind11, you can use the ``py::raise_from`` function.
It sets the current python error indicator, so to continue propagating the
exception you should ``throw py::error_already_set()`` (Python 3 only).

.. code-block:: cpp

    try {
        py::eval("print(1 / 0)");
    } catch (py::error_already_set &e) {
        py::raise_from(e, PyExc_RuntimeError, "could not divide by zero");
        throw py::error_already_set();
    }

.. versionadded:: 2.8

.. _unraisable_exceptions:

Handling unraisable exceptions
==============================

If a Python function invoked from a C++ destructor or any function marked
``noexcept(true)`` (collectively, "noexcept functions") throws an exception,
there is no way to propagate the exception, as such functions may not throw.
Should they throw or fail to catch any exceptions in their call graph, the C++
runtime calls ``std::terminate()`` to abort immediately.

Similarly, Python exceptions raised in a class's ``__del__`` method do not
propagate, but are logged by Python as an unraisable error. In Python 3.8+, a
`system hook is triggered
<https://docs.python.org/3/library/sys.html#sys.unraisablehook>`_
and an auditing event is logged.

Any noexcept function should have a try-catch block that traps
:class:`error_already_set` (or any other exception that can occur). Note that
pybind11 wrappers around Python exceptions such as
:class:`pybind11::value_error` are *not* Python exceptions; they are C++
exceptions that pybind11 catches and converts to Python exceptions. Noexcept
functions cannot propagate these exceptions either. A useful approach is to
convert them to Python exceptions and then ``discard_as_unraisable`` as shown
below.

.. code-block:: cpp

    void nonthrowing_func() noexcept(true) {
        try {
            // ...
        } catch (py::error_already_set &eas) {
            // Discard the Python error using Python APIs, using the C++ magic
            // variable __func__. Python already knows the type and value of the
            // exception object.
            eas.discard_as_unraisable(__func__);
        } catch (const std::exception &e) {
            // Log and discard C++ exceptions.
            third_party::log(e);
        }
    }

.. versionadded:: 2.6
Classes ####### This section presents advanced binding code for classes and it is assumed that you are already familiar with the basics from :doc:`/classes`. .. _overriding_virtuals: Overriding virtual functions in Python ====================================== Suppose that a C++ class or interface has a virtual function that we'd like to override from within Python (we'll focus on the class ``Animal``; ``Dog`` is given as a specific example of how one would do this with traditional C++ code). .. code-block:: cpp class Animal { public: virtual ~Animal() { } virtual std::string go(int n_times) = 0; }; class Dog : public Animal { public: std::string go(int n_times) override { std::string result; for (int i=0; i<n_times; ++i) result += "woof! "; return result; } }; Let's also suppose that we are given a plain function which calls the function ``go()`` on an arbitrary ``Animal`` instance. .. code-block:: cpp std::string call_go(Animal *animal) { return animal->go(3); } Normally, the binding code for these classes would look as follows: .. code-block:: cpp PYBIND11_MODULE(example, m) { py::class_<Animal>(m, "Animal") .def("go", &Animal::go); py::class_<Dog, Animal>(m, "Dog") .def(py::init<>()); m.def("call_go", &call_go); } However, these bindings are impossible to extend: ``Animal`` is not constructible, and we clearly require some kind of "trampoline" that redirects virtual calls back to Python. Defining a new type of ``Animal`` from within Python is possible but requires a helper class that is defined as follows: .. code-block:: cpp class PyAnimal : public Animal { public: /* Inherit the constructors */ using Animal::Animal; /* Trampoline (need one for each virtual function) */ std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE( std::string, /* Return type */ Animal, /* Parent class */ go, /* Name of function in C++ (must match Python name) */ n_times /* Argument(s) */ ); } }; The macro :c:macro:`PYBIND11_OVERRIDE_PURE` should be used for pure virtual functions, and :c:macro:`PYBIND11_OVERRIDE` should be used for functions which have a default implementation. There are also two alternate macros :c:macro:`PYBIND11_OVERRIDE_PURE_NAME` and :c:macro:`PYBIND11_OVERRIDE_NAME` which take a string-valued name argument between the *Parent class* and *Name of the function* slots, which defines the name of function in Python. This is required when the C++ and Python versions of the function have different names, e.g. ``operator()`` vs ``__call__``. The binding code also needs a few minor adaptations (highlighted): .. code-block:: cpp :emphasize-lines: 2,3 PYBIND11_MODULE(example, m) { py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, "Animal") .def(py::init<>()) .def("go", &Animal::go); py::class_<Dog, Animal>(m, "Dog") .def(py::init<>()); m.def("call_go", &call_go); } Importantly, pybind11 is made aware of the trampoline helper class by specifying it as an extra template argument to :class:`class_`. (This can also be combined with other template arguments such as a custom holder type; the order of template types does not matter). Following this, we are able to define a constructor as usual. Bindings should be made against the actual class, not the trampoline helper class. .. 
code-block:: cpp
    :emphasize-lines: 3

    py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, "Animal")
        .def(py::init<>())
        .def("go", &PyAnimal::go); /* <--- THIS IS WRONG, use &Animal::go */

Note, however, that the above is sufficient for allowing python classes to
extend ``Animal``, but not ``Dog``: see :ref:`virtual_and_inheritance` for the
necessary steps required to provide proper overriding support for inherited
classes.

The Python session below shows how to override ``Animal::go`` and invoke it via
a virtual method call.

.. code-block:: pycon

    >>> from example import *
    >>> d = Dog()
    >>> call_go(d)
    u'woof! woof! woof! '
    >>> class Cat(Animal):
    ...     def go(self, n_times):
    ...         return "meow! " * n_times
    ...
    >>> c = Cat()
    >>> call_go(c)
    u'meow! meow! meow! '

If you are defining a custom constructor in a derived Python class, you *must*
ensure that you explicitly call the bound C++ constructor using ``__init__``,
*regardless* of whether it is a default constructor or not. Otherwise, the
memory for the C++ portion of the instance will be left uninitialized, which
will generally leave the C++ instance in an invalid state and cause undefined
behavior if the C++ instance is subsequently used.

.. versionchanged:: 2.6

    The default pybind11 metaclass will throw a ``TypeError`` when it detects
    that ``__init__`` was not called by a derived class.

Here is an example:

.. code-block:: python

    class Dachshund(Dog):
        def __init__(self, name):
            Dog.__init__(self)  # Without this, a TypeError is raised.
            self.name = name

        def bark(self):
            return "yap!"

Note that a direct ``__init__`` constructor *should be called*, and ``super()``
should not be used. For simple cases of linear inheritance, ``super()`` may
work, but once you begin mixing Python and C++ multiple inheritance, things
will fall apart due to differences between Python's MRO and C++'s mechanisms.

Please take a look at the :ref:`macro_notes` before using this feature.

.. note::

    When the overridden type returns a reference or pointer to a type that
    pybind11 converts from Python (for example, numeric values, std::string,
    and other built-in value-converting types), there are some limitations to
    be aware of:

    - because in these cases there is no C++ variable to reference (the value
      is stored in the referenced Python variable), pybind11 provides one in
      the PYBIND11_OVERRIDE macros (when needed) with static storage duration.
      Note that this means that invoking the overridden method on *any*
      instance will change the referenced value stored in *all* instances of
      that type.

    - Attempts to modify a non-const reference will not have the desired
      effect: it will change only the static cache variable, but this change
      will not propagate to underlying Python instance, and the change will be
      replaced the next time the override is invoked.

.. warning::

    The :c:macro:`PYBIND11_OVERRIDE` and accompanying macros used to be called
    ``PYBIND11_OVERLOAD`` up until pybind11 v2.5.0, and :func:`get_override`
    used to be called ``get_overload``. This naming was corrected and the older
    macro and function names may soon be deprecated, in order to reduce
    confusion with overloaded functions and methods and ``py::overload_cast``
    (see :ref:`classes`).

.. seealso::

    The file :file:`tests/test_virtual_functions.cpp` contains a complete
    example that demonstrates how to override virtual functions using pybind11
    in more detail.

..
_virtual_and_inheritance: Combining virtual functions and inheritance =========================================== When combining virtual methods with inheritance, you need to be sure to provide an override for each method for which you want to allow overrides from derived python classes. For example, suppose we extend the above ``Animal``/``Dog`` example as follows: .. code-block:: cpp class Animal { public: virtual std::string go(int n_times) = 0; virtual std::string name() { return "unknown"; } }; class Dog : public Animal { public: std::string go(int n_times) override { std::string result; for (int i=0; i<n_times; ++i) result += bark() + " "; return result; } virtual std::string bark() { return "woof!"; } }; then the trampoline class for ``Animal`` must, as described in the previous section, override ``go()`` and ``name()``, but in order to allow python code to inherit properly from ``Dog``, we also need a trampoline class for ``Dog`` that overrides both the added ``bark()`` method *and* the ``go()`` and ``name()`` methods inherited from ``Animal`` (even though ``Dog`` doesn't directly override the ``name()`` method): .. code-block:: cpp class PyAnimal : public Animal { public: using Animal::Animal; // Inherit constructors std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, Animal, go, n_times); } std::string name() override { PYBIND11_OVERRIDE(std::string, Animal, name, ); } }; class PyDog : public Dog { public: using Dog::Dog; // Inherit constructors std::string go(int n_times) override { PYBIND11_OVERRIDE(std::string, Dog, go, n_times); } std::string name() override { PYBIND11_OVERRIDE(std::string, Dog, name, ); } std::string bark() override { PYBIND11_OVERRIDE(std::string, Dog, bark, ); } }; .. note:: Note the trailing commas in the ``PYBIND11_OVERRIDE`` calls to ``name()`` and ``bark()``. These are needed to portably implement a trampoline for a function that does not take any arguments. For functions that take a nonzero number of arguments, the trailing comma must be omitted. A registered class derived from a pybind11-registered class with virtual methods requires a similar trampoline class, *even if* it doesn't explicitly declare or override any virtual methods itself: .. code-block:: cpp class Husky : public Dog {}; class PyHusky : public Husky { public: using Husky::Husky; // Inherit constructors std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, Husky, go, n_times); } std::string name() override { PYBIND11_OVERRIDE(std::string, Husky, name, ); } std::string bark() override { PYBIND11_OVERRIDE(std::string, Husky, bark, ); } }; There is, however, a technique that can be used to avoid this duplication (which can be especially helpful for a base class with several virtual methods). The technique involves using template trampoline classes, as follows: .. 
code-block:: cpp template <class AnimalBase = Animal> class PyAnimal : public AnimalBase { public: using AnimalBase::AnimalBase; // Inherit constructors std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, AnimalBase, go, n_times); } std::string name() override { PYBIND11_OVERRIDE(std::string, AnimalBase, name, ); } }; template <class DogBase = Dog> class PyDog : public PyAnimal<DogBase> { public: using PyAnimal<DogBase>::PyAnimal; // Inherit constructors // Override PyAnimal's pure virtual go() with a non-pure one: std::string go(int n_times) override { PYBIND11_OVERRIDE(std::string, DogBase, go, n_times); } std::string bark() override { PYBIND11_OVERRIDE(std::string, DogBase, bark, ); } }; This technique has the advantage of requiring just one trampoline method to be declared per virtual method and pure virtual method override. It does, however, require the compiler to generate at least as many methods (and possibly more, if both pure virtual and overridden pure virtual methods are exposed, as above). The classes are then registered with pybind11 using: .. code-block:: cpp py::class_<Animal, PyAnimal<>> animal(m, "Animal"); py::class_<Dog, Animal, PyDog<>> dog(m, "Dog"); py::class_<Husky, Dog, PyDog<Husky>> husky(m, "Husky"); // ... add animal, dog, husky definitions Note that ``Husky`` did not require a dedicated trampoline template class at all, since it neither declares any new virtual methods nor provides any pure virtual method implementations. With either the repeated-virtuals or templated trampoline methods in place, you can now create a python class that inherits from ``Dog``: .. code-block:: python class ShihTzu(Dog): def bark(self): return "yip!" .. seealso:: See the file :file:`tests/test_virtual_functions.cpp` for complete examples using both the duplication and templated trampoline approaches. .. _extended_aliases: Extended trampoline class functionality ======================================= .. _extended_class_functionality_forced_trampoline: Forced trampoline class initialisation -------------------------------------- The trampoline classes described in the previous sections are, by default, only initialized when needed. More specifically, they are initialized when a python class actually inherits from a registered type (instead of merely creating an instance of the registered type), or when a registered constructor is only valid for the trampoline class but not the registered class. This is primarily for performance reasons: when the trampoline class is not needed for anything except virtual method dispatching, not initializing the trampoline class improves performance by avoiding needing to do a run-time check to see if the inheriting python instance has an overridden method. Sometimes, however, it is useful to always initialize a trampoline class as an intermediate class that does more than just handle virtual method dispatching. For example, such a class might perform extra class initialization, extra destruction operations, and might define new members and methods to enable a more python-like interface to a class. In order to tell pybind11 that it should *always* initialize the trampoline class when creating new instances of a type, the class constructors should be declared using ``py::init_alias<Args, ...>()`` instead of the usual ``py::init<Args, ...>()``. This forces construction via the trampoline class, ensuring member initialization and (eventual) destruction. .. 
seealso:: See the file :file:`tests/test_virtual_functions.cpp` for complete examples showing both normal and forced trampoline instantiation.

Different method signatures
---------------------------

The macros introduced in :ref:`overriding_virtuals` cover most of the standard
use cases when exposing C++ classes to Python. Sometimes it is hard or unwieldy
to create a direct one-on-one mapping between the arguments and method return
type.

An example would be when the C++ signature contains output arguments using
references (see also :ref:`faq_reference_arguments`). Another way of solving
this is to use the method body of the trampoline class to do conversions to the
input and return of the Python method.

The main building block to do so is :func:`get_override`, which allows
retrieving a method implemented in Python from within the trampoline's methods.
Consider for example a C++ method which has the signature
``bool myMethod(int32_t& value)``, where the return indicates whether something
should be done with the ``value``. This can be made convenient on the Python
side by allowing the Python function to return ``None`` or an ``int``:

.. code-block:: cpp

    bool MyClass::myMethod(int32_t& value)
    {
        pybind11::gil_scoped_acquire gil;  // Acquire the GIL while in this scope.
        // Try to look up the overridden method on the Python side.
        pybind11::function override = pybind11::get_override(this, "myMethod");
        if (override) {  // method is found
            auto obj = override(value);  // Call the Python function.
            if (py::isinstance<py::int_>(obj)) {  // check if it returned a Python integer type
                value = obj.cast<int32_t>();  // Cast it and assign it to the value.
                return true;  // Return true; value should be used.
            } else {
                return false;  // Python returned none, return false.
            }
        }
        return false;  // Alternatively return MyClass::myMethod(value);
    }

.. _custom_constructors:

Custom constructors
===================

The syntax for binding constructors was previously introduced, but it only
works when a constructor of the appropriate arguments actually exists on the
C++ side. To extend this to more general cases, pybind11 makes it possible to
bind factory functions as constructors. For example, suppose you have a class
like this:

.. code-block:: cpp

    class Example {
    private:
        Example(int); // private constructor
    public:
        // Factory function:
        static Example create(int a) { return Example(a); }
    };

    py::class_<Example>(m, "Example")
        .def(py::init(&Example::create));

While it is possible to create a straightforward binding of the static
``create`` method, it may sometimes be preferable to expose it as a constructor
on the Python side. This can be accomplished by calling
``.def(py::init(...))`` with the function reference returning the new instance
passed as an argument. It is also possible to use this approach to bind a
function returning a new instance by raw pointer or by the holder (e.g.
``std::unique_ptr``). The following example shows the different approaches:
.. code-block:: cpp

    class Example {
    private:
        Example(int); // private constructor
    public:
        // Factory function - returned by value:
        static Example create(int a) { return Example(a); }

        // These constructors are publicly callable:
        Example(double);
        Example(int, int);
        Example(std::string);
    };

    py::class_<Example>(m, "Example")
        // Bind the factory function as a constructor:
        .def(py::init(&Example::create))
        // Bind a lambda function returning a pointer wrapped in a holder:
        .def(py::init([](std::string arg) {
            return std::unique_ptr<Example>(new Example(arg));
        }))
        // Return a raw pointer:
        .def(py::init([](int a, int b) { return new Example(a, b); }))
        // You can mix the above with regular C++ constructor bindings as well:
        .def(py::init<double>())
        ;

When the constructor is invoked from Python, pybind11 will call the factory
function and store the resulting C++ instance in the Python instance.

When combining factory function constructors with :ref:`virtual function
trampolines <overriding_virtuals>` there are two approaches. The first is to
add a constructor to the alias class that takes a base value by
rvalue-reference. If such a constructor is available, it will be used to
construct an alias instance from the value returned by the factory function.
The second option is to provide two factory functions to ``py::init()``: the
first will be invoked when no alias class is required (i.e. when the class is
being used but not inherited from in Python), and the second will be invoked
when an alias is required.

You can also specify a single factory function that always returns an alias
instance: this will result in behaviour similar to ``py::init_alias<...>()``,
as described in the :ref:`extended trampoline class documentation
<extended_aliases>`.

The following example shows the different factory approaches for a class with
an alias:

.. code-block:: cpp

    class Example {
    public:
        // ...
        virtual ~Example() = default;
    };

    class PyExample : public Example {
    public:
        using Example::Example;
        PyExample(Example &&base) : Example(std::move(base)) {}
    };

    py::class_<Example, PyExample>(m, "Example")
        // Returns an Example pointer. If a PyExample is needed, the Example
        // instance will be moved via the extra constructor in PyExample, above.
        .def(py::init([]() { return new Example(); }))
        // Two callbacks:
        .def(py::init([]() { return new Example(); } /* no alias needed */,
                      []() { return new PyExample(); } /* alias needed */))
        // *Always* returns an alias instance (like py::init_alias<>())
        .def(py::init([]() { return new PyExample(); }))
        ;

Brace initialization
--------------------

``pybind11::init<>`` internally uses C++11 brace initialization to call the
constructor of the target class. This means that it can be used to bind
*implicit* constructors as well:

.. code-block:: cpp

    struct Aggregate {
        int a;
        std::string b;
    };

    py::class_<Aggregate>(m, "Aggregate")
        .def(py::init<int, const std::string &>());

.. note::

    Note that brace initialization preferentially invokes constructor overloads
    taking a ``std::initializer_list``. In the rare event that this causes an
    issue, you can work around it by using ``py::init(...)`` with a lambda
    function that constructs the new object as desired.

.. _classes_with_non_public_destructors:

Non-public destructors
======================

If a class has a private or protected destructor (as might e.g. be the case in
a singleton pattern), a compile error will occur when creating bindings via
pybind11.
The underlying issue is that the ``std::unique_ptr`` holder type that is
responsible for managing the lifetime of instances will reference the
destructor even if no deallocations ever take place. In order to expose classes
with private or protected destructors, it is possible to override the holder
type via a holder type argument to ``class_``. Pybind11 provides a helper class
``py::nodelete`` that disables any destructor invocations. In this case, it is
crucial that instances are deallocated on the C++ side to avoid memory leaks.

.. code-block:: cpp

    /* ... definition ... */

    class MyClass {
    private:
        ~MyClass() { }
    };

    /* ... binding code ... */

    py::class_<MyClass, std::unique_ptr<MyClass, py::nodelete>>(m, "MyClass")
        .def(py::init<>());

.. _destructors_that_call_python:

Destructors that call Python
============================

If a Python function is invoked from a C++ destructor, an exception may be
thrown of type :class:`error_already_set`. If this error is thrown out of a
class destructor, ``std::terminate()`` will be called, terminating the process.
Class destructors must catch all exceptions of type :class:`error_already_set`
to discard the Python exception using
:func:`error_already_set::discard_as_unraisable`.

Every Python function should be treated as *possibly throwing*. When a Python
generator stops yielding items, Python will throw a ``StopIteration``
exception, which can pass through C++ destructors if the generator's stack
frame holds the last reference to C++ objects.

For more information, see :ref:`the documentation on exceptions
<unraisable_exceptions>`.

.. code-block:: cpp

    class MyClass {
    public:
        ~MyClass() {
            try {
                py::print("Even printing is dangerous in a destructor");
                py::exec("raise ValueError('This is an unraisable exception')");
            } catch (py::error_already_set &e) {
                // error_context should be information about where/why the error
                // occurred, e.g. use __func__ to get the name of the current function
                e.discard_as_unraisable(__func__);
            }
        }
    };

.. note::

    pybind11 does not support C++ destructors marked ``noexcept(false)``.

.. versionadded:: 2.6

.. _implicit_conversions:

Implicit conversions
====================

Suppose that instances of two types ``A`` and ``B`` are used in a project, and
that an ``A`` can easily be converted into an instance of type ``B`` (examples
of this could be a fixed and an arbitrary precision number type).

.. code-block:: cpp

    py::class_<A>(m, "A")
        /// ... members ...

    py::class_<B>(m, "B")
        .def(py::init<A>())
        /// ... members ...

    m.def("func", [](const B &) { /* .... */ } );

To invoke the function ``func`` using a variable ``a`` containing an ``A``
instance, we'd have to write ``func(B(a))`` in Python. On the other hand, C++
will automatically apply an implicit type conversion, which makes it possible
to directly write ``func(a)``.

In this situation (i.e. where ``B`` has a constructor that converts from
``A``), the following statement enables similar implicit conversions on the
Python side:

.. code-block:: cpp

    py::implicitly_convertible<A, B>();

.. note::

    Implicit conversions from ``A`` to ``B`` only work when ``B`` is a custom
    data type that is exposed to Python via pybind11.

    To prevent runaway recursion, implicit conversions are non-reentrant: an
    implicit conversion invoked as part of another implicit conversion of the
    same type (i.e. from ``A`` to ``B``) will fail.
.. _static_properties:

Static properties
=================

The section on :ref:`properties` discussed the creation of instance properties
that are implemented in terms of C++ getters and setters.

Static properties can also be created in a similar way to expose getters and
setters of static class attributes. Note that the implicit ``self`` argument
also exists in this case and is used to pass the Python ``type`` subclass
instance. This parameter will often not be needed by the C++ side, and the
following example illustrates how to instantiate a lambda getter function that
ignores it:

.. code-block:: cpp

    py::class_<Foo>(m, "Foo")
        .def_property_readonly_static("foo", [](py::object /* self */) { return Foo(); });

Operator overloading
====================

Suppose that we're given the following ``Vector2`` class with a vector addition
and scalar multiplication operation, all implemented using overloaded operators
in C++.

.. code-block:: cpp

    class Vector2 {
    public:
        Vector2(float x, float y) : x(x), y(y) { }

        Vector2 operator+(const Vector2 &v) const { return Vector2(x + v.x, y + v.y); }
        Vector2 operator*(float value) const { return Vector2(x * value, y * value); }
        Vector2& operator+=(const Vector2 &v) { x += v.x; y += v.y; return *this; }
        Vector2& operator*=(float v) { x *= v; y *= v; return *this; }
        // Unary minus, required by the .def(-py::self) binding below:
        Vector2 operator-() const { return Vector2(-x, -y); }

        friend Vector2 operator*(float f, const Vector2 &v) {
            return Vector2(f * v.x, f * v.y);
        }

        std::string toString() const {
            return "[" + std::to_string(x) + ", " + std::to_string(y) + "]";
        }
    private:
        float x, y;
    };

The following snippet shows how the above operators can be conveniently exposed
to Python.

.. code-block:: cpp

    #include <pybind11/operators.h>

    PYBIND11_MODULE(example, m) {
        py::class_<Vector2>(m, "Vector2")
            .def(py::init<float, float>())
            .def(py::self + py::self)
            .def(py::self += py::self)
            .def(py::self *= float())
            .def(float() * py::self)
            .def(py::self * float())
            .def(-py::self)
            .def("__repr__", &Vector2::toString);
    }

Note that a line like

.. code-block:: cpp

    .def(py::self * float())

is really just shorthand notation for

.. code-block:: cpp

    .def("__mul__", [](const Vector2 &a, float b) {
        return a * b;
    }, py::is_operator())

This can be useful for exposing additional operators that don't exist on the
C++ side, or to perform other types of customization. The ``py::is_operator``
flag marker is needed to inform pybind11 that this is an operator, which
returns ``NotImplemented`` when invoked with incompatible arguments rather than
throwing a type error.

.. note::

    To use the more convenient ``py::self`` notation, the additional
    header file :file:`pybind11/operators.h` must be included.

.. seealso::

    The file :file:`tests/test_operator_overloading.cpp` contains a
    complete example that demonstrates how to work with overloaded operators in
    more detail.
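Since the snippet above defines a complete module named ``example``, the
resulting Python-side behaviour can be sketched as follows (the printed output
is illustrative, based on ``toString`` using ``std::to_string``):

.. code-block:: pycon

    >>> from example import Vector2
    >>> v = Vector2(1.0, 2.0) + Vector2(3.0, -1.0)
    >>> 2.0 * v    # uses the friend operator*(float, const Vector2 &)
    [8.000000, 2.000000]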
.. _pickling:

Pickling support
================

Python's ``pickle`` module provides a powerful facility to serialize and
de-serialize a Python object graph into a binary data stream. To pickle and
unpickle C++ classes using pybind11, a ``py::pickle()`` definition must be
provided. Suppose the class in question has the following signature:

.. code-block:: cpp

    class Pickleable {
    public:
        Pickleable(const std::string &value) : m_value(value) { }
        const std::string &value() const { return m_value; }

        void setExtra(int extra) { m_extra = extra; }
        int extra() const { return m_extra; }
    private:
        std::string m_value;
        int m_extra = 0;
    };

Pickling support in Python is enabled by defining the ``__setstate__`` and
``__getstate__`` methods [#f3]_. For pybind11 classes, use ``py::pickle()``
to bind these two functions:

.. code-block:: cpp

    py::class_<Pickleable>(m, "Pickleable")
        .def(py::init<std::string>())
        .def("value", &Pickleable::value)
        .def("extra", &Pickleable::extra)
        .def("setExtra", &Pickleable::setExtra)
        .def(py::pickle(
            [](const Pickleable &p) { // __getstate__
                /* Return a tuple that fully encodes the state of the object */
                return py::make_tuple(p.value(), p.extra());
            },
            [](py::tuple t) { // __setstate__
                if (t.size() != 2)
                    throw std::runtime_error("Invalid state!");

                /* Create a new C++ instance */
                Pickleable p(t[0].cast<std::string>());

                /* Assign any additional state */
                p.setExtra(t[1].cast<int>());

                return p;
            }
        ));

The ``__setstate__`` part of the ``py::pickle()`` definition follows the same
rules as the single-argument version of ``py::init()``. The return type can be
a value, pointer or holder type. See :ref:`custom_constructors` for details.

An instance can now be pickled as follows:

.. code-block:: python

    try:
        import cPickle as pickle  # Use cPickle on Python 2.7
    except ImportError:
        import pickle

    p = Pickleable("test_value")
    p.setExtra(15)
    data = pickle.dumps(p, 2)

.. note::

    Note that only the cPickle module is supported on Python 2.7.

    The second argument to ``dumps`` is also crucial: it selects the pickle
    protocol version 2, since the older version 1 is not supported. Newer
    versions are also fine; for instance, specify ``-1`` to always use the
    latest available version. Beware: failure to follow these instructions
    will cause important pybind11 memory allocation routines to be skipped
    during unpickling, which will likely lead to memory corruption and/or
    segmentation faults.

.. seealso::

    The file :file:`tests/test_pickling.cpp` contains a complete example
    that demonstrates how to pickle and unpickle types using pybind11 in more
    detail.

.. [#f3] http://docs.python.org/3/library/pickle.html#pickling-class-instances

Deepcopy support
================

Python normally uses references in assignments. Sometimes a real copy is needed
to prevent changing all copies. The ``copy`` module [#f5]_ provides these
capabilities.

On Python 3, a class with pickle support is automatically also (deep)copy
compatible. However, performance can be improved by adding custom ``__copy__``
and ``__deepcopy__`` methods. With Python 2.7, these custom methods are
mandatory for (deep)copy compatibility, because pybind11 only supports cPickle.

For simple classes (deep)copy can be enabled by using the copy constructor,
which should look as follows:

.. code-block:: cpp

    py::class_<Copyable>(m, "Copyable")
        .def("__copy__", [](const Copyable &self) {
            return Copyable(self);
        })
        .def("__deepcopy__", [](const Copyable &self, py::dict) {
            return Copyable(self);
        }, "memo"_a);

.. note::

    Dynamic attributes will not be copied in this example.

.. [#f5] https://docs.python.org/3/library/copy.html
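Usage then mirrors any other Python class. A minimal sketch, assuming
``Copyable`` is bound as above in a module named ``example`` and that a
default constructor is also bound (both assumptions for illustration):

.. code-block:: pycon

    >>> import copy, example
    >>> a = example.Copyable()
    >>> b = copy.deepcopy(a)  # invokes the __deepcopy__ binding above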
Multiple Inheritance
====================

pybind11 can create bindings for types that derive from multiple base types
(aka. *multiple inheritance*). To do so, specify all bases in the template
arguments of the ``class_`` declaration:

.. code-block:: cpp

    py::class_<MyType, BaseType1, BaseType2, BaseType3>(m, "MyType")
       ...

The base types can be specified in arbitrary order, and they can even be
interspersed with alias types and holder types (discussed earlier in this
document)---pybind11 will automatically find out which is which. The only
requirement is that the first template argument is the type to be declared.

It is also permitted to inherit multiply from exported C++ classes in Python,
as well as inheriting from multiple Python and/or pybind11-exported classes.

There is one caveat regarding the implementation of this feature:

When only one base type is specified for a C++ type that actually has multiple
bases, pybind11 will assume that it does not participate in multiple
inheritance, which can lead to undefined behavior. In such cases, add the tag
``multiple_inheritance`` to the class constructor:

.. code-block:: cpp

    py::class_<MyType, BaseType2>(m, "MyType", py::multiple_inheritance());

The tag is redundant and does not need to be specified when multiple base types
are listed.
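With all bases listed, instances of the bound type satisfy ``isinstance``
checks against each base on the Python side. A minimal sketch, assuming
``MyType`` and its bases are bound with suitable constructors in a module
named ``example`` (all names are illustrative):

.. code-block:: pycon

    >>> import example
    >>> obj = example.MyType()
    >>> isinstance(obj, example.BaseType1) and isinstance(obj, example.BaseType2)
    True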
.. _module_local:

Module-local class bindings
===========================

When creating a binding for a class, pybind11 by default makes that binding
"global" across modules. What this means is that a type defined in one module
can be returned from any module resulting in the same Python type. For example,
this allows the following:

.. code-block:: cpp

    // In the module1.cpp binding code for module1:
    py::class_<Pet>(m, "Pet")
        .def(py::init<std::string>())
        .def_readonly("name", &Pet::name);

.. code-block:: cpp

    // In the module2.cpp binding code for module2:
    m.def("create_pet", [](std::string name) { return new Pet(name); });

.. code-block:: pycon

    >>> from module1 import Pet
    >>> from module2 import create_pet
    >>> pet1 = Pet("Kitty")
    >>> pet2 = create_pet("Doggy")
    >>> pet2.name()
    'Doggy'

When writing binding code for a library, this is usually desirable: this
allows, for example, splitting up a complex library into multiple Python
modules.

In some cases, however, this can cause conflicts. For example, suppose two
unrelated modules make use of an external C++ library and each provide custom
bindings for one of that library's classes. This will result in an error when
a Python program attempts to import both modules (directly or indirectly)
because of conflicting definitions on the external type:

.. code-block:: cpp

    // dogs.cpp

    // Binding for external library class:
    py::class_<pets::Pet>(m, "Pet")
        .def("name", &pets::Pet::name);

    // Binding for local extension class:
    py::class_<Dog, pets::Pet>(m, "Dog")
        .def(py::init<std::string>());

.. code-block:: cpp

    // cats.cpp, in a completely separate project from the above dogs.cpp.

    // Binding for external library class:
    py::class_<pets::Pet>(m, "Pet")
        .def("get_name", &pets::Pet::name);

    // Binding for local extending class:
    py::class_<Cat, pets::Pet>(m, "Cat")
        .def(py::init<std::string>());

.. code-block:: pycon

    >>> import cats
    >>> import dogs
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ImportError: generic_type: type "Pet" is already registered!

To get around this, you can tell pybind11 to keep the external class binding
localized to the module by passing the ``py::module_local()`` attribute into
the ``py::class_`` constructor:

.. code-block:: cpp

    // Pet binding in dogs.cpp:
    py::class_<pets::Pet>(m, "Pet", py::module_local())
        .def("name", &pets::Pet::name);

.. code-block:: cpp

    // Pet binding in cats.cpp:
    py::class_<pets::Pet>(m, "Pet", py::module_local())
        .def("get_name", &pets::Pet::name);

This makes the Python-side ``dogs.Pet`` and ``cats.Pet`` into distinct classes,
avoiding the conflict and allowing both modules to be loaded. C++ code in the
``dogs`` module that casts or returns a ``Pet`` instance will result in a
``dogs.Pet`` Python instance, while C++ code in the ``cats`` module will result
in a ``cats.Pet`` Python instance.

This does come with two caveats, however: First, external modules cannot return
or cast a ``Pet`` instance to Python (unless they also provide their own local
bindings). Second, from the Python point of view they are two distinct classes.

Note that the locality only applies in the C++ -> Python direction. When
passing such a ``py::module_local`` type into a C++ function, the module-local
classes are still considered. This means that if the following function is
added to any module (including but not limited to the ``cats`` and ``dogs``
modules above) it will be callable with either a ``dogs.Pet`` or ``cats.Pet``
argument:

.. code-block:: cpp

    m.def("pet_name", [](const pets::Pet &pet) { return pet.name(); });

For example, suppose the above function is added to each of ``cats.cpp``,
``dogs.cpp`` and ``frogs.cpp`` (where ``frogs.cpp`` is some other module that
does *not* bind ``Pets`` at all).

.. code-block:: pycon

    >>> import cats, dogs, frogs  # No error because of the added py::module_local()
    >>> mycat, mydog = cats.Cat("Fluffy"), dogs.Dog("Rover")
    >>> (cats.pet_name(mycat), dogs.pet_name(mydog))
    ('Fluffy', 'Rover')
    >>> (cats.pet_name(mydog), dogs.pet_name(mycat), frogs.pet_name(mycat))
    ('Rover', 'Fluffy', 'Fluffy')

It is possible to use ``py::module_local()`` registrations in one module even
if another module registers the same type globally: within the module with the
module-local definition, all C++ instances will be cast to the associated bound
Python type. In other modules any such values are converted to the global
Python type created elsewhere.

.. note::

    STL bindings (as provided via the optional :file:`pybind11/stl_bind.h`
    header) apply ``py::module_local`` by default when the bound type might
    conflict with other modules; see :ref:`stl_bind` for details.

.. note::

    The localization of the bound types is actually tied to the shared object
    or binary generated by the compiler/linker. For typical modules created
    with ``PYBIND11_MODULE()``, this distinction is not significant. It is
    possible, however, when :ref:`embedding` to embed multiple modules in the
    same binary (see :ref:`embedding_modules`). In such a case, the
    localization will apply across all embedded modules within the same binary.

.. seealso::

    The file :file:`tests/test_local_bindings.cpp` contains additional examples
    that demonstrate how ``py::module_local()`` works.

Binding protected member functions
==================================

It's normally not possible to expose ``protected`` member functions to Python:

.. code-block:: cpp

    class A {
    protected:
        int foo() const { return 42; }
    };

    py::class_<A>(m, "A")
        .def("foo", &A::foo); // error: 'foo' is a protected member of 'A'

On one hand, this is good because non-``public`` members aren't meant to be
accessed from the outside. But we may want to make use of ``protected``
functions in derived Python classes.

The following pattern makes this possible:

.. code-block:: cpp

    class A {
    protected:
        int foo() const { return 42; }
    };

    class Publicist : public A { // helper type for exposing protected functions
    public:
        using A::foo; // inherited with different access modifier
    };

    py::class_<A>(m, "A") // bind the primary class
        .def("foo", &Publicist::foo); // expose protected methods via the publicist

This works because ``&Publicist::foo`` is exactly the same function as
``&A::foo`` (same signature and address), just with a different access
modifier. The only purpose of the ``Publicist`` helper class is to make the
function name ``public``.
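From Python, the bound method then behaves like any public method. A minimal
sketch, assuming the binding above lives in a module named ``example`` and
that a default constructor for ``A`` is also bound (both assumptions for
illustration):

.. code-block:: pycon

    >>> import example
    >>> example.A().foo()
    42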
If the intent is to expose ``protected`` ``virtual`` functions which can be
overridden in Python, the publicist pattern can be combined with the previously
described trampoline:

.. code-block:: cpp

    class A {
    public:
        virtual ~A() = default;

    protected:
        virtual int foo() const { return 42; }
    };

    class Trampoline : public A {
    public:
        int foo() const override { PYBIND11_OVERRIDE(int, A, foo, ); }
    };

    class Publicist : public A {
    public:
        using A::foo;
    };

    py::class_<A, Trampoline>(m, "A") // <-- `Trampoline` here
        .def("foo", &Publicist::foo); // <-- `Publicist` here, not `Trampoline`!

.. note::

    MSVC 2015 has a compiler bug (fixed in version 2017) which
    requires a more explicit function binding in the form of
    ``.def("foo", static_cast<int (A::*)() const>(&Publicist::foo));``
    where ``int (A::*)() const`` is the type of ``A::foo``.

Binding final classes
=====================

Some classes may not be appropriate to inherit from. In C++11, classes can use
the ``final`` specifier to ensure that a class cannot be inherited from. The
``py::is_final`` attribute can be used to ensure that Python classes cannot
inherit from a specified type. The underlying C++ type does not need to be
declared final.

.. code-block:: cpp

    class IsFinal final {};

    py::class_<IsFinal>(m, "IsFinal", py::is_final());

When you try to inherit from such a class in Python, you will now get this
error:

.. code-block:: pycon

    >>> class PyFinalChild(IsFinal):
    ...     pass
    ...
    TypeError: type 'IsFinal' is not an acceptable base type

.. note:: This attribute is currently ignored on PyPy

.. versionadded:: 2.6

Binding classes with template parameters
========================================

pybind11 can also wrap classes that have template parameters. Consider these
classes:

.. code-block:: cpp

    struct Cat {};
    struct Dog {};

    template <typename PetType>
    struct Cage {
        Cage(PetType& pet);
        PetType& get();
    };

C++ templates may only be instantiated at compile time, so pybind11 can only
wrap instantiated templated classes. You cannot wrap a non-instantiated
template:

.. code-block:: cpp

    // BROKEN (this will not compile)
    py::class_<Cage>(m, "Cage");
        .def("get", &Cage::get);

You must explicitly specify each template/type combination that you want to
wrap separately.

.. code-block:: cpp

    // ok
    py::class_<Cage<Cat>>(m, "CatCage")
        .def("get", &Cage<Cat>::get);

    // ok
    py::class_<Cage<Dog>>(m, "DogCage")
        .def("get", &Cage<Dog>::get);

If your class methods have template parameters you can wrap those as well, but
once again each instantiation must be explicitly specified:

.. code-block:: cpp

    template <typename T>
    struct MyClass {
        template <typename V>
        T fn(V v);
    };

    py::class_<MyClass<int>>(m, "MyClassT")
        .def("fn", &MyClass<int>::fn<std::string>);
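On the Python side, each instantiation simply appears as a separate, unrelated
class. A minimal sketch, assuming the bindings above are compiled into a
module named ``example`` and that constructors for ``Cat`` and ``Cage<Cat>``
are also bound (assumptions for illustration):

.. code-block:: pycon

    >>> import example
    >>> cage = example.CatCage(example.Cat())
    >>> cage.get()  # returns the contained Cat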
Custom automatic downcasters
============================

As explained in :ref:`inheritance`, pybind11 comes with built-in understanding
of the dynamic type of polymorphic objects in C++; that is, returning a Pet to
Python produces a Python object that knows it's wrapping a Dog, if Pet has
virtual methods and pybind11 knows about Dog and this Pet is in fact a Dog.
Sometimes, you might want to provide this automatic downcasting behavior when
creating bindings for a class hierarchy that does not use standard C++
polymorphism, such as LLVM [#f4]_. As long as there's some way to determine at
runtime whether a downcast is safe, you can proceed by specializing the
``pybind11::polymorphic_type_hook`` template:

.. code-block:: cpp

    enum class PetKind { Cat, Dog, Zebra };
    struct Pet {   // Not polymorphic: has no virtual methods
        const PetKind kind;
        int age = 0;
      protected:
        Pet(PetKind _kind) : kind(_kind) {}
    };
    struct Dog : Pet {
        Dog() : Pet(PetKind::Dog) {}
        std::string sound = "woof!";
        std::string bark() const { return sound; }
    };

    namespace pybind11 {
        template<> struct polymorphic_type_hook<Pet> {
            static const void *get(const Pet *src, const std::type_info*& type) {
                // note that src may be nullptr
                if (src && src->kind == PetKind::Dog) {
                    type = &typeid(Dog);
                    return static_cast<const Dog*>(src);
                }
                return src;
            }
        };
    } // namespace pybind11

When pybind11 wants to convert a C++ pointer of type ``Base*`` to a Python
object, it calls ``polymorphic_type_hook<Base>::get()`` to determine if a
downcast is possible. The ``get()`` function should use whatever runtime
information is available to determine if its ``src`` parameter is in fact an
instance of some class ``Derived`` that inherits from ``Base``. If it finds
such a ``Derived``, it sets ``type = &typeid(Derived)`` and returns a pointer
to the ``Derived`` object that contains ``src``. Otherwise, it just returns
``src``, leaving ``type`` at its default value of nullptr. If you set ``type``
to a type that pybind11 doesn't know about, no downcasting will occur, and the
original ``src`` pointer will be used with its static type ``Base*``.

It is critical that the returned pointer and ``type`` argument of ``get()``
agree with each other: if ``type`` is set to something non-null, the returned
pointer must point to the start of an object whose type is ``type``. If the
hierarchy being exposed uses only single inheritance, a simple ``return src;``
will achieve this just fine, but in the general case, you must cast ``src`` to
the appropriate derived-class pointer (e.g. using
``static_cast<const Derived *>(src)``) before allowing it to be returned as a
``void*``.

.. [#f4] https://llvm.org/docs/HowToSetUpLLVMStyleRTTI.html

.. note::

    pybind11's standard support for downcasting objects whose types have
    virtual methods is implemented using ``polymorphic_type_hook`` too, using
    the standard C++ ability to determine the most-derived type of a
    polymorphic object using ``typeid()`` and to cast a base pointer to that
    most-derived type (even if you don't know what it is) using
    ``dynamic_cast<void*>``.

.. seealso::

    The file :file:`tests/test_tagbased_polymorphic.cpp` contains a more
    complete example, including a demonstration of how to provide automatic
    downcasting for an entire class hierarchy without writing one get()
    function for each class.

Accessing the type object
=========================

You can get the type object from a C++ class that has already been registered
using:

.. code-block:: cpp

    py::type T_py = py::type::of<T>();

You can directly use ``py::type::of(ob)`` to get the type object from any
python object, just like ``type(ob)`` in Python.

.. note::

    Other types, like ``py::type::of<int>()``, do not work, see
    :ref:`type-conversions`.

.. versionadded:: 2.6
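The returned object is the very class object that the ``py::class_``
registration created. A minimal sketch of the observable behaviour, assuming a
hypothetical module ``example`` binds ``T`` and also exposes
``m.def("type_of_T", []() { return py::type::of<T>(); });`` (both the module
and the helper are assumptions for illustration):

.. code-block:: pycon

    >>> import example
    >>> example.type_of_T() is example.T
    True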
Custom type setup
=================

For advanced use cases, such as enabling garbage collection support, you may
wish to directly manipulate the ``PyHeapTypeObject`` corresponding to a
``py::class_`` definition.

You can do that using ``py::custom_type_setup``:

.. code-block:: cpp

    struct OwnsPythonObjects {
        py::object value = py::none();
    };

    py::class_<OwnsPythonObjects> cls(
        m, "OwnsPythonObjects", py::custom_type_setup([](PyHeapTypeObject *heap_type) {
            auto *type = &heap_type->ht_type;
            type->tp_flags |= Py_TPFLAGS_HAVE_GC;
            type->tp_traverse = [](PyObject *self_base, visitproc visit, void *arg) {
                auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
                Py_VISIT(self.value.ptr());
                return 0;
            };
            type->tp_clear = [](PyObject *self_base) {
                auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
                self.value = py::none();
                return 0;
            };
        }));
    cls.def(py::init<>());
    cls.def_readwrite("value", &OwnsPythonObjects::value);

.. versionadded:: 2.8
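With ``tp_traverse`` and ``tp_clear`` installed as above, CPython's cyclic
garbage collector can reclaim reference cycles that pass through the C++
instance. A minimal sketch, assuming the binding above is compiled into a
module named ``example`` (the module name is illustrative):

.. code-block:: pycon

    >>> import gc, example
    >>> obj = example.OwnsPythonObjects()
    >>> obj.value = obj   # create a reference cycle through the C++ object
    >>> del obj
    >>> _ = gc.collect()  # the cycle is now collectable rather than leaked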