repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url

molmod/molmod | molmod/io/atrj.py | SectionFile._skip_section | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/atrj.py#L51-L55

def _skip_section(self):
    """Skip a section"""
    self._last = self._f.readline()
    while len(self._last) > 0 and len(self._last[0].strip()) == 0:
        self._last = self._f.readline()

molmod/molmod | molmod/io/atrj.py | SectionFile._read_section | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/atrj.py#L57-L64

def _read_section(self):
    """Read and return an entire section"""
    lines = [self._last[self._last.find(":")+1:]]
    self._last = self._f.readline()
    while len(self._last) > 0 and len(self._last[0].strip()) == 0:
        lines.append(self._last)
        self._last = self._f.readline()
    return lines

molmod/molmod | molmod/io/atrj.py | SectionFile.get_next | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/atrj.py#L66-L70

def get_next(self, label):
    """Get the next section with the given label"""
    while self._get_current_label() != label:
        self._skip_section()
    return self._read_section()

molmod/molmod | molmod/io/atrj.py | ATRJReader._read_frame | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/atrj.py#L107-L127

def _read_frame(self):
    """Read a single frame from the trajectory"""
    self._secfile.get_next("Frame Number")
    frame = ATRJFrame()
    # Read the time and energy
    energy_lines = self._secfile.get_next("Time/Energy")
    energy_words = energy_lines[0].split()
    frame.time = float(energy_words[0])*picosecond
    frame.step = int(energy_words[1])
    frame.total_energy = float(energy_words[2])*kcalmol
    # Read the coordinates
    coord_lines = self._secfile.get_next("Coordinates")
    frame.coordinates = np.zeros((self.num_atoms, 3), float)
    for index, line in enumerate(coord_lines):
        words = line.split()
        frame.coordinates[index, 0] = float(words[1])
        frame.coordinates[index, 1] = float(words[2])
        frame.coordinates[index, 2] = float(words[3])
    frame.coordinates *= angstrom
    # Done
    return frame

molmod/molmod | molmod/io/cube.py | get_cube_points | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cube.py#L37-L56

def get_cube_points(origin, axes, nrep):
    '''Generate the Cartesian coordinates of the points in a cube file

    *Arguments:*

    origin
        The Cartesian coordinate for the origin of the grid.

    axes
        The 3 by 3 array with the grid spacings as rows.

    nrep
        The number of grid points along each axis.
    '''
    points = np.zeros((nrep[0], nrep[1], nrep[2], 3), float)
    points[:] = origin
    points[:] += np.outer(np.arange(nrep[0], dtype=float), axes[0]).reshape((-1,1,1,3))
    points[:] += np.outer(np.arange(nrep[1], dtype=float), axes[1]).reshape((1,-1,1,3))
    points[:] += np.outer(np.arange(nrep[2], dtype=float), axes[2]).reshape((1,1,-1,3))
    return points

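A minimal usage sketch for the routine above. The import path (molmod.io.cube) is inferred from the file location, and the origin, spacing and grid size are made-up illustrative values.

import numpy as np
from molmod.io.cube import get_cube_points   # assumed import path (file: molmod/io/cube.py)

origin = np.zeros(3)                  # grid starts at the Cartesian origin
axes = np.diag([0.2, 0.2, 0.2])       # one grid spacing vector per row
nrep = np.array([10, 12, 14])         # number of grid points along each axis

points = get_cube_points(origin, axes, nrep)
print(points.shape)       # (10, 12, 14, 3): one Cartesian vector per grid point
print(points[1, 0, 0])    # equals origin + axes[0], i.e. [0.2, 0.0, 0.0]
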
molmod/molmod | molmod/io/cube.py | Cube.from_file | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cube.py#L167-L190

def from_file(cls, filename):
    '''Create a cube object by loading data from a file.

    *Arguments:*

    filename
        The file to load. It must contain the header with the
        description of the grid and the molecule.
    '''
    with open(filename) as f:
        molecule, origin, axes, nrep, subtitle, nuclear_charges = \
            read_cube_header(f)
        data = np.zeros(tuple(nrep), float)
        tmp = data.ravel()
        counter = 0
        while True:
            line = f.readline()
            if len(line) == 0:
                break
            words = line.split()
            for word in words:
                tmp[counter] = float(word)
                counter += 1
    return cls(molecule, origin, axes, nrep, data, subtitle, nuclear_charges)

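A short usage sketch for the loader above, under the same assumed import path; "density.cube" is a hypothetical file name.

from molmod.io.cube import Cube   # assumed import path (file: molmod/io/cube.py)

cube = Cube.from_file("density.cube")      # hypothetical Gaussian cube file
print(cube.data.shape)                     # one value per grid point
doubled = cube.copy(newdata=2*cube.data)   # reuse the grid and molecule, new data
doubled.write_to_file("density_doubled.cube")
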
molmod/molmod | molmod/io/cube.py | Cube.write_to_file | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cube.py#L226-L258

def write_to_file(self, fn):
    '''Write the cube to a file in the Gaussian cube format.'''
    with open(fn, 'w') as f:
        f.write(' {}\n'.format(self.molecule.title))
        f.write(' {}\n'.format(self.subtitle))
        def write_grid_line(n, v):
            f.write('%5i % 11.6f % 11.6f % 11.6f\n' % (n, v[0], v[1], v[2]))
        write_grid_line(self.molecule.size, self.origin)
        write_grid_line(self.data.shape[0], self.axes[0])
        write_grid_line(self.data.shape[1], self.axes[1])
        write_grid_line(self.data.shape[2], self.axes[2])
        def write_atom_line(n, nc, v):
            f.write('%5i % 11.6f % 11.6f % 11.6f % 11.6f\n' % (n, nc, v[0], v[1], v[2]))
        for i in range(self.molecule.size):
            write_atom_line(self.molecule.numbers[i], self.nuclear_charges[i],
                            self.molecule.coordinates[i])
        for i0 in range(self.data.shape[0]):
            for i1 in range(self.data.shape[1]):
                col = 0
                for i2 in range(self.data.shape[2]):
                    value = self.data[i0, i1, i2]
                    if col % 6 == 5:
                        f.write(' % 12.5e\n' % value)
                    else:
                        f.write(' % 12.5e' % value)
                    col += 1
                if col % 6 != 5:
                    f.write('\n')

molmod/molmod | molmod/io/cube.py | Cube.copy | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cube.py#L260-L267

def copy(self, newdata=None):
    '''Return a copy of the cube with optionally new data.'''
    if newdata is None:
        newdata = self.data.copy()
    return self.__class__(
        self.molecule, self.origin.copy(), self.axes.copy(),
        self.nrep.copy(), newdata, self.subtitle, self.nuclear_charges
    )

molmod/molmod | molmod/io/cp2k.py | CP2KSection._consistent | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L63-L80

def _consistent(self):
    """Checks the consistency between self.__index and self.__order"""
    if len(self.__order) != sum(len(values) for values in self.__index.values()):
        return False
    import copy
    tmp = copy.copy(self.__order)
    for key, values in self.__index.items():
        for value in values:
            if value.name != key:
                return False
            if value in tmp:
                tmp.remove(value)
            else:
                return False
            if isinstance(value, CP2KSection):
                if not value._consistent():
                    return False
    return True

molmod/molmod | molmod/io/cp2k.py | CP2KSection.append | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L177-L183

def append(self, child):
    """Add a child section or keyword"""
    if not (isinstance(child, CP2KSection) or isinstance(child, CP2KKeyword)):
        raise TypeError("The child must be a CP2KSection or a CP2KKeyword, got: %s." % child)
    l = self.__index.setdefault(child.name, [])
    l.append(child)
    self.__order.append(child)

molmod/molmod | molmod/io/cp2k.py | CP2KSection.dump_children | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L193-L196

def dump_children(self, f, indent=''):
    """Dump the children of the current section to a file-like object"""
    for child in self.__order:
        child.dump(f, indent+' ')

molmod/molmod | molmod/io/cp2k.py | CP2KSection.dump | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L198-L202

def dump(self, f, indent=''):
    """Dump this section and its children to a file-like object"""
    print(("%s&%s %s" % (indent, self.__name, self.section_parameters)).rstrip(), file=f)
    self.dump_children(f, indent)
    print("%s&END %s" % (indent, self.__name), file=f)

molmod/molmod | molmod/io/cp2k.py | CP2KSection.readline | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L204-L213

def readline(self, f):
    """A helper method that only reads uncommented lines"""
    while True:
        line = f.readline()
        if len(line) == 0:
            raise EOFError
        line = line[:line.find('#')]
        line = line.strip()
        if len(line) > 0:
            return line

molmod/molmod | molmod/io/cp2k.py | CP2KSection.load_children | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L215-L232

def load_children(self, f):
    """Load the children of this section from a file-like object"""
    while True:
        line = self.readline(f)
        if line[0] == '&':
            if line[1:].startswith("END"):
                check_name = line[4:].strip().upper()
                if check_name != self.__name:
                    raise FileFormatError("CP2KSection end mismatch, pos=%s", f.tell())
                break
            else:
                section = CP2KSection()
                section.load(f, line)
                self.append(section)
        else:
            keyword = CP2KKeyword()
            keyword.load(line)
            self.append(keyword)

molmod/molmod | molmod/io/cp2k.py | CP2KSection.load | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L235-L247

def load(self, f, line=None):
    """Load this section from a file-like object"""
    if line is None:
        # in case the file contains only a fragment of an input file,
        # this is useful.
        line = f.readline()
    words = line[1:].split()
    self.__name = words[0].upper()
    self.section_parameters = " ".join(words[1:])
    try:
        self.load_children(f)
    except EOFError:
        raise FileFormatError("Unexpected end of file, section '%s' not ended." % self.__name)

molmod/molmod | molmod/io/cp2k.py | CP2KKeyword.dump | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L273-L278

def dump(self, f, indent=''):
    """Dump this keyword to a file-like object"""
    if self.__unit is None:
        print(("%s%s %s" % (indent, self.__name, self.__value)).rstrip(), file=f)
    else:
        print(("%s%s [%s] %s" % (indent, self.__name, self.__unit, self.__value)).rstrip(), file=f)

molmod/molmod | molmod/io/cp2k.py | CP2KKeyword.load | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L280-L293

def load(self, line):
    """Load this keyword from a file-like object"""
    words = line.split()
    try:
        float(words[0])
        self.__name = ""
        self.__value = " ".join(words)
    except ValueError:
        self.__name = words[0].upper()
        if len(words) > 2 and words[1][0]=="[" and words[1][-1]=="]":
            self.unit = words[1][1:-1]
            self.__value = " ".join(words[2:])
        else:
            self.__value = " ".join(words[1:])

molmod/molmod | molmod/io/cp2k.py | CP2KKeyword.set_value | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L296-L300

def set_value(self, value):
    """Set the value associated with the keyword"""
    if not isinstance(value, str):
        raise TypeError("A value must be a string, got %s." % value)
    self.__value = value

molmod/molmod | molmod/io/cp2k.py | CP2KInputFile.read_from_file | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L323-L341

def read_from_file(filename):
    """
    Arguments:
     | ``filename`` -- the filename of the input file

    Use as follows::

        >>> cp2k_inp = CP2KInputFile.read_from_file("somefile.inp")
        >>> for section in cp2k_inp:
        ...     print(section.name)
    """
    with open(filename) as f:
        result = CP2KInputFile()
        try:
            while True:
                result.load_children(f)
        except EOFError:
            pass
    return result

molmod/molmod | molmod/io/cpmd.py | CPMDTrajectoryReader._read_frame | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cpmd.py#L64-L77

def _read_frame(self):
    """Read and return the next time frame"""
    pos = np.zeros((self.num_atoms, 3), float)
    vel = np.zeros((self.num_atoms, 3), float)
    for i in range(self.num_atoms):
        line = next(self._f)
        words = line.split()
        pos[i, 0] = float(words[1])
        pos[i, 1] = float(words[2])
        pos[i, 2] = float(words[3])
        vel[i, 0] = float(words[4])
        vel[i, 1] = float(words[5])
        vel[i, 2] = float(words[6])
    return pos, vel

molmod/molmod | molmod/transformations.py | check_matrix | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L48-L55

def check_matrix(m):
    """Check the sanity of the given 4x4 transformation matrix"""
    if m.shape != (4, 4):
        raise ValueError("The argument must be a 4x4 array.")
    if max(abs(m[3, 0:3])) > eps:
        raise ValueError("The given matrix does not have correct translational part")
    if abs(m[3, 3] - 1.0) > eps:
        raise ValueError("The lower right element of the given matrix must be 1.0.")

molmod/molmod | molmod/transformations.py | superpose | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L434-L477

def superpose(ras, rbs, weights=None):
    """Compute the transformation that minimizes the RMSD between the points ras and rbs

    Arguments:
     | ``ras`` -- a ``np.array`` with 3D coordinates of geometry A, shape=(N,3)
     | ``rbs`` -- a ``np.array`` with 3D coordinates of geometry B, shape=(N,3)

    Optional arguments:
     | ``weights`` -- a numpy array with fitting weights for each coordinate, shape=(N,)

    Return value:
     | ``transformation`` -- the transformation that brings geometry A into overlap with geometry B

    Each row in ras and rbs represents a 3D coordinate. Corresponding rows
    contain the points that are brought into overlap by the fitting
    procedure. The implementation is based on the Kabsch Algorithm:
    http://dx.doi.org/10.1107%2FS0567739476001873
    """
    if weights is None:
        ma = ras.mean(axis=0)
        mb = rbs.mean(axis=0)
    else:
        total_weight = weights.sum()
        ma = np.dot(weights, ras)/total_weight
        mb = np.dot(weights, rbs)/total_weight
    # Kabsch
    if weights is None:
        A = np.dot((rbs-mb).transpose(), ras-ma)
    else:
        weights = weights.reshape((-1, 1))
        A = np.dot(((rbs-mb)*weights).transpose(), (ras-ma)*weights)
    v, s, wt = np.linalg.svd(A)
    s[:] = 1
    if np.linalg.det(np.dot(v, wt)) < 0:
        s[2] = -1
    r = np.dot(wt.T*s, v.T)
    return Complete(r, np.dot(r, -mb) + ma)

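A small self-check of this Kabsch fit, assuming the functions are importable from molmod.transformations (the file location given above); the random geometry is illustrative. As in fit_rmsd below, the returned transformation is applied to rbs and compared with ras.

import numpy as np
from molmod.transformations import Rotation, Translation, superpose   # assumed import path

# Geometry B is an exact rotated + translated copy of geometry A.
ras = np.random.normal(0, 1, (20, 3))
move = Translation(np.array([1.0, -2.0, 0.5])) * Rotation.random()
rbs = move * ras

transformation = superpose(ras, rbs)
print(abs(transformation * rbs - ras).max())   # close to zero for an exact rigid copy
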
molmod/molmod | molmod/transformations.py | fit_rmsd | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L480-L507

def fit_rmsd(ras, rbs, weights=None):
    """Fit geometry rbs onto ras, returns more info than superpose

    Arguments:
     | ``ras`` -- a numpy array with 3D coordinates of geometry A, shape=(N,3)
     | ``rbs`` -- a numpy array with 3D coordinates of geometry B, shape=(N,3)

    Optional arguments:
     | ``weights`` -- a numpy array with fitting weights for each coordinate, shape=(N,)

    Return values:
     | ``transformation`` -- the transformation that brings geometry A into overlap with geometry B
     | ``rbs_trans`` -- the transformed coordinates of geometry B
     | ``rmsd`` -- the rmsd of the distances between corresponding atoms in geometry A and B

    This is a utility routine based on the function superpose. It just
    computes rbs_trans and rmsd after calling superpose with the same
    arguments
    """
    transformation = superpose(ras, rbs, weights)
    rbs_trans = transformation * rbs
    rmsd = compute_rmsd(ras, rbs_trans)
    return transformation, rbs_trans, rmsd

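Continuing the sketch given after the superpose entry, fit_rmsd wraps superpose and also returns the fitted coordinates and the residual RMSD (same assumed import path).

from molmod.transformations import fit_rmsd   # assumed import path

transformation, rbs_trans, rmsd = fit_rmsd(ras, rbs)
print(rmsd)   # ~0 for an exact rigid-body copy; grows with noise or distortion
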
molmod/molmod | molmod/transformations.py | Translation.inv | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L94-L98

def inv(self):
    """The inverse translation"""
    result = Translation(-self.t)
    result._cache_inv = self
    return result

molmod/molmod | molmod/transformations.py | Translation.apply_to | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L100-L133

def apply_to(self, x, columns=False):
    """Apply this translation to the given object

    The argument can be several sorts of objects:

    * ``np.array`` with shape (3, )
    * ``np.array`` with shape (N, 3)
    * ``np.array`` with shape (3, N), use ``columns=True``
    * ``Translation``
    * ``Rotation``
    * ``Complete``
    * ``UnitCell``

    In case of arrays, the 3D vectors are translated. In case of trans-
    formations, a new transformation is returned that consists of this
    translation applied AFTER the given translation. In case of a unit
    cell, the original object is returned.

    This method is equivalent to ``self*x``.
    """
    if isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[0] == 3 and columns:
        return x + self.t.reshape((3,1))
    if isinstance(x, np.ndarray) and (x.shape == (3, ) or (len(x.shape) == 2 and x.shape[1] == 3)) and not columns:
        return x + self.t
    elif isinstance(x, Complete):
        return Complete(x.r, x.t + self.t)
    elif isinstance(x, Translation):
        return Translation(x.t + self.t)
    elif isinstance(x, Rotation):
        return Complete(x.r, self.t)
    elif isinstance(x, UnitCell):
        return x
    else:
        raise ValueError("Can not apply this translation to %s" % x)

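A brief illustration of the behaviour listed in the docstring above, under the same assumed import path; the vectors are arbitrary.

import numpy as np
from molmod.transformations import Translation   # assumed import path

t1 = Translation(np.array([1.0, 0.0, 0.0]))
t2 = Translation(np.array([0.0, 2.0, 0.0]))

print(t1 * np.array([0.0, 0.0, 3.0]))   # a single 3D point: [1. 0. 3.]
print((t1 * t2).t)                      # composed translation: [1. 2. 0.]
print((t1 * np.zeros((5, 3))).shape)    # (5, 3): every row shifted by t1.t
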
molmod/molmod | molmod/transformations.py | Translation.compare | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L137-L144

def compare(self, other, t_threshold=1e-3):
    """Compare two translations

    The RMSD of the translation vectors is computed. The return value
    is True when the RMSD is below the threshold, i.e. when the two
    translations are almost identical.
    """
    return compute_rmsd(self.t, other.t) < t_threshold

molmod/molmod | molmod/transformations.py | Rotation._check_r | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L153-L161

def _check_r(self, r):
    """the columns must be orthonormal"""
    if abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \
       abs(np.dot(r[:, 1], r[:, 1]) - 1) > eps or \
       abs(np.dot(r[:, 2], r[:, 2]) - 1) > eps or \
       np.dot(r[:, 0], r[:, 1]) > eps or \
       np.dot(r[:, 1], r[:, 2]) > eps or \
       np.dot(r[:, 2], r[:, 0]) > eps:
        raise ValueError("The rotation matrix is significantly non-orthonormal.")

molmod/molmod | molmod/transformations.py | Rotation.random | python | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L186-L191

def random(cls):
    """Return a random rotation"""
    axis = random_unit()
    angle = np.random.uniform(0, 2*np.pi)
    invert = bool(np.random.randint(0, 2))
    return Rotation.from_properties(angle, axis, invert)

molmod/molmod | molmod/transformations.py | Rotation.from_properties | def from_properties(cls, angle, axis, invert):
"""Initialize a rotation based on the properties"""
norm = np.linalg.norm(axis)
if norm > 0:
x = axis[0] / norm
y = axis[1] / norm
z = axis[2] / norm
c = np.cos(angle)
s = np.sin(angle)
r = (1-2*invert) * np.array([
[x*x*(1-c)+c , x*y*(1-c)-z*s, x*z*(1-c)+y*s],
[x*y*(1-c)+z*s, y*y*(1-c)+c , y*z*(1-c)-x*s],
[x*z*(1-c)-y*s, y*z*(1-c)+x*s, z*z*(1-c)+c ]
])
else:
r = np.identity(3) * (1-2*invert)
return cls(r) | python | def from_properties(cls, angle, axis, invert):
"""Initialize a rotation based on the properties"""
norm = np.linalg.norm(axis)
if norm > 0:
x = axis[0] / norm
y = axis[1] / norm
z = axis[2] / norm
c = np.cos(angle)
s = np.sin(angle)
r = (1-2*invert) * np.array([
[x*x*(1-c)+c , x*y*(1-c)-z*s, x*z*(1-c)+y*s],
[x*y*(1-c)+z*s, y*y*(1-c)+c , y*z*(1-c)-x*s],
[x*z*(1-c)-y*s, y*z*(1-c)+x*s, z*z*(1-c)+c ]
])
else:
r = np.identity(3) * (1-2*invert)
return cls(r) | [
"def",
"from_properties",
"(",
"cls",
",",
"angle",
",",
"axis",
",",
"invert",
")",
":",
"norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"axis",
")",
"if",
"norm",
">",
"0",
":",
"x",
"=",
"axis",
"[",
"0",
"]",
"/",
"norm",
"y",
"=",
"axis",
"[",
"1",
"]",
"/",
"norm",
"z",
"=",
"axis",
"[",
"2",
"]",
"/",
"norm",
"c",
"=",
"np",
".",
"cos",
"(",
"angle",
")",
"s",
"=",
"np",
".",
"sin",
"(",
"angle",
")",
"r",
"=",
"(",
"1",
"-",
"2",
"*",
"invert",
")",
"*",
"np",
".",
"array",
"(",
"[",
"[",
"x",
"*",
"x",
"*",
"(",
"1",
"-",
"c",
")",
"+",
"c",
",",
"x",
"*",
"y",
"*",
"(",
"1",
"-",
"c",
")",
"-",
"z",
"*",
"s",
",",
"x",
"*",
"z",
"*",
"(",
"1",
"-",
"c",
")",
"+",
"y",
"*",
"s",
"]",
",",
"[",
"x",
"*",
"y",
"*",
"(",
"1",
"-",
"c",
")",
"+",
"z",
"*",
"s",
",",
"y",
"*",
"y",
"*",
"(",
"1",
"-",
"c",
")",
"+",
"c",
",",
"y",
"*",
"z",
"*",
"(",
"1",
"-",
"c",
")",
"-",
"x",
"*",
"s",
"]",
",",
"[",
"x",
"*",
"z",
"*",
"(",
"1",
"-",
"c",
")",
"-",
"y",
"*",
"s",
",",
"y",
"*",
"z",
"*",
"(",
"1",
"-",
"c",
")",
"+",
"x",
"*",
"s",
",",
"z",
"*",
"z",
"*",
"(",
"1",
"-",
"c",
")",
"+",
"c",
"]",
"]",
")",
"else",
":",
"r",
"=",
"np",
".",
"identity",
"(",
"3",
")",
"*",
"(",
"1",
"-",
"2",
"*",
"invert",
")",
"return",
"cls",
"(",
"r",
")"
] | Initialize a rotation based on the properties | [
"Initialize",
"a",
"rotation",
"based",
"on",
"the",
"properties"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L194-L210 |
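The matrix built above is the Rodrigues rotation formula, with the overall sign flipped when invert is True. A small sketch with values chosen purely for illustration: a quarter turn about the z-axis maps the x-axis onto the y-axis.

    import numpy as np
    from molmod.transformations import Rotation

    rot = Rotation.from_properties(np.pi/2, np.array([0.0, 0.0, 1.0]), False)
    print(np.round(rot.r, 3))
    # approximately [[0, -1, 0], [1, 0, 0], [0, 0, 1]]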
molmod/molmod | molmod/transformations.py | Rotation.properties | def properties(self):
"""Rotation properties: angle, axis, invert"""
# determine whether an inversion rotation has been applied
invert = (np.linalg.det(self.r) < 0)
factor = {True: -1, False: 1}[invert]
# get the rotation data
# trace(r) = 1+2*cos(angle)
cos_angle = 0.5*(factor*np.trace(self.r) - 1)
if cos_angle > 1: cos_angle = 1.0
if cos_angle < -1: cos_angle = -1.0
# the antisymmetric part of the non-diagonal vector tells us something
# about sin(angle) and n.
axis = 0.5*factor*np.array([-self.r[1, 2] + self.r[2, 1], self.r[0, 2] - self.r[2, 0], -self.r[0, 1] + self.r[1, 0]])
sin_angle = np.linalg.norm(axis)
# look for the best way to normalize the axis
if (sin_angle == 0.0) and (cos_angle > 0):
axis[2] = 1.0
elif abs(sin_angle) < (1-cos_angle):
for index in range(3):
axis[index] = {True: -1, False: 1}[axis[index] < 0] * np.sqrt(abs((factor*self.r[index, index] - cos_angle) / (1 - cos_angle)))
else:
axis = axis / sin_angle
# Finally calculate the angle:
angle = np.arctan2(sin_angle, cos_angle)
return angle, axis, invert | python | def properties(self):
"""Rotation properties: angle, axis, invert"""
# determine whether an inversion rotation has been applied
invert = (np.linalg.det(self.r) < 0)
factor = {True: -1, False: 1}[invert]
# get the rotation data
# trace(r) = 1+2*cos(angle)
cos_angle = 0.5*(factor*np.trace(self.r) - 1)
if cos_angle > 1: cos_angle = 1.0
if cos_angle < -1: cos_angle = -1.0
# the antisymmetric part of the non-diagonal vector tells us something
# about sin(angle) and n.
axis = 0.5*factor*np.array([-self.r[1, 2] + self.r[2, 1], self.r[0, 2] - self.r[2, 0], -self.r[0, 1] + self.r[1, 0]])
sin_angle = np.linalg.norm(axis)
# look for the best way to normalize the axis
if (sin_angle == 0.0) and (cos_angle > 0):
axis[2] = 1.0
elif abs(sin_angle) < (1-cos_angle):
for index in range(3):
axis[index] = {True: -1, False: 1}[axis[index] < 0] * np.sqrt(abs((factor*self.r[index, index] - cos_angle) / (1 - cos_angle)))
else:
axis = axis / sin_angle
# Finally calculate the angle:
angle = np.arctan2(sin_angle, cos_angle)
return angle, axis, invert | [
"def",
"properties",
"(",
"self",
")",
":",
"# determine wether an inversion rotation has been applied",
"invert",
"=",
"(",
"np",
".",
"linalg",
".",
"det",
"(",
"self",
".",
"r",
")",
"<",
"0",
")",
"factor",
"=",
"{",
"True",
":",
"-",
"1",
",",
"False",
":",
"1",
"}",
"[",
"invert",
"]",
"# get the rotation data",
"# trace(r) = 1+2*cos(angle)",
"cos_angle",
"=",
"0.5",
"*",
"(",
"factor",
"*",
"np",
".",
"trace",
"(",
"self",
".",
"r",
")",
"-",
"1",
")",
"if",
"cos_angle",
">",
"1",
":",
"cos_angle",
"=",
"1.0",
"if",
"cos_angle",
"<",
"-",
"1",
":",
"cos_angle",
"=",
"-",
"1.0",
"# the antisymmetric part of the non-diagonal vector tell us something",
"# about sin(angle) and n.",
"axis",
"=",
"0.5",
"*",
"factor",
"*",
"np",
".",
"array",
"(",
"[",
"-",
"self",
".",
"r",
"[",
"1",
",",
"2",
"]",
"+",
"self",
".",
"r",
"[",
"2",
",",
"1",
"]",
",",
"self",
".",
"r",
"[",
"0",
",",
"2",
"]",
"-",
"self",
".",
"r",
"[",
"2",
",",
"0",
"]",
",",
"-",
"self",
".",
"r",
"[",
"0",
",",
"1",
"]",
"+",
"self",
".",
"r",
"[",
"1",
",",
"0",
"]",
"]",
")",
"sin_angle",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"axis",
")",
"# look for the best way to normalize the",
"if",
"(",
"sin_angle",
"==",
"0.0",
")",
"and",
"(",
"cos_angle",
">",
"0",
")",
":",
"axis",
"[",
"2",
"]",
"=",
"1.0",
"elif",
"abs",
"(",
"sin_angle",
")",
"<",
"(",
"1",
"-",
"cos_angle",
")",
":",
"for",
"index",
"in",
"range",
"(",
"3",
")",
":",
"axis",
"[",
"index",
"]",
"=",
"{",
"True",
":",
"-",
"1",
",",
"False",
":",
"1",
"}",
"[",
"axis",
"[",
"index",
"]",
"<",
"0",
"]",
"*",
"np",
".",
"sqrt",
"(",
"abs",
"(",
"(",
"factor",
"*",
"self",
".",
"r",
"[",
"index",
",",
"index",
"]",
"-",
"cos_angle",
")",
"/",
"(",
"1",
"-",
"cos_angle",
")",
")",
")",
"else",
":",
"axis",
"=",
"axis",
"/",
"sin_angle",
"# Finally calculate the angle:",
"angle",
"=",
"np",
".",
"arctan2",
"(",
"sin_angle",
",",
"cos_angle",
")",
"return",
"angle",
",",
"axis",
",",
"invert"
] | Rotation properties: angle, axis, invert | [
"Rotation",
"properties",
":",
"angle",
"axis",
"invert"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L213-L238 |
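A round-trip sketch for the property above (angle and axis values chosen for illustration): the (angle, axis, invert) triple recovered from a rotation matches the values it was built from.

    import numpy as np
    from molmod.transformations import Rotation

    rot = Rotation.from_properties(0.3, np.array([0.0, 0.0, 1.0]), False)
    angle, axis, invert = rot.properties
    print(np.allclose(angle, 0.3), np.allclose(axis, [0.0, 0.0, 1.0]), invert)
    # True True False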
molmod/molmod | molmod/transformations.py | Rotation.matrix | def matrix(self):
"""The 4x4 matrix representation of this rotation"""
result = np.identity(4, float)
result[0:3, 0:3] = self.r
return result | python | def matrix(self):
"""The 4x4 matrix representation of this rotation"""
result = np.identity(4, float)
result[0:3, 0:3] = self.r
return result | [
"def",
"matrix",
"(",
"self",
")",
":",
"result",
"=",
"np",
".",
"identity",
"(",
"4",
",",
"float",
")",
"result",
"[",
"0",
":",
"3",
",",
"0",
":",
"3",
"]",
"=",
"self",
".",
"r",
"return",
"result"
] | The 4x4 matrix representation of this rotation | [
"The",
"4x4",
"matrix",
"representation",
"of",
"this",
"rotation"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L241-L245 |
molmod/molmod | molmod/transformations.py | Rotation.inv | def inv(self):
"""The inverse rotation"""
result = Rotation(self.r.transpose())
result._cache_inv = self
return result | python | def inv(self):
"""The inverse rotation"""
result = Rotation(self.r.transpose())
result._cache_inv = self
return result | [
"def",
"inv",
"(",
"self",
")",
":",
"result",
"=",
"Rotation",
"(",
"self",
".",
"r",
".",
"transpose",
"(",
")",
")",
"result",
".",
"_cache_inv",
"=",
"self",
"return",
"result"
] | The inverse rotation | [
"The",
"inverse",
"rotation"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L248-L252 |
molmod/molmod | molmod/transformations.py | Rotation.apply_to | def apply_to(self, x, columns=False):
"""Apply this rotation to the given object
The argument can be several sorts of objects:
* ``np.array`` with shape (3, )
* ``np.array`` with shape (N, 3)
* ``np.array`` with shape (3, N), use ``columns=True``
* ``Translation``
* ``Rotation``
* ``Complete``
* ``UnitCell``
In case of arrays, the 3D vectors are rotated. In case of trans-
formations, a transformation is returned that consists of this
rotation applied AFTER the given translation. In case of a unit cell,
a unit cell with rotated cell vectors is returned.
This method is equivalent to ``self*x``.
"""
if isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[0] == 3 and columns:
return np.dot(self.r, x)
if isinstance(x, np.ndarray) and (x.shape == (3, ) or (len(x.shape) == 2 and x.shape[1] == 3)) and not columns:
return np.dot(x, self.r.transpose())
elif isinstance(x, Complete):
return Complete(np.dot(self.r, x.r), np.dot(self.r, x.t))
elif isinstance(x, Translation):
return Complete(self.r, np.dot(self.r, x.t))
elif isinstance(x, Rotation):
return Rotation(np.dot(self.r, x.r))
elif isinstance(x, UnitCell):
return UnitCell(np.dot(self.r, x.matrix), x.active)
else:
raise ValueError("Can not apply this rotation to %s" % x) | python | def apply_to(self, x, columns=False):
"""Apply this rotation to the given object
The argument can be several sorts of objects:
* ``np.array`` with shape (3, )
* ``np.array`` with shape (N, 3)
* ``np.array`` with shape (3, N), use ``columns=True``
* ``Translation``
* ``Rotation``
* ``Complete``
* ``UnitCell``
In case of arrays, the 3D vectors are rotated. In case of trans-
formations, a transformation is returned that consists of this
rotation applied AFTER the given translation. In case of a unit cell,
a unit cell with rotated cell vectors is returned.
This method is equivalent to ``self*x``.
"""
if isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[0] == 3 and columns:
return np.dot(self.r, x)
if isinstance(x, np.ndarray) and (x.shape == (3, ) or (len(x.shape) == 2 and x.shape[1] == 3)) and not columns:
return np.dot(x, self.r.transpose())
elif isinstance(x, Complete):
return Complete(np.dot(self.r, x.r), np.dot(self.r, x.t))
elif isinstance(x, Translation):
return Complete(self.r, np.dot(self.r, x.t))
elif isinstance(x, Rotation):
return Rotation(np.dot(self.r, x.r))
elif isinstance(x, UnitCell):
return UnitCell(np.dot(self.r, x.matrix), x.active)
else:
raise ValueError("Can not apply this rotation to %s" % x) | [
"def",
"apply_to",
"(",
"self",
",",
"x",
",",
"columns",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
"and",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"2",
"and",
"x",
".",
"shape",
"[",
"0",
"]",
"==",
"3",
"and",
"columns",
":",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"r",
",",
"x",
")",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
"and",
"(",
"x",
".",
"shape",
"==",
"(",
"3",
",",
")",
"or",
"(",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"2",
"and",
"x",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
")",
")",
"and",
"not",
"columns",
":",
"return",
"np",
".",
"dot",
"(",
"x",
",",
"self",
".",
"r",
".",
"transpose",
"(",
")",
")",
"elif",
"isinstance",
"(",
"x",
",",
"Complete",
")",
":",
"return",
"Complete",
"(",
"np",
".",
"dot",
"(",
"self",
".",
"r",
",",
"x",
".",
"r",
")",
",",
"np",
".",
"dot",
"(",
"self",
".",
"r",
",",
"x",
".",
"t",
")",
")",
"elif",
"isinstance",
"(",
"x",
",",
"Translation",
")",
":",
"return",
"Complete",
"(",
"self",
".",
"r",
",",
"np",
".",
"dot",
"(",
"self",
".",
"r",
",",
"x",
".",
"t",
")",
")",
"elif",
"isinstance",
"(",
"x",
",",
"Rotation",
")",
":",
"return",
"Rotation",
"(",
"np",
".",
"dot",
"(",
"self",
".",
"r",
",",
"x",
".",
"r",
")",
")",
"elif",
"isinstance",
"(",
"x",
",",
"UnitCell",
")",
":",
"return",
"UnitCell",
"(",
"np",
".",
"dot",
"(",
"self",
".",
"r",
",",
"x",
".",
"matrix",
")",
",",
"x",
".",
"active",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Can not apply this rotation to %s\"",
"%",
"x",
")"
] | Apply this rotation to the given object
The argument can be several sorts of objects:
* ``np.array`` with shape (3, )
* ``np.array`` with shape (N, 3)
* ``np.array`` with shape (3, N), use ``columns=True``
* ``Translation``
* ``Rotation``
* ``Complete``
* ``UnitCell``
In case of arrays, the 3D vectors are rotated. In case of trans-
formations, a transformation is returned that consists of this
rotation applied AFTER the given translation. In case of a unit cell,
a unit cell with rotated cell vectors is returned.
This method is equivalent to ``self*x``. | [
"Apply",
"this",
"rotation",
"to",
"the",
"given",
"object"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L254-L287 |
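A sketch of the two array conventions accepted above (the test points are made up): an (N, 3) array of row vectors, or a (3, N) array of column vectors with columns=True, both yield the same rotated coordinates.

    import numpy as np
    from molmod.transformations import Rotation

    rot = Rotation.random()
    xyz_rows = np.random.normal(size=(5, 3))        # one point per row
    out_rows = rot.apply_to(xyz_rows)               # equivalent to rot*xyz_rows
    out_cols = rot.apply_to(xyz_rows.transpose(), columns=True)
    print(np.allclose(out_rows.transpose(), out_cols))  # True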
molmod/molmod | molmod/transformations.py | Rotation.compare | def compare(self, other, r_threshold=1e-3):
"""Compare two rotations
The RMSD of the rotation matrices is computed. The return value
is True when the RMSD is below the threshold, i.e. when the two
rotations are almost identical.
"""
return compute_rmsd(self.r, other.r) < r_threshold | python | def compare(self, other, r_threshold=1e-3):
"""Compare two rotations
The RMSD of the rotation matrices is computed. The return value
is True when the RMSD is below the threshold, i.e. when the two
rotations are almost identical.
"""
return compute_rmsd(self.r, other.r) < r_threshold | [
"def",
"compare",
"(",
"self",
",",
"other",
",",
"r_threshold",
"=",
"1e-3",
")",
":",
"return",
"compute_rmsd",
"(",
"self",
".",
"r",
",",
"other",
".",
"r",
")",
"<",
"r_threshold"
] | Compare two rotations
The RMSD of the rotation matrices is computed. The return value
is True when the RMSD is below the threshold, i.e. when the two
rotations are almost identical. | [
"Compare",
"two",
"rotations"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L291-L298 |
molmod/molmod | molmod/transformations.py | Complete.from_properties | def from_properties(cls, angle, axis, invert, translation):
"""Initialize a transformation based on the properties"""
rot = Rotation.from_properties(angle, axis, invert)
return Complete(rot.r, translation) | python | def from_properties(cls, angle, axis, invert, translation):
"""Initialize a transformation based on the properties"""
rot = Rotation.from_properties(angle, axis, invert)
return Complete(rot.r, translation) | [
"def",
"from_properties",
"(",
"cls",
",",
"angle",
",",
"axis",
",",
"invert",
",",
"translation",
")",
":",
"rot",
"=",
"Rotation",
".",
"from_properties",
"(",
"angle",
",",
"axis",
",",
"invert",
")",
"return",
"Complete",
"(",
"rot",
".",
"r",
",",
"translation",
")"
] | Initialize a transformation based on the properties | [
"Initialize",
"a",
"transformation",
"based",
"on",
"the",
"properties"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L333-L336 |
molmod/molmod | molmod/transformations.py | Complete.cast | def cast(cls, c):
"""Convert the first argument into a Complete object"""
if isinstance(c, Complete):
return c
elif isinstance(c, Translation):
return Complete(np.identity(3, float), c.t)
elif isinstance(c, Rotation):
return Complete(c.r, np.zeros(3, float)) | python | def cast(cls, c):
"""Convert the first argument into a Complete object"""
if isinstance(c, Complete):
return c
elif isinstance(c, Translation):
return Complete(np.identity(3, float), c.t)
elif isinstance(c, Rotation):
return Complete(c.r, np.zeros(3, float)) | [
"def",
"cast",
"(",
"cls",
",",
"c",
")",
":",
"if",
"isinstance",
"(",
"c",
",",
"Complete",
")",
":",
"return",
"c",
"elif",
"isinstance",
"(",
"c",
",",
"Translation",
")",
":",
"return",
"Complete",
"(",
"np",
".",
"identity",
"(",
"3",
",",
"float",
")",
",",
"c",
".",
"t",
")",
"elif",
"isinstance",
"(",
"c",
",",
"Rotation",
")",
":",
"return",
"Complete",
"(",
"c",
".",
"r",
",",
"np",
".",
"zeros",
"(",
"3",
",",
"float",
")",
")"
] | Convert the first argument into a Complete object | [
"Convert",
"the",
"first",
"argument",
"into",
"a",
"Complete",
"object"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L339-L346 |
molmod/molmod | molmod/transformations.py | Complete.about_axis | def about_axis(cls, center, angle, axis, invert=False):
"""Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False]
"""
return Translation(center) * \
Rotation.from_properties(angle, axis, invert) * \
Translation(-center) | python | def about_axis(cls, center, angle, axis, invert=False):
"""Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False]
"""
return Translation(center) * \
Rotation.from_properties(angle, axis, invert) * \
Translation(-center) | [
"def",
"about_axis",
"(",
"cls",
",",
"center",
",",
"angle",
",",
"axis",
",",
"invert",
"=",
"False",
")",
":",
"return",
"Translation",
"(",
"center",
")",
"*",
"Rotation",
".",
"from_properties",
"(",
"angle",
",",
"axis",
",",
"invert",
")",
"*",
"Translation",
"(",
"-",
"center",
")"
] | Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False] | [
"Create",
"transformation",
"that",
"represents",
"a",
"rotation",
"about",
"an",
"axis"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L349-L361 |
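A sketch of the constructor above, assuming the resulting Complete object exposes the same apply_to interface shown earlier for Rotation (center, angle and axis values are illustrative): a half turn about the z-axis through the point (1, 0, 0) carries the origin to (2, 0, 0).

    import numpy as np
    from molmod.transformations import Complete

    tf = Complete.about_axis(np.array([1.0, 0.0, 0.0]), np.pi,
                             np.array([0.0, 0.0, 1.0]))
    print(np.round(tf.apply_to(np.zeros(3)), 3))  # approximately [2. 0. 0.]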
molmod/molmod | molmod/transformations.py | Complete.properties | def properties(self):
"""Transformation properties: angle, axis, invert, translation"""
rot = Rotation(self.r)
angle, axis, invert = rot.properties
return angle, axis, invert, self.t | python | def properties(self):
"""Transformation properties: angle, axis, invert, translation"""
rot = Rotation(self.r)
angle, axis, invert = rot.properties
return angle, axis, invert, self.t | [
"def",
"properties",
"(",
"self",
")",
":",
"rot",
"=",
"Rotation",
"(",
"self",
".",
"r",
")",
"angle",
",",
"axis",
",",
"invert",
"=",
"rot",
".",
"properties",
"return",
"angle",
",",
"axis",
",",
"invert",
",",
"self",
".",
"t"
] | Transformation properties: angle, axis, invert, translation | [
"Transformation",
"properties",
":",
"angle",
"axis",
"invert",
"translation"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L372-L376 |
molmod/molmod | molmod/transformations.py | Complete.inv | def inv(self):
"""The inverse transformation"""
result = Complete(self.r.transpose(), np.dot(self.r.transpose(), -self.t))
result._cache_inv = self
return result | python | def inv(self):
"""The inverse transformation"""
result = Complete(self.r.transpose(), np.dot(self.r.transpose(), -self.t))
result._cache_inv = self
return result | [
"def",
"inv",
"(",
"self",
")",
":",
"result",
"=",
"Complete",
"(",
"self",
".",
"r",
".",
"transpose",
"(",
")",
",",
"np",
".",
"dot",
"(",
"self",
".",
"r",
".",
"transpose",
"(",
")",
",",
"-",
"self",
".",
"t",
")",
")",
"result",
".",
"_cache_inv",
"=",
"self",
"return",
"result"
] | The inverse transformation | [
"The",
"inverse",
"transformation"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L379-L383 |
molmod/molmod | molmod/transformations.py | Complete.compare | def compare(self, other, t_threshold=1e-3, r_threshold=1e-3):
"""Compare two transformations
The RMSD values of the rotation matrices and the translation vectors
are computed. The return value is True when the RMSD values are below
the thresholds, i.e. when the two transformations are almost
identical.
"""
return compute_rmsd(self.t, other.t) < t_threshold and compute_rmsd(self.r, other.r) < r_threshold | python | def compare(self, other, t_threshold=1e-3, r_threshold=1e-3):
"""Compare two transformations
The RMSD values of the rotation matrices and the translation vectors
are computed. The return value is True when the RMSD values are below
the thresholds, i.e. when the two transformations are almost
identical.
"""
return compute_rmsd(self.t, other.t) < t_threshold and compute_rmsd(self.r, other.r) < r_threshold | [
"def",
"compare",
"(",
"self",
",",
"other",
",",
"t_threshold",
"=",
"1e-3",
",",
"r_threshold",
"=",
"1e-3",
")",
":",
"return",
"compute_rmsd",
"(",
"self",
".",
"t",
",",
"other",
".",
"t",
")",
"<",
"t_threshold",
"and",
"compute_rmsd",
"(",
"self",
".",
"r",
",",
"other",
".",
"r",
")",
"<",
"r_threshold"
] | Compare two transformations
The RMSD values of the rotation matrices and the translation vectors
are computed. The return value is True when the RMSD values are below
the thresholds, i.e. when the two transformations are almost
identical. | [
"Compare",
"two",
"transformations"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L423-L431 |
molmod/molmod | molmod/io/lammps.py | LAMMPSDumpReader._read_frame | def _read_frame(self):
"""Read and return the next time frame"""
# Read one frame, we assume that the current file position is at the
# line 'ITEM: TIMESTEP' and that this line marks the beginning of a
# time frame.
line = next(self._f)
if line != 'ITEM: TIMESTEP\n':
raise FileFormatError("Expecting line 'ITEM: TIMESTEP' at the beginning of a time frame.")
try:
line = next(self._f)
step = int(line)
except ValueError:
raise FileFormatError("Could not read the step number. Expected an integer. Got '%s'" % line[:-1])
# Now we assume that the next section contains (again) the number of
# atoms.
line = next(self._f)
if line != 'ITEM: NUMBER OF ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: NUMBER OF ATOMS'.")
try:
line = next(self._f)
num_atoms = int(line)
except ValueError:
raise FileFormatError("Could not read the number of atoms. Expected an integer. Got '%s'" % line[:-1])
if num_atoms != self.num_atoms:
raise FileFormatError("A variable number of atoms is not supported.")
# The next section contains the box boundaries. We will skip it
for i in range(4):
next(self._f)
# The next and last section contains the atom related properties
line = next(self._f)
if line != 'ITEM: ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: ATOMS'.")
fields = [list() for i in range(len(self.units))]
for i in range(self.num_atoms):
line = next(self._f)
words = line.split()[1:]
for j in range(len(fields)):
fields[j].append(float(words[j]))
fields = [step] + [np.array(field)*unit for field, unit in zip(fields, self.units)]
return fields | python | def _read_frame(self):
"""Read and return the next time frame"""
# Read one frame, we assume that the current file position is at the
# line 'ITEM: TIMESTEP' and that this line marks the beginning of a
# time frame.
line = next(self._f)
if line != 'ITEM: TIMESTEP\n':
raise FileFormatError("Expecting line 'ITEM: TIMESTEP' at the beginning of a time frame.")
try:
line = next(self._f)
step = int(line)
except ValueError:
raise FileFormatError("Could not read the step number. Expected an integer. Got '%s'" % line[:-1])
# Now we assume that the next section contains (again) the number of
# atoms.
line = next(self._f)
if line != 'ITEM: NUMBER OF ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: NUMBER OF ATOMS'.")
try:
line = next(self._f)
num_atoms = int(line)
except ValueError:
raise FileFormatError("Could not read the number of atoms. Expected an integer. Got '%s'" % line[:-1])
if num_atoms != self.num_atoms:
raise FileFormatError("A variable number of atoms is not supported.")
# The next section contains the box boundaries. We will skip it
for i in range(4):
next(self._f)
# The next and last section contains the atom related properties
line = next(self._f)
if line != 'ITEM: ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: ATOMS'.")
fields = [list() for i in range(len(self.units))]
for i in range(self.num_atoms):
line = next(self._f)
words = line.split()[1:]
for j in range(len(fields)):
fields[j].append(float(words[j]))
fields = [step] + [np.array(field)*unit for field, unit in zip(fields, self.units)]
return fields | [
"def",
"_read_frame",
"(",
"self",
")",
":",
"# Read one frame, we assume that the current file position is at the",
"# line 'ITEM: TIMESTEP' and that this line marks the beginning of a",
"# time frame.",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"if",
"line",
"!=",
"'ITEM: TIMESTEP\\n'",
":",
"raise",
"FileFormatError",
"(",
"\"Expecting line 'ITEM: TIMESTEP' at the beginning of a time frame.\"",
")",
"try",
":",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"step",
"=",
"int",
"(",
"line",
")",
"except",
"ValueError",
":",
"raise",
"FileFormatError",
"(",
"\"Could not read the step number. Expected an integer. Got '%s'\"",
"%",
"line",
"[",
":",
"-",
"1",
"]",
")",
"# Now we assume that the next section contains (again) the number of",
"# atoms.",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"if",
"line",
"!=",
"'ITEM: NUMBER OF ATOMS\\n'",
":",
"raise",
"FileFormatError",
"(",
"\"Expecting line 'ITEM: NUMBER OF ATOMS'.\"",
")",
"try",
":",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"num_atoms",
"=",
"int",
"(",
"line",
")",
"except",
"ValueError",
":",
"raise",
"FileFormatError",
"(",
"\"Could not read the number of atoms. Expected an integer. Got '%s'\"",
"%",
"line",
"[",
":",
"-",
"1",
"]",
")",
"if",
"num_atoms",
"!=",
"self",
".",
"num_atoms",
":",
"raise",
"FileFormatError",
"(",
"\"A variable number of atoms is not supported.\"",
")",
"# The next section contains the box boundaries. We will skip it",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"next",
"(",
"self",
".",
"_f",
")",
"# The next and last section contains the atom related properties",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"if",
"line",
"!=",
"'ITEM: ATOMS\\n'",
":",
"raise",
"FileFormatError",
"(",
"\"Expecting line 'ITEM: ATOMS'.\"",
")",
"fields",
"=",
"[",
"list",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"units",
")",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_atoms",
")",
":",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"words",
"=",
"line",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"fields",
")",
")",
":",
"fields",
"[",
"j",
"]",
".",
"append",
"(",
"float",
"(",
"words",
"[",
"j",
"]",
")",
")",
"fields",
"=",
"[",
"step",
"]",
"+",
"[",
"np",
".",
"array",
"(",
"field",
")",
"*",
"unit",
"for",
"field",
",",
"unit",
"in",
"zip",
"(",
"fields",
",",
"self",
".",
"units",
")",
"]",
"return",
"fields"
] | Read and return the next time frame | [
"Read",
"and",
"return",
"the",
"next",
"time",
"frame"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/lammps.py#L72-L115 |
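For reference, a sketch of the frame layout this parser expects: the four box-bounds lines are skipped unread, the leading word of every atom line is ignored, and the number of value columns per atom depends on the fields/units configured when the reader is constructed (not shown in this excerpt). The numbers below are placeholders.

    ITEM: TIMESTEP
    1000
    ITEM: NUMBER OF ATOMS
    3
    (four box-bounds lines, skipped by the reader)
    ITEM: ATOMS
    1  0.0  0.0  0.0
    2  1.0  0.0  0.0
    3  0.0  1.0  0.0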
molmod/molmod | molmod/io/lammps.py | LAMMPSDumpReader._skip_frame | def _skip_frame(self):
"""Skip the next time frame"""
for line in self._f:
if line == 'ITEM: ATOMS\n':
break
for i in range(self.num_atoms):
next(self._f) | python | def _skip_frame(self):
"""Skip the next time frame"""
for line in self._f:
if line == 'ITEM: ATOMS\n':
break
for i in range(self.num_atoms):
next(self._f) | [
"def",
"_skip_frame",
"(",
"self",
")",
":",
"for",
"line",
"in",
"self",
".",
"_f",
":",
"if",
"line",
"==",
"'ITEM: ATOMS\\n'",
":",
"break",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_atoms",
")",
":",
"next",
"(",
"self",
".",
"_f",
")"
] | Skip the next time frame | [
"Skip",
"the",
"next",
"time",
"frame"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/lammps.py#L117-L123 |
molmod/molmod | molmod/periodic.py | PeriodicData._add_atom_info | def _add_atom_info(self, atom_info):
"""Add an atom info object to the database"""
self.atoms_by_number[atom_info.number] = atom_info
self.atoms_by_symbol[atom_info.symbol.lower()] = atom_info | python | def _add_atom_info(self, atom_info):
"""Add an atom info object to the database"""
self.atoms_by_number[atom_info.number] = atom_info
self.atoms_by_symbol[atom_info.symbol.lower()] = atom_info | [
"def",
"_add_atom_info",
"(",
"self",
",",
"atom_info",
")",
":",
"self",
".",
"atoms_by_number",
"[",
"atom_info",
".",
"number",
"]",
"=",
"atom_info",
"self",
".",
"atoms_by_symbol",
"[",
"atom_info",
".",
"symbol",
".",
"lower",
"(",
")",
"]",
"=",
"atom_info"
] | Add an atom info object to the database | [
"Add",
"an",
"atom",
"info",
"object",
"to",
"the",
"database"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/periodic.py#L118-L121 |
molmod/molmod | molmod/clusters.py | RuleCluster.update | def update(self, other):
"""Extend the current cluster with data from another cluster"""
Cluster.update(self, other)
self.rules.extend(other.rules) | python | def update(self, other):
"""Extend the current cluster with data from another cluster"""
Cluster.update(self, other)
self.rules.extend(other.rules) | [
"def",
"update",
"(",
"self",
",",
"other",
")",
":",
"Cluster",
".",
"update",
"(",
"self",
",",
"other",
")",
"self",
".",
"rules",
".",
"extend",
"(",
"other",
".",
"rules",
")"
] | Extend the current cluster with data from another cluster | [
"Extend",
"the",
"current",
"cluster",
"with",
"data",
"from",
"another",
"cluster"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/clusters.py#L86-L89 |
molmod/molmod | molmod/clusters.py | ClusterFactory.add_related | def add_related(self, *objects):
"""Add related items
The arguments can be individual items or cluster objects containing
several items.
When two groups of related items share one or more common members,
they will be merged into one cluster.
"""
master = None # this will become the common cluster of all related items
slaves = set([]) # set of clusters that are going to be merged in the master
solitaire = set([]) # set of new items that are not yet part of a cluster
for new in objects:
if isinstance(new, self.cls):
if master is None:
master = new
else:
slaves.add(new)
for item in new.items:
existing = self.lookup.get(item)
if existing is not None:
slaves.add(existing)
else:
cluster = self.lookup.get(new)
if cluster is None:
#print "solitaire", new
solitaire.add(new)
elif master is None:
#print "starting master", new
master = cluster
elif master != cluster:
#print "in slave", new
slaves.add(cluster)
#else:
##nothing to do
#print "new in master", new
if master is None:
master = self.cls([])
for slave in slaves:
master.update(slave)
for item in solitaire:
master.add_item(item)
for item in master.items:
self.lookup[item] = master | python | def add_related(self, *objects):
"""Add related items
The arguments can be individual items or cluster objects containing
several items.
When two groups of related items share one or more common members,
they will be merged into one cluster.
"""
master = None # this will become the common cluster of all related items
slaves = set([]) # set of clusters that are going to be merged in the master
solitaire = set([]) # set of new items that are not yet part of a cluster
for new in objects:
if isinstance(new, self.cls):
if master is None:
master = new
else:
slaves.add(new)
for item in new.items:
existing = self.lookup.get(item)
if existing is not None:
slaves.add(existing)
else:
cluster = self.lookup.get(new)
if cluster is None:
#print "solitaire", new
solitaire.add(new)
elif master is None:
#print "starting master", new
master = cluster
elif master != cluster:
#print "in slave", new
slaves.add(cluster)
#else:
##nothing to do
#print "new in master", new
if master is None:
master = self.cls([])
for slave in slaves:
master.update(slave)
for item in solitaire:
master.add_item(item)
for item in master.items:
self.lookup[item] = master | [
"def",
"add_related",
"(",
"self",
",",
"*",
"objects",
")",
":",
"master",
"=",
"None",
"# this will become the common cluster of all related items",
"slaves",
"=",
"set",
"(",
"[",
"]",
")",
"# set of clusters that are going to be merged in the master",
"solitaire",
"=",
"set",
"(",
"[",
"]",
")",
"# set of new items that are not yet part of a cluster",
"for",
"new",
"in",
"objects",
":",
"if",
"isinstance",
"(",
"new",
",",
"self",
".",
"cls",
")",
":",
"if",
"master",
"is",
"None",
":",
"master",
"=",
"new",
"else",
":",
"slaves",
".",
"add",
"(",
"new",
")",
"for",
"item",
"in",
"new",
".",
"items",
":",
"existing",
"=",
"self",
".",
"lookup",
".",
"get",
"(",
"item",
")",
"if",
"existing",
"is",
"not",
"None",
":",
"slaves",
".",
"add",
"(",
"existing",
")",
"else",
":",
"cluster",
"=",
"self",
".",
"lookup",
".",
"get",
"(",
"new",
")",
"if",
"cluster",
"is",
"None",
":",
"#print \"solitaire\", new",
"solitaire",
".",
"add",
"(",
"new",
")",
"elif",
"master",
"is",
"None",
":",
"#print \"starting master\", new",
"master",
"=",
"cluster",
"elif",
"master",
"!=",
"cluster",
":",
"#print \"in slave\", new",
"slaves",
".",
"add",
"(",
"cluster",
")",
"#else:",
"##nothing to do",
"#print \"new in master\", new",
"if",
"master",
"is",
"None",
":",
"master",
"=",
"self",
".",
"cls",
"(",
"[",
"]",
")",
"for",
"slave",
"in",
"slaves",
":",
"master",
".",
"update",
"(",
"slave",
")",
"for",
"item",
"in",
"solitaire",
":",
"master",
".",
"add_item",
"(",
"item",
")",
"for",
"item",
"in",
"master",
".",
"items",
":",
"self",
".",
"lookup",
"[",
"item",
"]",
"=",
"master"
] | Add related items
The arguments can be individual items or cluster objects containing
several items.
When two groups of related items share one or more common members,
they will be merged into one cluster. | [
"Add",
"related",
"items"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/clusters.py#L105-L151 |
molmod/molmod | molmod/io/dlpoly.py | DLPolyHistoryReader._read_frame | def _read_frame(self):
"""Read a single frame from the trajectory"""
# auxiliary read function
def read_three(msg):
"""Read three words as floating point numbers"""
line = next(self._f)
try:
return [float(line[:12]), float(line[12:24]), float(line[24:])]
except ValueError:
raise FileFormatError(msg)
frame = {}
# read the frame header line
words = next(self._f).split()
if len(words) != 6:
raise FileFormatError("The first line of each time frame must contain 6 words. (%i'th frame)" % self._counter)
if words[0] != "timestep":
raise FileFormatError("The first word of the first line of each time frame must be 'timestep'. (%i'th frame)" % self._counter)
try:
step = int(words[1])
frame["step"] = step
if int(words[2]) != self.num_atoms:
raise FileFormatError("The number of atoms has changed. (%i'th frame, %i'th step)" % (self._counter, step))
if int(words[3]) != self.keytrj:
raise FileFormatError("keytrj has changed. (%i'th frame, %i'th step)" % (self._counter, step))
if int(words[4]) != self.imcon:
raise FileFormatError("imcon has changed. (%i'th frame, %i'th step)" % (self._counter, step))
frame["timestep"] = float(words[5])*self.time_unit
frame["time"] = frame["timestep"]*step # this is ugly, or wait ... dlpoly is a bit ugly. we are not to blame!
except ValueError:
raise FileFormatError("Could not convert all numbers on the first line of the current time frame. (%i'th frame)" % self._counter)
# the three cell lines
cell = np.zeros((3, 3), float)
frame["cell"] = cell
cell_msg = "The cell lines must consist of three floating point values. (%i'th frame, %i'th step)" % (self._counter, step)
for i in range(3):
cell[:, i] = read_three(cell_msg)
cell *= self.pos_unit
# the atoms
symbols = []
frame["symbols"] = symbols
masses = np.zeros(self.num_atoms, float)
frame["masses"] = masses
charges = np.zeros(self.num_atoms, float)
frame["charges"] = charges
pos = np.zeros((self.num_atoms, 3), float)
frame["pos"] = pos
if self.keytrj > 0:
vel = np.zeros((self.num_atoms, 3), float)
frame["vel"] = vel
if self.keytrj > 1:
frc = np.zeros((self.num_atoms, 3), float)
frame["frc"] = frc
for i in range(self.num_atoms):
# the atom header line
words = next(self._f).split()
if len(words) != 4:
raise FileFormatError("The atom header line must contain 4 words. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1))
symbols.append(words[0])
try:
masses[i] = float(words[2])*self.mass_unit
charges[i] = float(words[3])
except ValueError:
raise FileFormatError("The numbers in the atom header line could not be interpreted.")
# the pos line
pos_msg = "The position lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
pos[i] = read_three(pos_msg)
if self.keytrj > 0:
vel_msg = "The velocity lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
vel[i] = read_three(vel_msg)
if self.keytrj > 1:
frc_msg = "The force lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
frc[i] = read_three(frc_msg)
pos *= self.pos_unit # convert to au
if self.keytrj > 0:
vel *= self.vel_unit # convert to au
if self.keytrj > 1:
frc *= self.frc_unit # convert to au
return frame | python | def _read_frame(self):
"""Read a single frame from the trajectory"""
# auxiliary read function
def read_three(msg):
"""Read three words as floating point numbers"""
line = next(self._f)
try:
return [float(line[:12]), float(line[12:24]), float(line[24:])]
except ValueError:
raise FileFormatError(msg)
frame = {}
# read the frame header line
words = next(self._f).split()
if len(words) != 6:
raise FileFormatError("The first line of each time frame must contain 6 words. (%i'th frame)" % self._counter)
if words[0] != "timestep":
raise FileFormatError("The first word of the first line of each time frame must be 'timestep'. (%i'th frame)" % self._counter)
try:
step = int(words[1])
frame["step"] = step
if int(words[2]) != self.num_atoms:
raise FileFormatError("The number of atoms has changed. (%i'th frame, %i'th step)" % (self._counter, step))
if int(words[3]) != self.keytrj:
raise FileFormatError("keytrj has changed. (%i'th frame, %i'th step)" % (self._counter, step))
if int(words[4]) != self.imcon:
raise FileFormatError("imcon has changed. (%i'th frame, %i'th step)" % (self._counter, step))
frame["timestep"] = float(words[5])*self.time_unit
frame["time"] = frame["timestep"]*step # this is ugly, or wait ... dlpoly is a bit ugly. we are not to blame!
except ValueError:
raise FileFormatError("Could not convert all numbers on the first line of the current time frame. (%i'th frame)" % self._counter)
# the three cell lines
cell = np.zeros((3, 3), float)
frame["cell"] = cell
cell_msg = "The cell lines must consist of three floating point values. (%i'th frame, %i'th step)" % (self._counter, step)
for i in range(3):
cell[:, i] = read_three(cell_msg)
cell *= self.pos_unit
# the atoms
symbols = []
frame["symbols"] = symbols
masses = np.zeros(self.num_atoms, float)
frame["masses"] = masses
charges = np.zeros(self.num_atoms, float)
frame["charges"] = charges
pos = np.zeros((self.num_atoms, 3), float)
frame["pos"] = pos
if self.keytrj > 0:
vel = np.zeros((self.num_atoms, 3), float)
frame["vel"] = vel
if self.keytrj > 1:
frc = np.zeros((self.num_atoms, 3), float)
frame["frc"] = frc
for i in range(self.num_atoms):
# the atom header line
words = next(self._f).split()
if len(words) != 4:
raise FileFormatError("The atom header line must contain 4 words. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1))
symbols.append(words[0])
try:
masses[i] = float(words[2])*self.mass_unit
charges[i] = float(words[3])
except ValueError:
raise FileFormatError("The numbers in the atom header line could not be interpreted.")
# the pos line
pos_msg = "The position lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
pos[i] = read_three(pos_msg)
if self.keytrj > 0:
vel_msg = "The velocity lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
vel[i] = read_three(vel_msg)
if self.keytrj > 1:
frc_msg = "The force lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
frc[i] = read_three(frc_msg)
pos *= self.pos_unit # convert to au
if self.keytrj > 0:
vel *= self.vel_unit # convert to au
if self.keytrj > 1:
frc *= self.frc_unit # convert to au
return frame | [
"def",
"_read_frame",
"(",
"self",
")",
":",
"# auxiliary read function",
"def",
"read_three",
"(",
"msg",
")",
":",
"\"\"\"Read three words as floating point numbers\"\"\"",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"try",
":",
"return",
"[",
"float",
"(",
"line",
"[",
":",
"12",
"]",
")",
",",
"float",
"(",
"line",
"[",
"12",
":",
"24",
"]",
")",
",",
"float",
"(",
"line",
"[",
"24",
":",
"]",
")",
"]",
"except",
"ValueError",
":",
"raise",
"FileFormatError",
"(",
"msg",
")",
"frame",
"=",
"{",
"}",
"# read the frame header line",
"words",
"=",
"next",
"(",
"self",
".",
"_f",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"words",
")",
"!=",
"6",
":",
"raise",
"FileFormatError",
"(",
"\"The first line of each time frame must contain 6 words. (%i'th frame)\"",
"%",
"self",
".",
"_counter",
")",
"if",
"words",
"[",
"0",
"]",
"!=",
"\"timestep\"",
":",
"raise",
"FileFormatError",
"(",
"\"The first word of the first line of each time frame must be 'timestep'. (%i'th frame)\"",
"%",
"self",
".",
"_counter",
")",
"try",
":",
"step",
"=",
"int",
"(",
"words",
"[",
"1",
"]",
")",
"frame",
"[",
"\"step\"",
"]",
"=",
"step",
"if",
"int",
"(",
"words",
"[",
"2",
"]",
")",
"!=",
"self",
".",
"num_atoms",
":",
"raise",
"FileFormatError",
"(",
"\"The number of atoms has changed. (%i'th frame, %i'th step)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
")",
")",
"if",
"int",
"(",
"words",
"[",
"3",
"]",
")",
"!=",
"self",
".",
"keytrj",
":",
"raise",
"FileFormatError",
"(",
"\"keytrj has changed. (%i'th frame, %i'th step)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
")",
")",
"if",
"int",
"(",
"words",
"[",
"4",
"]",
")",
"!=",
"self",
".",
"imcon",
":",
"raise",
"FileFormatError",
"(",
"\"imcon has changed. (%i'th frame, %i'th step)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
")",
")",
"frame",
"[",
"\"timestep\"",
"]",
"=",
"float",
"(",
"words",
"[",
"5",
"]",
")",
"*",
"self",
".",
"time_unit",
"frame",
"[",
"\"time\"",
"]",
"=",
"frame",
"[",
"\"timestep\"",
"]",
"*",
"step",
"# this is ugly, or wait ... dlpoly is a bit ugly. we are not to blame!",
"except",
"ValueError",
":",
"raise",
"FileFormatError",
"(",
"\"Could not convert all numbers on the first line of the current time frame. (%i'th frame)\"",
"%",
"self",
".",
"_counter",
")",
"# the three cell lines",
"cell",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
",",
"float",
")",
"frame",
"[",
"\"cell\"",
"]",
"=",
"cell",
"cell_msg",
"=",
"\"The cell lines must consist of three floating point values. (%i'th frame, %i'th step)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"cell",
"[",
":",
",",
"i",
"]",
"=",
"read_three",
"(",
"cell_msg",
")",
"cell",
"*=",
"self",
".",
"pos_unit",
"# the atoms",
"symbols",
"=",
"[",
"]",
"frame",
"[",
"\"symbols\"",
"]",
"=",
"symbols",
"masses",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"num_atoms",
",",
"float",
")",
"frame",
"[",
"\"masses\"",
"]",
"=",
"masses",
"charges",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"num_atoms",
",",
"float",
")",
"frame",
"[",
"\"charges\"",
"]",
"=",
"charges",
"pos",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"num_atoms",
",",
"3",
")",
",",
"float",
")",
"frame",
"[",
"\"pos\"",
"]",
"=",
"pos",
"if",
"self",
".",
"keytrj",
">",
"0",
":",
"vel",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"num_atoms",
",",
"3",
")",
",",
"float",
")",
"frame",
"[",
"\"vel\"",
"]",
"=",
"vel",
"if",
"self",
".",
"keytrj",
">",
"1",
":",
"frc",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"num_atoms",
",",
"3",
")",
",",
"float",
")",
"frame",
"[",
"\"frc\"",
"]",
"=",
"frc",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_atoms",
")",
":",
"# the atom header line",
"words",
"=",
"next",
"(",
"self",
".",
"_f",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"words",
")",
"!=",
"4",
":",
"raise",
"FileFormatError",
"(",
"\"The atom header line must contain 4 words. (%i'th frame, %i'th step, %i'th atom)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
",",
"i",
"+",
"1",
")",
")",
"symbols",
".",
"append",
"(",
"words",
"[",
"0",
"]",
")",
"try",
":",
"masses",
"[",
"i",
"]",
"=",
"float",
"(",
"words",
"[",
"2",
"]",
")",
"*",
"self",
".",
"mass_unit",
"charges",
"[",
"i",
"]",
"=",
"float",
"(",
"words",
"[",
"3",
"]",
")",
"except",
"ValueError",
":",
"raise",
"FileFormatError",
"(",
"\"The numbers in the atom header line could not be interpreted.\"",
")",
"# the pos line",
"pos_msg",
"=",
"\"The position lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
",",
"i",
"+",
"1",
")",
"pos",
"[",
"i",
"]",
"=",
"read_three",
"(",
"pos_msg",
")",
"if",
"self",
".",
"keytrj",
">",
"0",
":",
"vel_msg",
"=",
"\"The velocity lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
",",
"i",
"+",
"1",
")",
"vel",
"[",
"i",
"]",
"=",
"read_three",
"(",
"vel_msg",
")",
"if",
"self",
".",
"keytrj",
">",
"1",
":",
"frc_msg",
"=",
"\"The force lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)\"",
"%",
"(",
"self",
".",
"_counter",
",",
"step",
",",
"i",
"+",
"1",
")",
"frc",
"[",
"i",
"]",
"=",
"read_three",
"(",
"frc_msg",
")",
"pos",
"*=",
"self",
".",
"pos_unit",
"# convert to au",
"if",
"self",
".",
"keytrj",
">",
"0",
":",
"vel",
"*=",
"self",
".",
"vel_unit",
"# convert to au",
"if",
"self",
".",
"keytrj",
">",
"1",
":",
"frc",
"*=",
"self",
".",
"frc_unit",
"# convert to au",
"return",
"frame"
] | Read a single frame from the trajectory | [
"Read",
"a",
"single",
"frame",
"from",
"the",
"trajectory"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/dlpoly.py#L112-L190 |
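A minimal consumption sketch, assuming the reader is iterable like the other trajectory readers in this package and takes the HISTORY file name as its first constructor argument (the file name here is only a placeholder):

    from molmod.io.dlpoly import DLPolyHistoryReader

    for frame in DLPolyHistoryReader("HISTORY"):
        print(frame["step"], frame["time"])   # step number and time in atomic units
        print(frame["cell"].shape)            # (3, 3) cell vectors
        print(frame["pos"].shape)             # (num_atoms, 3) positions
        # frame["vel"] / frame["frc"] are only present when keytrj > 0 / > 1
        break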
molmod/molmod | molmod/io/dlpoly.py | DLPolyOutputReader.goto_next_frame | def goto_next_frame(self):
"""Continue reading until the next frame is reached"""
marked = False
while True:
line = next(self._f)[:-1]
if marked and len(line) > 0 and not line.startswith(" --------"):
try:
step = int(line[:10])
return step, line
except ValueError:
pass
marked = (len(line) == 131 and line == self._marker) | python | def goto_next_frame(self):
"""Continue reading until the next frame is reached"""
marked = False
while True:
line = next(self._f)[:-1]
if marked and len(line) > 0 and not line.startswith(" --------"):
try:
step = int(line[:10])
return step, line
except ValueError:
pass
marked = (len(line) == 131 and line == self._marker) | [
"def",
"goto_next_frame",
"(",
"self",
")",
":",
"marked",
"=",
"False",
"while",
"True",
":",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"[",
":",
"-",
"1",
"]",
"if",
"marked",
"and",
"len",
"(",
"line",
")",
">",
"0",
"and",
"not",
"line",
".",
"startswith",
"(",
"\" --------\"",
")",
":",
"try",
":",
"step",
"=",
"int",
"(",
"line",
"[",
":",
"10",
"]",
")",
"return",
"step",
",",
"line",
"except",
"ValueError",
":",
"pass",
"marked",
"=",
"(",
"len",
"(",
"line",
")",
"==",
"131",
"and",
"line",
"==",
"self",
".",
"_marker",
")"
] | Continue reading until the next frame is reached | [
"Continue",
"reading",
"until",
"the",
"next",
"frame",
"is",
"reached"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/dlpoly.py#L253-L264 |
molmod/molmod | molmod/io/dlpoly.py | DLPolyOutputReader._read_frame | def _read_frame(self):
"""Read a single frame from the trajectory"""
# optionally skip the equilibration
if self.skip_equi_period:
while True:
step, line = self.goto_next_frame()
self._counter += 1
if step >= self.equi_period:
break
self.skip_equi_period = False
else:
step, line = self.goto_next_frame()
# read the three lines
try:
row = [step]
for i in range(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
line = next(self._f)[:-1]
row.append(float(line[:10]))
for i in range(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
line = next(self._f)[:-1]
row.append(float(line[:10]))
for i in range(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
except ValueError:
raise FileFormatError("Some numbers in the output file could not be read. (expecting floating point numbers)")
# convert all the numbers to atomic units
for i in range(30):
row[i] *= self._conv[i]
# done
return row | python | def _read_frame(self):
"""Read a single frame from the trajectory"""
# optionally skip the equilibration
if self.skip_equi_period:
while True:
step, line = self.goto_next_frame()
self._counter += 1
if step >= self.equi_period:
break
self.skip_equi_period = False
else:
step, line = self.goto_next_frame()
# read the three lines
try:
row = [step]
for i in range(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
line = next(self._f)[:-1]
row.append(float(line[:10]))
for i in range(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
line = next(self._f)[:-1]
row.append(float(line[:10]))
for i in range(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
except ValueError:
raise FileFormatError("Some numbers in the output file could not be read. (expecting floating point numbers)")
# convert all the numbers to atomic units
for i in range(30):
row[i] *= self._conv[i]
# done
return row | [
"def",
"_read_frame",
"(",
"self",
")",
":",
"# optionally skip the equilibration",
"if",
"self",
".",
"skip_equi_period",
":",
"while",
"True",
":",
"step",
",",
"line",
"=",
"self",
".",
"goto_next_frame",
"(",
")",
"self",
".",
"_counter",
"+=",
"1",
"if",
"step",
">=",
"self",
".",
"equi_period",
":",
"break",
"self",
".",
"skip_equi_period",
"=",
"False",
"else",
":",
"step",
",",
"line",
"=",
"self",
".",
"goto_next_frame",
"(",
")",
"# read the three lines",
"try",
":",
"row",
"=",
"[",
"step",
"]",
"for",
"i",
"in",
"range",
"(",
"9",
")",
":",
"row",
".",
"append",
"(",
"float",
"(",
"line",
"[",
"10",
"+",
"i",
"*",
"12",
":",
"10",
"+",
"(",
"i",
"+",
"1",
")",
"*",
"12",
"]",
")",
")",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"[",
":",
"-",
"1",
"]",
"row",
".",
"append",
"(",
"float",
"(",
"line",
"[",
":",
"10",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"9",
")",
":",
"row",
".",
"append",
"(",
"float",
"(",
"line",
"[",
"10",
"+",
"i",
"*",
"12",
":",
"10",
"+",
"(",
"i",
"+",
"1",
")",
"*",
"12",
"]",
")",
")",
"line",
"=",
"next",
"(",
"self",
".",
"_f",
")",
"[",
":",
"-",
"1",
"]",
"row",
".",
"append",
"(",
"float",
"(",
"line",
"[",
":",
"10",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"9",
")",
":",
"row",
".",
"append",
"(",
"float",
"(",
"line",
"[",
"10",
"+",
"i",
"*",
"12",
":",
"10",
"+",
"(",
"i",
"+",
"1",
")",
"*",
"12",
"]",
")",
")",
"except",
"ValueError",
":",
"raise",
"FileFormatError",
"(",
"\"Some numbers in the output file could not be read. (expecting floating point numbers)\"",
")",
"# convert all the numbers to atomic units",
"for",
"i",
"in",
"range",
"(",
"30",
")",
":",
"row",
"[",
"i",
"]",
"*=",
"self",
".",
"_conv",
"[",
"i",
"]",
"# done",
"return",
"row"
] | Read a single frame from the trajectory | [
"Read",
"a",
"single",
"frame",
"from",
"the",
"trajectory"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/dlpoly.py#L266-L300 |
molmod/molmod | molmod/io/xyz.py | XYZReader._read_frame | def _read_frame(self):
"""Read a frame from the XYZ file"""
size = self.read_size()
title = self._f.readline()[:-1]
if self.symbols is None:
symbols = []
coordinates = np.zeros((size, 3), float)
for counter in range(size):
line = self._f.readline()
if len(line) == 0:
raise StopIteration
words = line.split()
if len(words) < 4:
raise StopIteration
if self.symbols is None:
symbols.append(words[0])
try:
coordinates[counter, 0] = float(words[1])
coordinates[counter, 1] = float(words[2])
coordinates[counter, 2] = float(words[3])
except ValueError:
raise StopIteration
coordinates *= self.file_unit
if self.symbols is None:
self.symbols = symbols
return title, coordinates | python | def _read_frame(self):
"""Read a frame from the XYZ file"""
size = self.read_size()
title = self._f.readline()[:-1]
if self.symbols is None:
symbols = []
coordinates = np.zeros((size, 3), float)
for counter in range(size):
line = self._f.readline()
if len(line) == 0:
raise StopIteration
words = line.split()
if len(words) < 4:
raise StopIteration
if self.symbols is None:
symbols.append(words[0])
try:
coordinates[counter, 0] = float(words[1])
coordinates[counter, 1] = float(words[2])
coordinates[counter, 2] = float(words[3])
except ValueError:
raise StopIteration
coordinates *= self.file_unit
if self.symbols is None:
self.symbols = symbols
return title, coordinates | [
"def",
"_read_frame",
"(",
"self",
")",
":",
"size",
"=",
"self",
".",
"read_size",
"(",
")",
"title",
"=",
"self",
".",
"_f",
".",
"readline",
"(",
")",
"[",
":",
"-",
"1",
"]",
"if",
"self",
".",
"symbols",
"is",
"None",
":",
"symbols",
"=",
"[",
"]",
"coordinates",
"=",
"np",
".",
"zeros",
"(",
"(",
"size",
",",
"3",
")",
",",
"float",
")",
"for",
"counter",
"in",
"range",
"(",
"size",
")",
":",
"line",
"=",
"self",
".",
"_f",
".",
"readline",
"(",
")",
"if",
"len",
"(",
"line",
")",
"==",
"0",
":",
"raise",
"StopIteration",
"words",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"words",
")",
"<",
"4",
":",
"raise",
"StopIteration",
"if",
"self",
".",
"symbols",
"is",
"None",
":",
"symbols",
".",
"append",
"(",
"words",
"[",
"0",
"]",
")",
"try",
":",
"coordinates",
"[",
"counter",
",",
"0",
"]",
"=",
"float",
"(",
"words",
"[",
"1",
"]",
")",
"coordinates",
"[",
"counter",
",",
"1",
"]",
"=",
"float",
"(",
"words",
"[",
"2",
"]",
")",
"coordinates",
"[",
"counter",
",",
"2",
"]",
"=",
"float",
"(",
"words",
"[",
"3",
"]",
")",
"except",
"ValueError",
":",
"raise",
"StopIteration",
"coordinates",
"*=",
"self",
".",
"file_unit",
"if",
"self",
".",
"symbols",
"is",
"None",
":",
"self",
".",
"symbols",
"=",
"symbols",
"return",
"title",
",",
"coordinates"
] | Read a frame from the XYZ file | [
"Read",
"a",
"frame",
"from",
"the",
"XYZ",
"file"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/xyz.py#L99-L125 |
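The parser above consumes the standard XYZ layout: an atom-count line, a free-form title line, then one line per atom with the element symbol followed by three coordinates, interpreted in the reader's file_unit. An illustrative frame (numbers invented):

    3
    water, frame 0
    O   0.000000  0.000000  0.000000
    H   0.757000  0.586000  0.000000
    H  -0.757000  0.586000  0.000000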
molmod/molmod | molmod/io/xyz.py | XYZReader._skip_frame | def _skip_frame(self):
"""Skip a single frame from the trajectory"""
size = self.read_size()
for i in range(size+1):
line = self._f.readline()
if len(line) == 0:
raise StopIteration | python | def _skip_frame(self):
"""Skip a single frame from the trajectory"""
size = self.read_size()
for i in range(size+1):
line = self._f.readline()
if len(line) == 0:
raise StopIteration | [
"def",
"_skip_frame",
"(",
"self",
")",
":",
"size",
"=",
"self",
".",
"read_size",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"size",
"+",
"1",
")",
":",
"line",
"=",
"self",
".",
"_f",
".",
"readline",
"(",
")",
"if",
"len",
"(",
"line",
")",
"==",
"0",
":",
"raise",
"StopIteration"
] | Skip a single frame from the trajectory | [
"Skip",
"a",
"single",
"frame",
"from",
"the",
"trajectory"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/xyz.py#L127-L133 |
molmod/molmod | molmod/io/xyz.py | XYZReader.get_first_molecule | def get_first_molecule(self):
"""Get the first molecule from the trajectory
This can be useful to configure your program before handling the
actual trajectory.
"""
title, coordinates = self._first
molecule = Molecule(self.numbers, coordinates, title, symbols=self.symbols)
return molecule | python | def get_first_molecule(self):
"""Get the first molecule from the trajectory
This can be useful to configure your program before handling the
actual trajectory.
"""
title, coordinates = self._first
molecule = Molecule(self.numbers, coordinates, title, symbols=self.symbols)
return molecule | [
"def",
"get_first_molecule",
"(",
"self",
")",
":",
"title",
",",
"coordinates",
"=",
"self",
".",
"_first",
"molecule",
"=",
"Molecule",
"(",
"self",
".",
"numbers",
",",
"coordinates",
",",
"title",
",",
"symbols",
"=",
"self",
".",
"symbols",
")",
"return",
"molecule"
] | Get the first molecule from the trajectory
This can be useful to configure your program before handling the
actual trajectory. | [
"Get",
"the",
"first",
"molecule",
"from",
"the",
"trajectory"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/xyz.py#L135-L143 |
molmod/molmod | molmod/io/xyz.py | XYZWriter.dump | def dump(self, title, coordinates):
"""Dump a frame to the trajectory file
Arguments:
| ``title`` -- the title of the frame
| ``coordinates`` -- a numpy array with coordinates in atomic units
"""
print("% 8i" % len(self.symbols), file=self._f)
print(str(title), file=self._f)
for symbol, coordinate in zip(self.symbols, coordinates):
print("% 2s % 12.9f % 12.9f % 12.9f" % ((symbol, ) + tuple(coordinate/self.file_unit)), file=self._f) | python | def dump(self, title, coordinates):
"""Dump a frame to the trajectory file
Arguments:
| ``title`` -- the title of the frame
| ``coordinates`` -- a numpy array with coordinates in atomic units
"""
print("% 8i" % len(self.symbols), file=self._f)
print(str(title), file=self._f)
for symbol, coordinate in zip(self.symbols, coordinates):
print("% 2s % 12.9f % 12.9f % 12.9f" % ((symbol, ) + tuple(coordinate/self.file_unit)), file=self._f) | [
"def",
"dump",
"(",
"self",
",",
"title",
",",
"coordinates",
")",
":",
"print",
"(",
"\"% 8i\"",
"%",
"len",
"(",
"self",
".",
"symbols",
")",
",",
"file",
"=",
"self",
".",
"_f",
")",
"print",
"(",
"str",
"(",
"title",
")",
",",
"file",
"=",
"self",
".",
"_f",
")",
"for",
"symbol",
",",
"coordinate",
"in",
"zip",
"(",
"self",
".",
"symbols",
",",
"coordinates",
")",
":",
"print",
"(",
"\"% 2s % 12.9f % 12.9f % 12.9f\"",
"%",
"(",
"(",
"symbol",
",",
")",
"+",
"tuple",
"(",
"coordinate",
"/",
"self",
".",
"file_unit",
")",
")",
",",
"file",
"=",
"self",
".",
"_f",
")"
] | Dump a frame to the trajectory file
Arguments:
| ``title`` -- the title of the frame
| ``coordinates`` -- a numpy array with coordinates in atomic units | [
"Dump",
"a",
"frame",
"to",
"the",
"trajectory",
"file"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/xyz.py#L180-L190 |
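
The record above shows how ``XYZWriter.dump`` formats one frame. A minimal usage sketch follows; the ``XYZWriter(f, symbols, file_unit=...)`` constructor signature is inferred from the ``XYZFile.write_to_file`` record further down, the ``molmod.units`` import path for ``angstrom`` is an assumption, and the coordinates are invented for illustration.

```python
import numpy as np
from molmod.units import angstrom      # assumed import path for the angstrom unit
from molmod.io.xyz import XYZWriter    # module path taken from this record

# Two invented frames of a diatomic molecule, coordinates in atomic units.
symbols = ["H", "H"]
frames = [np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4]]),
          np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.5]])]

writer = XYZWriter("h2_traj.xyz", symbols, file_unit=angstrom)
for i, coordinates in enumerate(frames):
    # dump() writes the atom count, the title line and one line per atom,
    # dividing the coordinates by file_unit on output.
    writer.dump("frame %i" % i, coordinates)
```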
molmod/molmod | molmod/io/xyz.py | XYZFile.get_molecule | def get_molecule(self, index=0):
"""Get a molecule from the trajectory
Optional argument:
| ``index`` -- The frame index [default=0]
"""
return Molecule(self.numbers, self.geometries[index], self.titles[index], symbols=self.symbols) | python | def get_molecule(self, index=0):
"""Get a molecule from the trajectory
Optional argument:
| ``index`` -- The frame index [default=0]
"""
return Molecule(self.numbers, self.geometries[index], self.titles[index], symbols=self.symbols) | [
"def",
"get_molecule",
"(",
"self",
",",
"index",
"=",
"0",
")",
":",
"return",
"Molecule",
"(",
"self",
".",
"numbers",
",",
"self",
".",
"geometries",
"[",
"index",
"]",
",",
"self",
".",
"titles",
"[",
"index",
"]",
",",
"symbols",
"=",
"self",
".",
"symbols",
")"
] | Get a molecule from the trajectory
Optional argument:
| ``index`` -- The frame index [default=0] | [
"Get",
"a",
"molecule",
"from",
"the",
"trajectory"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/xyz.py#L247-L253 |
molmod/molmod | molmod/io/xyz.py | XYZFile.write_to_file | def write_to_file(self, f, file_unit=angstrom):
"""Write the trajectory to a file
Argument:
| ``f`` -- a filename or a file-like object to write to
Optional argument:
| ``file_unit`` -- the unit of the values written to file
[default=angstrom]
"""
xyz_writer = XYZWriter(f, self.symbols, file_unit=file_unit)
for title, coordinates in zip(self.titles, self.geometries):
xyz_writer.dump(title, coordinates) | python | def write_to_file(self, f, file_unit=angstrom):
"""Write the trajectory to a file
Argument:
| ``f`` -- a filename or a file-like object to write to
Optional argument:
| ``file_unit`` -- the unit of the values written to file
[default=angstrom]
"""
xyz_writer = XYZWriter(f, self.symbols, file_unit=file_unit)
for title, coordinates in zip(self.titles, self.geometries):
xyz_writer.dump(title, coordinates) | [
"def",
"write_to_file",
"(",
"self",
",",
"f",
",",
"file_unit",
"=",
"angstrom",
")",
":",
"xyz_writer",
"=",
"XYZWriter",
"(",
"f",
",",
"self",
".",
"symbols",
",",
"file_unit",
"=",
"file_unit",
")",
"for",
"title",
",",
"coordinates",
"in",
"zip",
"(",
"self",
".",
"titles",
",",
"self",
".",
"geometries",
")",
":",
"xyz_writer",
".",
"dump",
"(",
"title",
",",
"coordinates",
")"
] | Write the trajectory to a file
Argument:
| ``f`` -- a filename or a file-like object to write to
Optional argument:
| ``file_unit`` -- the unit of the values written to file
[default=angstrom] | [
"Write",
"the",
"trajectory",
"to",
"a",
"file"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/xyz.py#L255-L267 |
molmod/molmod | molmod/io/common.py | slice_match | def slice_match(sub, counter):
"""Efficiently test if counter is in ``xrange(*sub)``
Arguments:
| ``sub`` -- a slice object
| ``counter`` -- an integer
The function returns True if the counter is in
``xrange(sub.start, sub.stop, sub.step)``.
"""
if sub.start is not None and counter < sub.start:
return False
if sub.stop is not None and counter >= sub.stop:
raise StopIteration
if sub.step is not None:
if sub.start is None:
if counter % sub.step != 0:
return False
else:
if (counter - sub.start) % sub.step != 0:
return False
return True | python | def slice_match(sub, counter):
"""Efficiently test if counter is in ``xrange(*sub)``
Arguments:
| ``sub`` -- a slice object
| ``counter`` -- an integer
The function returns True if the counter is in
``xrange(sub.start, sub.stop, sub.step)``.
"""
if sub.start is not None and counter < sub.start:
return False
if sub.stop is not None and counter >= sub.stop:
raise StopIteration
if sub.step is not None:
if sub.start is None:
if counter % sub.step != 0:
return False
else:
if (counter - sub.start) % sub.step != 0:
return False
return True | [
"def",
"slice_match",
"(",
"sub",
",",
"counter",
")",
":",
"if",
"sub",
".",
"start",
"is",
"not",
"None",
"and",
"counter",
"<",
"sub",
".",
"start",
":",
"return",
"False",
"if",
"sub",
".",
"stop",
"is",
"not",
"None",
"and",
"counter",
">=",
"sub",
".",
"stop",
":",
"raise",
"StopIteration",
"if",
"sub",
".",
"step",
"is",
"not",
"None",
":",
"if",
"sub",
".",
"start",
"is",
"None",
":",
"if",
"counter",
"%",
"sub",
".",
"step",
"!=",
"0",
":",
"return",
"False",
"else",
":",
"if",
"(",
"counter",
"-",
"sub",
".",
"start",
")",
"%",
"sub",
".",
"step",
"!=",
"0",
":",
"return",
"False",
"return",
"True"
] | Efficiently test if counter is in ``xrange(*sub)``
Arguments:
| ``sub`` -- a slice object
| ``counter`` -- an integer
The function returns True if the counter is in
``xrange(sub.start, sub.stop, sub.step)``. | [
"Efficiently",
"test",
"if",
"counter",
"is",
"in",
"xrange",
"(",
"*",
"sub",
")"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/common.py#L38-L60 |
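
A small sketch of how ``slice_match`` can drive frame sub-sampling, assuming the ``molmod.io.common`` import path shown in this record. Note that it deliberately raises ``StopIteration`` once the counter reaches ``sub.stop``, so the caller is expected to catch it.

```python
from molmod.io.common import slice_match  # path taken from this record

sub = slice(2, 9, 3)   # selects counters 2, 5, 8, like xrange(2, 9, 3)
selected = []
try:
    for counter in range(20):
        if slice_match(sub, counter):
            selected.append(counter)
except StopIteration:
    pass  # raised as soon as counter >= sub.stop
print(selected)  # [2, 5, 8]
```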
molmod/molmod | molmod/minimizer.py | check_anagrad | def check_anagrad(fun, x0, epsilon, threshold):
"""Check the analytical gradient using finite differences
Arguments:
| ``fun`` -- the function to be tested, more info below
| ``x0`` -- the reference point around which the function should be
tested
| ``epsilon`` -- a small scalar used for the finite differences
| ``threshold`` -- the maximum acceptable difference between the
analytical gradient and the gradient obtained by
finite differentiation
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned [default=False]
"""
N = len(x0)
f0, ana_grad = fun(x0, do_gradient=True)
for i in range(N):
xh = x0.copy()
xh[i] += 0.5*epsilon
xl = x0.copy()
xl[i] -= 0.5*epsilon
num_grad_comp = (fun(xh)-fun(xl))/epsilon
if abs(num_grad_comp - ana_grad[i]) > threshold:
raise AssertionError("Error in the analytical gradient, component %i, got %s, should be about %s" % (i, ana_grad[i], num_grad_comp)) | python | def check_anagrad(fun, x0, epsilon, threshold):
"""Check the analytical gradient using finite differences
Arguments:
| ``fun`` -- the function to be tested, more info below
| ``x0`` -- the reference point around which the function should be
tested
| ``epsilon`` -- a small scalar used for the finite differences
| ``threshold`` -- the maximum acceptable difference between the
analytical gradient and the gradient obtained by
finite differentiation
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned [default=False]
"""
N = len(x0)
f0, ana_grad = fun(x0, do_gradient=True)
for i in range(N):
xh = x0.copy()
xh[i] += 0.5*epsilon
xl = x0.copy()
xl[i] -= 0.5*epsilon
num_grad_comp = (fun(xh)-fun(xl))/epsilon
if abs(num_grad_comp - ana_grad[i]) > threshold:
raise AssertionError("Error in the analytical gradient, component %i, got %s, should be about %s" % (i, ana_grad[i], num_grad_comp)) | [
"def",
"check_anagrad",
"(",
"fun",
",",
"x0",
",",
"epsilon",
",",
"threshold",
")",
":",
"N",
"=",
"len",
"(",
"x0",
")",
"f0",
",",
"ana_grad",
"=",
"fun",
"(",
"x0",
",",
"do_gradient",
"=",
"True",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"xh",
"=",
"x0",
".",
"copy",
"(",
")",
"xh",
"[",
"i",
"]",
"+=",
"0.5",
"*",
"epsilon",
"xl",
"=",
"x0",
".",
"copy",
"(",
")",
"xl",
"[",
"i",
"]",
"-=",
"0.5",
"*",
"epsilon",
"num_grad_comp",
"=",
"(",
"fun",
"(",
"xh",
")",
"-",
"fun",
"(",
"xl",
")",
")",
"/",
"epsilon",
"if",
"abs",
"(",
"num_grad_comp",
"-",
"ana_grad",
"[",
"i",
"]",
")",
">",
"threshold",
":",
"raise",
"AssertionError",
"(",
"\"Error in the analytical gradient, component %i, got %s, should be about %s\"",
"%",
"(",
"i",
",",
"ana_grad",
"[",
"i",
"]",
",",
"num_grad_comp",
")",
")"
] | Check the analytical gradient using finite differences
Arguments:
| ``fun`` -- the function to be tested, more info below
| ``x0`` -- the reference point around which the function should be
tested
| ``epsilon`` -- a small scalar used for the finite differences
| ``threshold`` -- the maximum acceptable difference between the
analytical gradient and the gradient obtained by
finite differentiation
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned [default=False] | [
"Check",
"the",
"analytical",
"gradient",
"using",
"finite",
"differences"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1577-L1605 |
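
A sketch of validating an analytical gradient with ``check_anagrad``; the quadratic test function and the tolerance values are invented for illustration and follow the ``do_gradient`` calling convention described in the docstring.

```python
import numpy as np
from molmod.minimizer import check_anagrad  # path taken from this record

def quadratic(x, do_gradient=False):
    # f(x) = x.x with analytical gradient 2*x
    f = float(np.dot(x, x))
    if do_gradient:
        return f, 2*x
    return f

x0 = np.array([0.3, -1.2, 0.7])
# Raises AssertionError when a gradient component deviates by more than the threshold.
check_anagrad(quadratic, x0, epsilon=1e-5, threshold=1e-6)
```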
molmod/molmod | molmod/minimizer.py | check_delta | def check_delta(fun, x, dxs, period=None):
"""Check the difference between two function values using the analytical gradient
Arguments:
| ``fun`` -- The function to be tested, more info below.
| ``x`` -- The argument vector.
| ``dxs`` -- A matrix where each row is a vector of small differences
to be added to the argument vector.
Optional argument:
| ``period`` -- If the function value is periodic, one may provide the
period such that differences are computed using
periodic boundary conditions.
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- The arguments of the function to be tested.
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned. [default=False]
For every row in dxs, the following computation is repeated:
1) D1 = 'f(x+dx) - f(x)' is computed.
2) D2 = '0.5 (grad f(x+dx) + grad f(x)) . dx' is computed.
A threshold is set to the median of the D1 set. For each case where |D1|
is larger than the threshold, |D1 - D2| should be smaller than the
threshold.
"""
dn1s = []
dn2s = []
dnds = []
for dx in dxs:
f0, grad0 = fun(x, do_gradient=True)
f1, grad1 = fun(x+dx, do_gradient=True)
grad = 0.5*(grad0+grad1)
d1 = f1 - f0
if period is not None:
d1 -= np.floor(d1/period + 0.5)*period
if hasattr(d1, '__iter__'):
norm = np.linalg.norm
else:
norm = abs
d2 = np.dot(grad, dx)
dn1s.append(norm(d1))
dn2s.append(norm(d2))
dnds.append(norm(d1-d2))
dn1s = np.array(dn1s)
dn2s = np.array(dn2s)
dnds = np.array(dnds)
# Get the threshold (and mask)
threshold = np.median(dn1s)
mask = dn1s > threshold
    # Make sure that all cases for which dn1 is above the threshold, dnd is below
# the threshold
if not (dnds[mask] < threshold).all():
raise AssertionError((
'The first order approximation on the difference is too wrong. The '
'threshold is %.1e.\n\nDifferences:\n%s\n\nFirst order '
'approximation to differences:\n%s\n\nAbsolute errors:\n%s')
% (threshold,
' '.join('%.1e' % v for v in dn1s[mask]),
' '.join('%.1e' % v for v in dn2s[mask]),
' '.join('%.1e' % v for v in dnds[mask])
)) | python | def check_delta(fun, x, dxs, period=None):
"""Check the difference between two function values using the analytical gradient
Arguments:
| ``fun`` -- The function to be tested, more info below.
| ``x`` -- The argument vector.
| ``dxs`` -- A matrix where each row is a vector of small differences
to be added to the argument vector.
Optional argument:
| ``period`` -- If the function value is periodic, one may provide the
period such that differences are computed using
periodic boundary conditions.
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- The arguments of the function to be tested.
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned. [default=False]
For every row in dxs, the following computation is repeated:
1) D1 = 'f(x+dx) - f(x)' is computed.
2) D2 = '0.5 (grad f(x+dx) + grad f(x)) . dx' is computed.
A threshold is set to the median of the D1 set. For each case where |D1|
is larger than the threshold, |D1 - D2| should be smaller than the
threshold.
"""
dn1s = []
dn2s = []
dnds = []
for dx in dxs:
f0, grad0 = fun(x, do_gradient=True)
f1, grad1 = fun(x+dx, do_gradient=True)
grad = 0.5*(grad0+grad1)
d1 = f1 - f0
if period is not None:
d1 -= np.floor(d1/period + 0.5)*period
if hasattr(d1, '__iter__'):
norm = np.linalg.norm
else:
norm = abs
d2 = np.dot(grad, dx)
dn1s.append(norm(d1))
dn2s.append(norm(d2))
dnds.append(norm(d1-d2))
dn1s = np.array(dn1s)
dn2s = np.array(dn2s)
dnds = np.array(dnds)
# Get the threshold (and mask)
threshold = np.median(dn1s)
mask = dn1s > threshold
    # Make sure that all cases for which dn1 is above the threshold, dnd is below
# the threshold
if not (dnds[mask] < threshold).all():
raise AssertionError((
'The first order approximation on the difference is too wrong. The '
'threshold is %.1e.\n\nDifferences:\n%s\n\nFirst order '
'approximation to differences:\n%s\n\nAbsolute errors:\n%s')
% (threshold,
' '.join('%.1e' % v for v in dn1s[mask]),
' '.join('%.1e' % v for v in dn2s[mask]),
' '.join('%.1e' % v for v in dnds[mask])
)) | [
"def",
"check_delta",
"(",
"fun",
",",
"x",
",",
"dxs",
",",
"period",
"=",
"None",
")",
":",
"dn1s",
"=",
"[",
"]",
"dn2s",
"=",
"[",
"]",
"dnds",
"=",
"[",
"]",
"for",
"dx",
"in",
"dxs",
":",
"f0",
",",
"grad0",
"=",
"fun",
"(",
"x",
",",
"do_gradient",
"=",
"True",
")",
"f1",
",",
"grad1",
"=",
"fun",
"(",
"x",
"+",
"dx",
",",
"do_gradient",
"=",
"True",
")",
"grad",
"=",
"0.5",
"*",
"(",
"grad0",
"+",
"grad1",
")",
"d1",
"=",
"f1",
"-",
"f0",
"if",
"period",
"is",
"not",
"None",
":",
"d1",
"-=",
"np",
".",
"floor",
"(",
"d1",
"/",
"period",
"+",
"0.5",
")",
"*",
"period",
"if",
"hasattr",
"(",
"d1",
",",
"'__iter__'",
")",
":",
"norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"else",
":",
"norm",
"=",
"abs",
"d2",
"=",
"np",
".",
"dot",
"(",
"grad",
",",
"dx",
")",
"dn1s",
".",
"append",
"(",
"norm",
"(",
"d1",
")",
")",
"dn2s",
".",
"append",
"(",
"norm",
"(",
"d2",
")",
")",
"dnds",
".",
"append",
"(",
"norm",
"(",
"d1",
"-",
"d2",
")",
")",
"dn1s",
"=",
"np",
".",
"array",
"(",
"dn1s",
")",
"dn2s",
"=",
"np",
".",
"array",
"(",
"dn2s",
")",
"dnds",
"=",
"np",
".",
"array",
"(",
"dnds",
")",
"# Get the threshold (and mask)",
"threshold",
"=",
"np",
".",
"median",
"(",
"dn1s",
")",
"mask",
"=",
"dn1s",
">",
"threshold",
"# Make sure that all cases for which dn1 is above the treshold, dnd is below",
"# the threshold",
"if",
"not",
"(",
"dnds",
"[",
"mask",
"]",
"<",
"threshold",
")",
".",
"all",
"(",
")",
":",
"raise",
"AssertionError",
"(",
"(",
"'The first order approximation on the difference is too wrong. The '",
"'threshold is %.1e.\\n\\nDifferences:\\n%s\\n\\nFirst order '",
"'approximation to differences:\\n%s\\n\\nAbsolute errors:\\n%s'",
")",
"%",
"(",
"threshold",
",",
"' '",
".",
"join",
"(",
"'%.1e'",
"%",
"v",
"for",
"v",
"in",
"dn1s",
"[",
"mask",
"]",
")",
",",
"' '",
".",
"join",
"(",
"'%.1e'",
"%",
"v",
"for",
"v",
"in",
"dn2s",
"[",
"mask",
"]",
")",
",",
"' '",
".",
"join",
"(",
"'%.1e'",
"%",
"v",
"for",
"v",
"in",
"dnds",
"[",
"mask",
"]",
")",
")",
")"
] | Check the difference between two function values using the analytical gradient
Arguments:
| ``fun`` -- The function to be tested, more info below.
| ``x`` -- The argument vector.
| ``dxs`` -- A matrix where each row is a vector of small differences
to be added to the argument vector.
Optional argument:
| ``period`` -- If the function value is periodic, one may provide the
period such that differences are computed using
periodic boundary conditions.
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- The arguments of the function to be tested.
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned. [default=False]
For every row in dxs, the following computation is repeated:
1) D1 = 'f(x+dx) - f(x)' is computed.
2) D2 = '0.5 (grad f(x+dx) + grad f(x)) . dx' is computed.
A threshold is set to the median of the D1 set. For each case where |D1|
is larger than the threshold, |D1 - D2|, should be smaller than the
threshold. | [
"Check",
"the",
"difference",
"between",
"two",
"function",
"values",
"using",
"the",
"analytical",
"gradient"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1608-L1675 |
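
A sketch of ``check_delta`` on the same kind of invented quadratic; each row of ``dxs`` is one small displacement, and for an exact quadratic the midpoint-gradient estimate matches the difference up to rounding error, so the check passes silently.

```python
import numpy as np
from molmod.minimizer import check_delta  # path taken from this record

def quadratic(x, do_gradient=False):
    f = float(np.dot(x, x))
    if do_gradient:
        return f, 2*x
    return f

x = np.array([0.5, -0.1, 2.0])
dxs = np.random.normal(0, 1e-4, (50, 3))  # 50 random small displacements
check_delta(quadratic, x, dxs)            # silent when the test passes
```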
molmod/molmod | molmod/minimizer.py | compute_fd_hessian | def compute_fd_hessian(fun, x0, epsilon, anagrad=True):
"""Compute the Hessian using the finite difference method
Arguments:
| ``fun`` -- the function for which the Hessian should be computed,
more info below
| ``x0`` -- the point at which the Hessian must be computed
| ``epsilon`` -- a small scalar step size used to compute the finite
differences
Optional argument:
| ``anagrad`` -- when True, analytical gradients are used
[default=True]
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned [default=False]
"""
N = len(x0)
def compute_gradient(x):
if anagrad:
return fun(x, do_gradient=True)[1]
else:
gradient = np.zeros(N, float)
for i in range(N):
xh = x.copy()
xh[i] += 0.5*epsilon
xl = x.copy()
xl[i] -= 0.5*epsilon
gradient[i] = (fun(xh)-fun(xl))/epsilon
return gradient
hessian = np.zeros((N,N), float)
for i in range(N):
xh = x0.copy()
xh[i] += 0.5*epsilon
xl = x0.copy()
xl[i] -= 0.5*epsilon
hessian[i] = (compute_gradient(xh) - compute_gradient(xl))/epsilon
return 0.5*(hessian + hessian.transpose()) | python | def compute_fd_hessian(fun, x0, epsilon, anagrad=True):
"""Compute the Hessian using the finite difference method
Arguments:
| ``fun`` -- the function for which the Hessian should be computed,
more info below
| ``x0`` -- the point at which the Hessian must be computed
| ``epsilon`` -- a small scalar step size used to compute the finite
differences
Optional argument:
| ``anagrad`` -- when True, analytical gradients are used
[default=True]
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned [default=False]
"""
N = len(x0)
def compute_gradient(x):
if anagrad:
return fun(x, do_gradient=True)[1]
else:
gradient = np.zeros(N, float)
for i in range(N):
xh = x.copy()
xh[i] += 0.5*epsilon
xl = x.copy()
xl[i] -= 0.5*epsilon
gradient[i] = (fun(xh)-fun(xl))/epsilon
return gradient
hessian = np.zeros((N,N), float)
for i in range(N):
xh = x0.copy()
xh[i] += 0.5*epsilon
xl = x0.copy()
xl[i] -= 0.5*epsilon
hessian[i] = (compute_gradient(xh) - compute_gradient(xl))/epsilon
return 0.5*(hessian + hessian.transpose()) | [
"def",
"compute_fd_hessian",
"(",
"fun",
",",
"x0",
",",
"epsilon",
",",
"anagrad",
"=",
"True",
")",
":",
"N",
"=",
"len",
"(",
"x0",
")",
"def",
"compute_gradient",
"(",
"x",
")",
":",
"if",
"anagrad",
":",
"return",
"fun",
"(",
"x",
",",
"do_gradient",
"=",
"True",
")",
"[",
"1",
"]",
"else",
":",
"gradient",
"=",
"np",
".",
"zeros",
"(",
"N",
",",
"float",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"xh",
"=",
"x",
".",
"copy",
"(",
")",
"xh",
"[",
"i",
"]",
"+=",
"0.5",
"*",
"epsilon",
"xl",
"=",
"x",
".",
"copy",
"(",
")",
"xl",
"[",
"i",
"]",
"-=",
"0.5",
"*",
"epsilon",
"gradient",
"[",
"i",
"]",
"=",
"(",
"fun",
"(",
"xh",
")",
"-",
"fun",
"(",
"xl",
")",
")",
"/",
"epsilon",
"return",
"gradient",
"hessian",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"N",
")",
",",
"float",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"xh",
"=",
"x0",
".",
"copy",
"(",
")",
"xh",
"[",
"i",
"]",
"+=",
"0.5",
"*",
"epsilon",
"xl",
"=",
"x0",
".",
"copy",
"(",
")",
"xl",
"[",
"i",
"]",
"-=",
"0.5",
"*",
"epsilon",
"hessian",
"[",
"i",
"]",
"=",
"(",
"compute_gradient",
"(",
"xh",
")",
"-",
"compute_gradient",
"(",
"xl",
")",
")",
"/",
"epsilon",
"return",
"0.5",
"*",
"(",
"hessian",
"+",
"hessian",
".",
"transpose",
"(",
")",
")"
] | Compute the Hessian using the finite difference method
Arguments:
| ``fun`` -- the function for which the Hessian should be computed,
more info below
| ``x0`` -- the point at which the Hessian must be computed
| ``epsilon`` -- a small scalar step size used to compute the finite
differences
Optional argument:
| ``anagrad`` -- when True, analytical gradients are used
[default=True]
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient is returned [default=False] | [
"Compute",
"the",
"Hessian",
"using",
"the",
"finite",
"difference",
"method"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1678-L1722 |
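
A sketch comparing the finite-difference Hessian of an invented quadratic with its known exact Hessian; for ``f(x) = 0.5 x^T A x`` the result should reproduce ``A`` almost exactly.

```python
import numpy as np
from molmod.minimizer import compute_fd_hessian  # path taken from this record

A = np.array([[2.0, 0.5, 0.0],
              [0.5, 1.0, 0.3],
              [0.0, 0.3, 4.0]])

def quadratic(x, do_gradient=False):
    f = 0.5*np.dot(x, np.dot(A, x))
    if do_gradient:
        return f, np.dot(A, x)
    return f

hessian = compute_fd_hessian(quadratic, np.zeros(3), epsilon=1e-4)
print(abs(hessian - A).max())  # should be close to zero
```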
molmod/molmod | molmod/minimizer.py | ConjugateGradient.update | def update(self, gradient, step):
"""Update the search direction given the latest gradient and step"""
do_sd = self.gradient_old is None
self.gradient_old = self.gradient
self.gradient = gradient
if do_sd:
self._update_sd()
else:
self._update_cg() | python | def update(self, gradient, step):
"""Update the search direction given the latest gradient and step"""
do_sd = self.gradient_old is None
self.gradient_old = self.gradient
self.gradient = gradient
if do_sd:
self._update_sd()
else:
self._update_cg() | [
"def",
"update",
"(",
"self",
",",
"gradient",
",",
"step",
")",
":",
"do_sd",
"=",
"self",
".",
"gradient_old",
"is",
"None",
"self",
".",
"gradient_old",
"=",
"self",
".",
"gradient",
"self",
".",
"gradient",
"=",
"gradient",
"if",
"do_sd",
":",
"self",
".",
"_update_sd",
"(",
")",
"else",
":",
"self",
".",
"_update_cg",
"(",
")"
] | Update the search direction given the latest gradient and step | [
"Update",
"the",
"search",
"direction",
"given",
"the",
"latest",
"gradient",
"and",
"step"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L150-L158 |
molmod/molmod | molmod/minimizer.py | ConjugateGradient._update_cg | def _update_cg(self):
"""Update the conjugate gradient"""
beta = self._beta()
# Automatic direction reset
if beta < 0:
self.direction = -self.gradient
self.status = "SD"
else:
self.direction = self.direction * beta - self.gradient
self.status = "CG" | python | def _update_cg(self):
"""Update the conjugate gradient"""
beta = self._beta()
# Automatic direction reset
if beta < 0:
self.direction = -self.gradient
self.status = "SD"
else:
self.direction = self.direction * beta - self.gradient
self.status = "CG" | [
"def",
"_update_cg",
"(",
"self",
")",
":",
"beta",
"=",
"self",
".",
"_beta",
"(",
")",
"# Automatic direction reset",
"if",
"beta",
"<",
"0",
":",
"self",
".",
"direction",
"=",
"-",
"self",
".",
"gradient",
"self",
".",
"status",
"=",
"\"SD\"",
"else",
":",
"self",
".",
"direction",
"=",
"self",
".",
"direction",
"*",
"beta",
"-",
"self",
".",
"gradient",
"self",
".",
"status",
"=",
"\"CG\""
] | Update the conjugate gradient | [
"Update",
"the",
"conjugate",
"gradient"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L168-L177 |
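
The ``_beta()`` helper is not part of this record, so the sketch below uses the Polak-Ribiere formula purely as an illustrative stand-in; what it demonstrates is the automatic reset to steepest descent when beta turns negative, exactly as in ``_update_cg``.

```python
import numpy as np

def cg_direction(direction_old, gradient_old, gradient):
    # Polak-Ribiere beta, an assumed stand-in for self._beta()
    beta = np.dot(gradient, gradient - gradient_old)/np.dot(gradient_old, gradient_old)
    if beta < 0:
        return -gradient, "SD"   # automatic direction reset
    return direction_old*beta - gradient, "CG"

g_old = np.array([1.0, -2.0])
g_new = np.array([0.5, -1.0])
print(cg_direction(-g_old, g_old, g_new))  # beta < 0 here, so it resets to SD
```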
molmod/molmod | molmod/minimizer.py | QuasiNewton.update | def update(self, gradient, step):
"""Update the search direction given the latest gradient and step"""
self.old_gradient = self.gradient
self.gradient = gradient
N = len(self.gradient)
if self.inv_hessian is None:
# update the direction
self.direction = -self.gradient
self.status = "SD"
# new guess of the inverse hessian
self.inv_hessian = np.identity(N, float)
else:
# update the direction
self.direction = -np.dot(self.inv_hessian, self.gradient)
self.status = "QN"
# new guess of the inverse hessian (BFGS)
y = self.gradient - self.old_gradient
s = step
sy = abs(np.dot(s, y))+1e-5
A = np.outer(-y/sy, s)
A.ravel()[::N+1] += 1
self.inv_hessian = (
np.dot(np.dot(A.transpose(), self.inv_hessian), A) +
np.outer(s/sy, s)
) | python | def update(self, gradient, step):
"""Update the search direction given the latest gradient and step"""
self.old_gradient = self.gradient
self.gradient = gradient
N = len(self.gradient)
if self.inv_hessian is None:
# update the direction
self.direction = -self.gradient
self.status = "SD"
# new guess of the inverse hessian
self.inv_hessian = np.identity(N, float)
else:
# update the direction
self.direction = -np.dot(self.inv_hessian, self.gradient)
self.status = "QN"
# new guess of the inverse hessian (BFGS)
y = self.gradient - self.old_gradient
s = step
sy = abs(np.dot(s, y))+1e-5
A = np.outer(-y/sy, s)
A.ravel()[::N+1] += 1
self.inv_hessian = (
np.dot(np.dot(A.transpose(), self.inv_hessian), A) +
np.outer(s/sy, s)
) | [
"def",
"update",
"(",
"self",
",",
"gradient",
",",
"step",
")",
":",
"self",
".",
"old_gradient",
"=",
"self",
".",
"gradient",
"self",
".",
"gradient",
"=",
"gradient",
"N",
"=",
"len",
"(",
"self",
".",
"gradient",
")",
"if",
"self",
".",
"inv_hessian",
"is",
"None",
":",
"# update the direction",
"self",
".",
"direction",
"=",
"-",
"self",
".",
"gradient",
"self",
".",
"status",
"=",
"\"SD\"",
"# new guess of the inverse hessian",
"self",
".",
"inv_hessian",
"=",
"np",
".",
"identity",
"(",
"N",
",",
"float",
")",
"else",
":",
"# update the direction",
"self",
".",
"direction",
"=",
"-",
"np",
".",
"dot",
"(",
"self",
".",
"inv_hessian",
",",
"self",
".",
"gradient",
")",
"self",
".",
"status",
"=",
"\"QN\"",
"# new guess of the inverse hessian (BFGS)",
"y",
"=",
"self",
".",
"gradient",
"-",
"self",
".",
"old_gradient",
"s",
"=",
"step",
"sy",
"=",
"abs",
"(",
"np",
".",
"dot",
"(",
"s",
",",
"y",
")",
")",
"+",
"1e-5",
"A",
"=",
"np",
".",
"outer",
"(",
"-",
"y",
"/",
"sy",
",",
"s",
")",
"A",
".",
"ravel",
"(",
")",
"[",
":",
":",
"N",
"+",
"1",
"]",
"+=",
"1",
"self",
".",
"inv_hessian",
"=",
"(",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"A",
".",
"transpose",
"(",
")",
",",
"self",
".",
"inv_hessian",
")",
",",
"A",
")",
"+",
"np",
".",
"outer",
"(",
"s",
"/",
"sy",
",",
"s",
")",
")"
] | Update the search direction given the latest gradient and step | [
"Update",
"the",
"search",
"direction",
"given",
"the",
"latest",
"gradient",
"and",
"step"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L200-L224 |
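
A numerical sketch of the inverse-Hessian update performed above: the BFGS-style formula maps the gradient difference ``y`` back onto the step ``s`` (the secant condition). The helper re-implements just the update algebra and drops the ``abs()+1e-5`` safeguard on ``s.y``; the vectors are invented.

```python
import numpy as np

def bfgs_inv_update(inv_hessian, s, y):
    # Same algebra as QuasiNewton.update: H+ = A^T H A + s s^T/(s.y)
    # with A = I - y s^T/(s.y); the abs()+1e-5 guard is omitted here.
    sy = np.dot(s, y)
    A = np.identity(len(s)) - np.outer(y/sy, s)
    return np.dot(np.dot(A.transpose(), inv_hessian), A) + np.outer(s/sy, s)

s = np.array([0.10, -0.20, 0.05])
y = np.array([0.40, -0.10, 0.30])
H = bfgs_inv_update(np.identity(3), s, y)
print(np.allclose(np.dot(H, y), s))  # True: the secant condition holds
```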
molmod/molmod | molmod/minimizer.py | LineSearch.limit_step | def limit_step(self, step):
"""Clip the a step within the maximum allowed range"""
if self.qmax is None:
return step
else:
return np.clip(step, -self.qmax, self.qmax) | python | def limit_step(self, step):
"""Clip the a step within the maximum allowed range"""
if self.qmax is None:
return step
else:
return np.clip(step, -self.qmax, self.qmax) | [
"def",
"limit_step",
"(",
"self",
",",
"step",
")",
":",
"if",
"self",
".",
"qmax",
"is",
"None",
":",
"return",
"step",
"else",
":",
"return",
"np",
".",
"clip",
"(",
"step",
",",
"-",
"self",
".",
"qmax",
",",
"self",
".",
"qmax",
")"
] | Clip the step within the maximum allowed range | [
"Clip",
"the",
"a",
"step",
"within",
"the",
"maximum",
"allowed",
"range"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L251-L256 |
molmod/molmod | molmod/minimizer.py | GoldenLineSearch._bracket | def _bracket(self, qinit, f0, fun):
"""Find a bracket that does contain the minimum"""
self.num_bracket = 0
qa = qinit
fa = fun(qa)
counter = 0
if fa >= f0:
while True:
self.num_bracket += 1
#print " bracket shrink"
qb, fb = qa, fa
qa /= 1+phi
fa = fun(qa)
if qa < self.qtol:
return
if fa < f0:
return (0, f0), (qa, fa), (qb, fb)
counter += 1
if self.max_iter is not None and counter > self.max_iter:
return
else:
self.num_bracket += 1
#print " bracket grow1"
qb, fb = qa, fa
qa *= (1+phi)
fa = fun(qa)
if fa >= fb:
return (0, f0), (qb, fb), (qa, fa)
while True:
self.num_bracket += 1
#print " bracket grow2"
qc, fc = qb, fb
qb, fb = qa, fa
qa = qb*(1+phi) - qc
fa = fun(qa)
if fa >= fb:
return (qc, fc), (qb, fb), (qa, fa)
counter += 1
if self.max_iter is not None and counter > self.max_iter:
return | python | def _bracket(self, qinit, f0, fun):
"""Find a bracket that does contain the minimum"""
self.num_bracket = 0
qa = qinit
fa = fun(qa)
counter = 0
if fa >= f0:
while True:
self.num_bracket += 1
#print " bracket shrink"
qb, fb = qa, fa
qa /= 1+phi
fa = fun(qa)
if qa < self.qtol:
return
if fa < f0:
return (0, f0), (qa, fa), (qb, fb)
counter += 1
if self.max_iter is not None and counter > self.max_iter:
return
else:
self.num_bracket += 1
#print " bracket grow1"
qb, fb = qa, fa
qa *= (1+phi)
fa = fun(qa)
if fa >= fb:
return (0, f0), (qb, fb), (qa, fa)
while True:
self.num_bracket += 1
#print " bracket grow2"
qc, fc = qb, fb
qb, fb = qa, fa
qa = qb*(1+phi) - qc
fa = fun(qa)
if fa >= fb:
return (qc, fc), (qb, fb), (qa, fa)
counter += 1
if self.max_iter is not None and counter > self.max_iter:
return | [
"def",
"_bracket",
"(",
"self",
",",
"qinit",
",",
"f0",
",",
"fun",
")",
":",
"self",
".",
"num_bracket",
"=",
"0",
"qa",
"=",
"qinit",
"fa",
"=",
"fun",
"(",
"qa",
")",
"counter",
"=",
"0",
"if",
"fa",
">=",
"f0",
":",
"while",
"True",
":",
"self",
".",
"num_bracket",
"+=",
"1",
"#print \" bracket shrink\"",
"qb",
",",
"fb",
"=",
"qa",
",",
"fa",
"qa",
"/=",
"1",
"+",
"phi",
"fa",
"=",
"fun",
"(",
"qa",
")",
"if",
"qa",
"<",
"self",
".",
"qtol",
":",
"return",
"if",
"fa",
"<",
"f0",
":",
"return",
"(",
"0",
",",
"f0",
")",
",",
"(",
"qa",
",",
"fa",
")",
",",
"(",
"qb",
",",
"fb",
")",
"counter",
"+=",
"1",
"if",
"self",
".",
"max_iter",
"is",
"not",
"None",
"and",
"counter",
">",
"self",
".",
"max_iter",
":",
"return",
"else",
":",
"self",
".",
"num_bracket",
"+=",
"1",
"#print \" bracket grow1\"",
"qb",
",",
"fb",
"=",
"qa",
",",
"fa",
"qa",
"*=",
"(",
"1",
"+",
"phi",
")",
"fa",
"=",
"fun",
"(",
"qa",
")",
"if",
"fa",
">=",
"fb",
":",
"return",
"(",
"0",
",",
"f0",
")",
",",
"(",
"qb",
",",
"fb",
")",
",",
"(",
"qa",
",",
"fa",
")",
"while",
"True",
":",
"self",
".",
"num_bracket",
"+=",
"1",
"#print \" bracket grow2\"",
"qc",
",",
"fc",
"=",
"qb",
",",
"fb",
"qb",
",",
"fb",
"=",
"qa",
",",
"fa",
"qa",
"=",
"qb",
"*",
"(",
"1",
"+",
"phi",
")",
"-",
"qc",
"fa",
"=",
"fun",
"(",
"qa",
")",
"if",
"fa",
">=",
"fb",
":",
"return",
"(",
"qc",
",",
"fc",
")",
",",
"(",
"qb",
",",
"fb",
")",
",",
"(",
"qa",
",",
"fa",
")",
"counter",
"+=",
"1",
"if",
"self",
".",
"max_iter",
"is",
"not",
"None",
"and",
"counter",
">",
"self",
".",
"max_iter",
":",
"return"
] | Find a bracket that does contain the minimum | [
"Find",
"a",
"bracket",
"that",
"does",
"contain",
"the",
"minimum"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L337-L376 |
molmod/molmod | molmod/minimizer.py | GoldenLineSearch._golden | def _golden(self, triplet, fun):
"""Reduce the size of the bracket until the minimum is found"""
self.num_golden = 0
(qa, fa), (qb, fb), (qc, fc) = triplet
while True:
self.num_golden += 1
qd = qa + (qb-qa)*phi/(1+phi)
fd = fun(qd)
if fd < fb:
#print "golden d"
(qa, fa), (qb, fb) = (qb, fb), (qd, fd)
else:
#print "golden b"
(qa, fa), (qc, fc) = (qd, fd), (qa, fa)
if abs(qa-qb) < self.qtol:
return qc, fc | python | def _golden(self, triplet, fun):
"""Reduce the size of the bracket until the minimum is found"""
self.num_golden = 0
(qa, fa), (qb, fb), (qc, fc) = triplet
while True:
self.num_golden += 1
qd = qa + (qb-qa)*phi/(1+phi)
fd = fun(qd)
if fd < fb:
#print "golden d"
(qa, fa), (qb, fb) = (qb, fb), (qd, fd)
else:
#print "golden b"
(qa, fa), (qc, fc) = (qd, fd), (qa, fa)
if abs(qa-qb) < self.qtol:
return qc, fc | [
"def",
"_golden",
"(",
"self",
",",
"triplet",
",",
"fun",
")",
":",
"self",
".",
"num_golden",
"=",
"0",
"(",
"qa",
",",
"fa",
")",
",",
"(",
"qb",
",",
"fb",
")",
",",
"(",
"qc",
",",
"fc",
")",
"=",
"triplet",
"while",
"True",
":",
"self",
".",
"num_golden",
"+=",
"1",
"qd",
"=",
"qa",
"+",
"(",
"qb",
"-",
"qa",
")",
"*",
"phi",
"/",
"(",
"1",
"+",
"phi",
")",
"fd",
"=",
"fun",
"(",
"qd",
")",
"if",
"fd",
"<",
"fb",
":",
"#print \"golden d\"",
"(",
"qa",
",",
"fa",
")",
",",
"(",
"qb",
",",
"fb",
")",
"=",
"(",
"qb",
",",
"fb",
")",
",",
"(",
"qd",
",",
"fd",
")",
"else",
":",
"#print \"golden b\"",
"(",
"qa",
",",
"fa",
")",
",",
"(",
"qc",
",",
"fc",
")",
"=",
"(",
"qd",
",",
"fd",
")",
",",
"(",
"qa",
",",
"fa",
")",
"if",
"abs",
"(",
"qa",
"-",
"qb",
")",
"<",
"self",
".",
"qtol",
":",
"return",
"qc",
",",
"fc"
] | Reduce the size of the bracket until the minimum is found | [
"Reduce",
"the",
"size",
"of",
"the",
"bracket",
"until",
"the",
"minimum",
"is",
"found"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L378-L393 |
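
For context, a compact standalone golden-section search over a fixed bracket. It mirrors the shrinking idea of ``_bracket`` plus ``_golden`` above but is written independently, so the bookkeeping differs from the class internals and both interior points are re-evaluated each iteration for simplicity.

```python
import numpy as np

invphi = (np.sqrt(5.0) - 1.0)/2.0   # about 0.618

def golden_section(fun, a, b, qtol=1e-8):
    # Minimize a unimodal 1D function on [a, b] by shrinking the bracket.
    while abs(b - a) > qtol:
        c = b - invphi*(b - a)
        d = a + invphi*(b - a)
        if fun(c) < fun(d):
            b = d
        else:
            a = c
    q = 0.5*(a + b)
    return q, fun(q)

print(golden_section(lambda q: (q - 1.3)**2 + 0.7, 0.0, 4.0))
# q close to 1.3 and f close to 0.7
```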
molmod/molmod | molmod/minimizer.py | Preconditioner.update | def update(self, counter, f, x_orig, gradient_orig):
"""Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``do_update`` -- True when an update is required.
Derived classes must call this method to test whether the preconditioner
requires updating. Derived classes must also return this boolean
to their caller.
"""
if counter - self.last_update > self.each:
grad_rms = np.sqrt((gradient_orig**2).mean())
if grad_rms < self.grad_rms:
self.last_update = counter
return True
return False | python | def update(self, counter, f, x_orig, gradient_orig):
"""Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``do_update`` -- True when an update is required.
Derived classes must call this method to test whether the preconditioner
requires updating. Derived classes must also return this boolean
to their caller.
"""
if counter - self.last_update > self.each:
grad_rms = np.sqrt((gradient_orig**2).mean())
if grad_rms < self.grad_rms:
self.last_update = counter
return True
return False | [
"def",
"update",
"(",
"self",
",",
"counter",
",",
"f",
",",
"x_orig",
",",
"gradient_orig",
")",
":",
"if",
"counter",
"-",
"self",
".",
"last_update",
">",
"self",
".",
"each",
":",
"grad_rms",
"=",
"np",
".",
"sqrt",
"(",
"(",
"gradient_orig",
"**",
"2",
")",
".",
"mean",
"(",
")",
")",
"if",
"grad_rms",
"<",
"self",
".",
"grad_rms",
":",
"self",
".",
"last_update",
"=",
"counter",
"return",
"True",
"return",
"False"
] | Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``do_update`` -- True when an update is required.
Derived classes must call this method to test whether the preconditioner
requires updating. Derived classes must also return this boolean
to their caller. | [
"Perform",
"an",
"update",
"of",
"the",
"linear",
"transformation"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L579-L600 |
molmod/molmod | molmod/minimizer.py | DiagonalPreconditioner.update | def update(self, counter, f, x_orig, gradient_orig):
"""Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``done_update`` -- True when an update has been done
The minimizer must reset the search direction method when an update
has been done.
"""
do_update = Preconditioner.update(self, counter, f, x_orig, gradient_orig)
if do_update:
# determine a new preconditioner
N = len(x_orig)
if self.scales is None:
self.scales = np.ones(N, float)
for i in range(N):
epsilon = self.epsilon/self.scales[i]
xh = x_orig.copy()
xh[i] += 0.5*epsilon
fh = self.fun(xh)
xl = x_orig.copy()
xl[i] -= 0.5*epsilon
fl = self.fun(xl)
curv = (fh+fl-2*f)/epsilon**2
self.scales[i] = np.sqrt(abs(curv))
if self.scales.max() <= 0:
self.scales = np.ones(N, float)
else:
self.scales /= self.scales.max()
self.scales[self.scales<self.scale_limit] = self.scale_limit
return do_update | python | def update(self, counter, f, x_orig, gradient_orig):
"""Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``done_update`` -- True when an update has been done
The minimizer must reset the search direction method when an update
has been done.
"""
do_update = Preconditioner.update(self, counter, f, x_orig, gradient_orig)
if do_update:
# determine a new preconditioner
N = len(x_orig)
if self.scales is None:
self.scales = np.ones(N, float)
for i in range(N):
epsilon = self.epsilon/self.scales[i]
xh = x_orig.copy()
xh[i] += 0.5*epsilon
fh = self.fun(xh)
xl = x_orig.copy()
xl[i] -= 0.5*epsilon
fl = self.fun(xl)
curv = (fh+fl-2*f)/epsilon**2
self.scales[i] = np.sqrt(abs(curv))
if self.scales.max() <= 0:
self.scales = np.ones(N, float)
else:
self.scales /= self.scales.max()
self.scales[self.scales<self.scale_limit] = self.scale_limit
return do_update | [
"def",
"update",
"(",
"self",
",",
"counter",
",",
"f",
",",
"x_orig",
",",
"gradient_orig",
")",
":",
"do_update",
"=",
"Preconditioner",
".",
"update",
"(",
"self",
",",
"counter",
",",
"f",
",",
"x_orig",
",",
"gradient_orig",
")",
"if",
"do_update",
":",
"# determine a new preconditioner",
"N",
"=",
"len",
"(",
"x_orig",
")",
"if",
"self",
".",
"scales",
"is",
"None",
":",
"self",
".",
"scales",
"=",
"np",
".",
"ones",
"(",
"N",
",",
"float",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"epsilon",
"=",
"self",
".",
"epsilon",
"/",
"self",
".",
"scales",
"[",
"i",
"]",
"xh",
"=",
"x_orig",
".",
"copy",
"(",
")",
"xh",
"[",
"i",
"]",
"+=",
"0.5",
"*",
"epsilon",
"fh",
"=",
"self",
".",
"fun",
"(",
"xh",
")",
"xl",
"=",
"x_orig",
".",
"copy",
"(",
")",
"xl",
"[",
"i",
"]",
"-=",
"0.5",
"*",
"epsilon",
"fl",
"=",
"self",
".",
"fun",
"(",
"xl",
")",
"curv",
"=",
"(",
"fh",
"+",
"fl",
"-",
"2",
"*",
"f",
")",
"/",
"epsilon",
"**",
"2",
"self",
".",
"scales",
"[",
"i",
"]",
"=",
"np",
".",
"sqrt",
"(",
"abs",
"(",
"curv",
")",
")",
"if",
"self",
".",
"scales",
".",
"max",
"(",
")",
"<=",
"0",
":",
"self",
".",
"scales",
"=",
"np",
".",
"ones",
"(",
"N",
",",
"float",
")",
"else",
":",
"self",
".",
"scales",
"/=",
"self",
".",
"scales",
".",
"max",
"(",
")",
"self",
".",
"scales",
"[",
"self",
".",
"scales",
"<",
"self",
".",
"scale_limit",
"]",
"=",
"self",
".",
"scale_limit",
"return",
"do_update"
] | Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``done_update`` -- True when an update has been done
The minimizer must reset the search direction method when an update
has been done. | [
"Perform",
"an",
"update",
"of",
"the",
"linear",
"transformation"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L655-L691 |
molmod/molmod | molmod/minimizer.py | FullPreconditioner.update | def update(self, counter, f, x_orig, gradient_orig):
"""Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``done_update`` -- True when an update has been done
The minimizer must reset the search direction method when an update
has been done.
"""
if Preconditioner.update(self, counter, f, x_orig, gradient_orig):
# determine a new preconditioner
hessian = compute_fd_hessian(self.fun, x_orig, self.epsilon)
evals, evecs = np.linalg.eigh(hessian)
self.scales = np.sqrt(abs(evals))+self.epsilon
self.rotation = evecs
return True
return False | python | def update(self, counter, f, x_orig, gradient_orig):
"""Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``done_update`` -- True when an update has been done
The minimizer must reset the search direction method when an update
has been done.
"""
if Preconditioner.update(self, counter, f, x_orig, gradient_orig):
# determine a new preconditioner
hessian = compute_fd_hessian(self.fun, x_orig, self.epsilon)
evals, evecs = np.linalg.eigh(hessian)
self.scales = np.sqrt(abs(evals))+self.epsilon
self.rotation = evecs
return True
return False | [
"def",
"update",
"(",
"self",
",",
"counter",
",",
"f",
",",
"x_orig",
",",
"gradient_orig",
")",
":",
"if",
"Preconditioner",
".",
"update",
"(",
"self",
",",
"counter",
",",
"f",
",",
"x_orig",
",",
"gradient_orig",
")",
":",
"# determine a new preconditioner",
"hessian",
"=",
"compute_fd_hessian",
"(",
"self",
".",
"fun",
",",
"x_orig",
",",
"self",
".",
"epsilon",
")",
"evals",
",",
"evecs",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"hessian",
")",
"self",
".",
"scales",
"=",
"np",
".",
"sqrt",
"(",
"abs",
"(",
"evals",
")",
")",
"+",
"self",
".",
"epsilon",
"self",
".",
"rotation",
"=",
"evecs",
"return",
"True",
"return",
"False"
] | Perform an update of the linear transformation
Arguments:
| ``counter`` -- the iteration counter of the minimizer
| ``f`` -- the function value at ``x_orig``
| ``x_orig`` -- the unknowns in original coordinates
| ``gradient_orig`` -- the gradient in original coordinates
Return value:
| ``done_update`` -- True when an update has been done
The minimizer must reset the search direction method when an update
has been done. | [
"Perform",
"an",
"update",
"of",
"the",
"linear",
"transformation"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L740-L762 |
molmod/molmod | molmod/minimizer.py | FullPreconditioner.do | def do(self, x_orig):
"""Transform the unknowns to preconditioned coordinates
This method also transforms the gradient to original coordinates
"""
if self.scales is None:
return x_orig
else:
return np.dot(self.rotation.transpose(), x_orig)*self.scales | python | def do(self, x_orig):
"""Transform the unknowns to preconditioned coordinates
This method also transforms the gradient to original coordinates
"""
if self.scales is None:
return x_orig
else:
return np.dot(self.rotation.transpose(), x_orig)*self.scales | [
"def",
"do",
"(",
"self",
",",
"x_orig",
")",
":",
"if",
"self",
".",
"scales",
"is",
"None",
":",
"return",
"x_orig",
"else",
":",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"rotation",
".",
"transpose",
"(",
")",
",",
"x_orig",
")",
"*",
"self",
".",
"scales"
] | Transform the unknowns to preconditioned coordinates
This method also transforms the gradient to original coordinates | [
"Transform",
"the",
"unknowns",
"to",
"preconditioned",
"coordinates"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L764-L772 |
molmod/molmod | molmod/minimizer.py | FullPreconditioner.undo | def undo(self, x_prec):
"""Transform the unknowns to original coordinates
This method also transforms the gradient to preconditioned coordinates
"""
if self.scales is None:
return x_prec
else:
return np.dot(self.rotation, x_prec/self.scales) | python | def undo(self, x_prec):
"""Transform the unknowns to original coordinates
This method also transforms the gradient to preconditioned coordinates
"""
if self.scales is None:
return x_prec
else:
return np.dot(self.rotation, x_prec/self.scales) | [
"def",
"undo",
"(",
"self",
",",
"x_prec",
")",
":",
"if",
"self",
".",
"scales",
"is",
"None",
":",
"return",
"x_prec",
"else",
":",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"rotation",
",",
"x_prec",
"/",
"self",
".",
"scales",
")"
] | Transform the unknowns to original coordinates
This method also transforms the gradient to preconditioned coordinates | [
"Transform",
"the",
"unknowns",
"to",
"original",
"coordinates"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L774-L782 |
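
A round-trip sketch of the two formulas above: with an orthogonal ``rotation`` and strictly positive ``scales``, ``undo(do(x))`` recovers ``x``. The helpers re-implement just the formulas outside the class, with invented data.

```python
import numpy as np

def do(x_orig, rotation, scales):
    # same formula as FullPreconditioner.do
    return np.dot(rotation.transpose(), x_orig)*scales

def undo(x_prec, rotation, scales):
    # same formula as FullPreconditioner.undo
    return np.dot(rotation, x_prec/scales)

rotation, _ = np.linalg.qr(np.random.normal(0, 1, (4, 4)))  # random orthogonal matrix
scales = np.array([0.5, 1.0, 2.0, 3.0])
x = np.random.normal(0, 1, 4)
print(np.allclose(undo(do(x, rotation, scales), rotation, scales), x))  # True
```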
molmod/molmod | molmod/minimizer.py | ConvergenceCondition.get_header | def get_header(self):
"""Returns the header for screen logging of the minimization"""
result = " "
if self.step_rms is not None:
result += " Step RMS"
if self.step_max is not None:
result += " Step MAX"
if self.grad_rms is not None:
result += " Grad RMS"
if self.grad_max is not None:
result += " Grad MAX"
if self.rel_grad_rms is not None:
result += " Grad/F RMS"
if self.rel_grad_max is not None:
result += " Grad/F MAX"
return result | python | def get_header(self):
"""Returns the header for screen logging of the minimization"""
result = " "
if self.step_rms is not None:
result += " Step RMS"
if self.step_max is not None:
result += " Step MAX"
if self.grad_rms is not None:
result += " Grad RMS"
if self.grad_max is not None:
result += " Grad MAX"
if self.rel_grad_rms is not None:
result += " Grad/F RMS"
if self.rel_grad_max is not None:
result += " Grad/F MAX"
return result | [
"def",
"get_header",
"(",
"self",
")",
":",
"result",
"=",
"\" \"",
"if",
"self",
".",
"step_rms",
"is",
"not",
"None",
":",
"result",
"+=",
"\" Step RMS\"",
"if",
"self",
".",
"step_max",
"is",
"not",
"None",
":",
"result",
"+=",
"\" Step MAX\"",
"if",
"self",
".",
"grad_rms",
"is",
"not",
"None",
":",
"result",
"+=",
"\" Grad RMS\"",
"if",
"self",
".",
"grad_max",
"is",
"not",
"None",
":",
"result",
"+=",
"\" Grad MAX\"",
"if",
"self",
".",
"rel_grad_rms",
"is",
"not",
"None",
":",
"result",
"+=",
"\" Grad/F RMS\"",
"if",
"self",
".",
"rel_grad_max",
"is",
"not",
"None",
":",
"result",
"+=",
"\" Grad/F MAX\"",
"return",
"result"
] | Returns the header for screen logging of the minimization | [
"Returns",
"the",
"header",
"for",
"screen",
"logging",
"of",
"the",
"minimization"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L820-L835 |
molmod/molmod | molmod/minimizer.py | LineWrapper.configure | def configure(self, x0, axis):
"""Configure the 1D function for a line search
Arguments:
x0 -- the reference point (q=0)
axis -- a unit vector in the direction of the line search
"""
self.x0 = x0
self.axis = axis | python | def configure(self, x0, axis):
"""Configure the 1D function for a line search
Arguments:
x0 -- the reference point (q=0)
axis -- a unit vector in the direction of the line search
"""
self.x0 = x0
self.axis = axis | [
"def",
"configure",
"(",
"self",
",",
"x0",
",",
"axis",
")",
":",
"self",
".",
"x0",
"=",
"x0",
"self",
".",
"axis",
"=",
"axis"
] | Configure the 1D function for a line search
Arguments:
x0 -- the reference point (q=0)
axis -- a unit vector in the direction of the line search | [
"Configure",
"the",
"1D",
"function",
"for",
"a",
"line",
"search"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L958-L966 |
molmod/molmod | molmod/minimizer.py | Constraints._compute_equations | def _compute_equations(self, x, verbose=False):
'''Compute the values and the normals (gradients) of active constraints.
Arguments:
| ``x`` -- The unknowns.
'''
# compute the error and the normals.
normals = []
values = []
signs = []
error = 0.0
if verbose:
print()
print(' '.join('% 10.3e' % val for val in x), end=' ')
active_str = ''
for i, (sign, equation) in enumerate(self.equations):
value, normal = equation(x)
if (i < len(self.lock) and self.lock[i]) or \
(sign==-1 and value > -self.threshold) or \
(sign==0) or (sign==1 and value < self.threshold):
values.append(value)
normals.append(normal)
signs.append(sign)
error += value**2
if verbose:
active_str += 'X'
if i < len(self.lock):
self.lock[i] = True
elif verbose:
active_str += '-'
error = np.sqrt(error)
normals = np.array(normals, float)
values = np.array(values, float)
signs = np.array(signs, int)
if verbose:
print('[%s]' % active_str, end=' ')
if error < self.threshold:
print('OK')
else:
print('%.5e' % error)
return normals, values, error, signs | python | def _compute_equations(self, x, verbose=False):
'''Compute the values and the normals (gradients) of active constraints.
Arguments:
| ``x`` -- The unknowns.
'''
# compute the error and the normals.
normals = []
values = []
signs = []
error = 0.0
if verbose:
print()
print(' '.join('% 10.3e' % val for val in x), end=' ')
active_str = ''
for i, (sign, equation) in enumerate(self.equations):
value, normal = equation(x)
if (i < len(self.lock) and self.lock[i]) or \
(sign==-1 and value > -self.threshold) or \
(sign==0) or (sign==1 and value < self.threshold):
values.append(value)
normals.append(normal)
signs.append(sign)
error += value**2
if verbose:
active_str += 'X'
if i < len(self.lock):
self.lock[i] = True
elif verbose:
active_str += '-'
error = np.sqrt(error)
normals = np.array(normals, float)
values = np.array(values, float)
signs = np.array(signs, int)
if verbose:
print('[%s]' % active_str, end=' ')
if error < self.threshold:
print('OK')
else:
print('%.5e' % error)
return normals, values, error, signs | [
"def",
"_compute_equations",
"(",
"self",
",",
"x",
",",
"verbose",
"=",
"False",
")",
":",
"# compute the error and the normals.",
"normals",
"=",
"[",
"]",
"values",
"=",
"[",
"]",
"signs",
"=",
"[",
"]",
"error",
"=",
"0.0",
"if",
"verbose",
":",
"print",
"(",
")",
"print",
"(",
"' '",
".",
"join",
"(",
"'% 10.3e'",
"%",
"val",
"for",
"val",
"in",
"x",
")",
",",
"end",
"=",
"' '",
")",
"active_str",
"=",
"''",
"for",
"i",
",",
"(",
"sign",
",",
"equation",
")",
"in",
"enumerate",
"(",
"self",
".",
"equations",
")",
":",
"value",
",",
"normal",
"=",
"equation",
"(",
"x",
")",
"if",
"(",
"i",
"<",
"len",
"(",
"self",
".",
"lock",
")",
"and",
"self",
".",
"lock",
"[",
"i",
"]",
")",
"or",
"(",
"sign",
"==",
"-",
"1",
"and",
"value",
">",
"-",
"self",
".",
"threshold",
")",
"or",
"(",
"sign",
"==",
"0",
")",
"or",
"(",
"sign",
"==",
"1",
"and",
"value",
"<",
"self",
".",
"threshold",
")",
":",
"values",
".",
"append",
"(",
"value",
")",
"normals",
".",
"append",
"(",
"normal",
")",
"signs",
".",
"append",
"(",
"sign",
")",
"error",
"+=",
"value",
"**",
"2",
"if",
"verbose",
":",
"active_str",
"+=",
"'X'",
"if",
"i",
"<",
"len",
"(",
"self",
".",
"lock",
")",
":",
"self",
".",
"lock",
"[",
"i",
"]",
"=",
"True",
"elif",
"verbose",
":",
"active_str",
"+=",
"'-'",
"error",
"=",
"np",
".",
"sqrt",
"(",
"error",
")",
"normals",
"=",
"np",
".",
"array",
"(",
"normals",
",",
"float",
")",
"values",
"=",
"np",
".",
"array",
"(",
"values",
",",
"float",
")",
"signs",
"=",
"np",
".",
"array",
"(",
"signs",
",",
"int",
")",
"if",
"verbose",
":",
"print",
"(",
"'[%s]'",
"%",
"active_str",
",",
"end",
"=",
"' '",
")",
"if",
"error",
"<",
"self",
".",
"threshold",
":",
"print",
"(",
"'OK'",
")",
"else",
":",
"print",
"(",
"'%.5e'",
"%",
"error",
")",
"return",
"normals",
",",
"values",
",",
"error",
",",
"signs"
] | Compute the values and the normals (gradients) of active constraints.
Arguments:
| ``x`` -- The unknowns. | [
"Compute",
"the",
"values",
"and",
"the",
"normals",
"(",
"gradients",
")",
"of",
"active",
"constraints",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1073-L1113 |
molmod/molmod | molmod/minimizer.py | Constraints._rough_shake | def _rough_shake(self, x, normals, values, error):
'''Take a robust, but not very efficient step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function.
'''
counter = 0
while error > self.threshold and counter < self.max_iter:
dxs = []
for i in range(len(normals)):
dx = -normals[i]*values[i]/np.dot(normals[i], normals[i])
dxs.append(dx)
dxs = np.array(dxs)
dx = dxs[abs(values).argmax()]
x = x+dx
self.lock[:] = False
normals, values, error = self._compute_equations(x)[:-1]
counter += 1
return x, normals, values, error | python | def _rough_shake(self, x, normals, values, error):
'''Take a robust, but not very efficient step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function.
'''
counter = 0
while error > self.threshold and counter < self.max_iter:
dxs = []
for i in range(len(normals)):
dx = -normals[i]*values[i]/np.dot(normals[i], normals[i])
dxs.append(dx)
dxs = np.array(dxs)
dx = dxs[abs(values).argmax()]
x = x+dx
self.lock[:] = False
normals, values, error = self._compute_equations(x)[:-1]
counter += 1
return x, normals, values, error | [
"def",
"_rough_shake",
"(",
"self",
",",
"x",
",",
"normals",
",",
"values",
",",
"error",
")",
":",
"counter",
"=",
"0",
"while",
"error",
">",
"self",
".",
"threshold",
"and",
"counter",
"<",
"self",
".",
"max_iter",
":",
"dxs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"normals",
")",
")",
":",
"dx",
"=",
"-",
"normals",
"[",
"i",
"]",
"*",
"values",
"[",
"i",
"]",
"/",
"np",
".",
"dot",
"(",
"normals",
"[",
"i",
"]",
",",
"normals",
"[",
"i",
"]",
")",
"dxs",
".",
"append",
"(",
"dx",
")",
"dxs",
"=",
"np",
".",
"array",
"(",
"dxs",
")",
"dx",
"=",
"dxs",
"[",
"abs",
"(",
"values",
")",
".",
"argmax",
"(",
")",
"]",
"x",
"=",
"x",
"+",
"dx",
"self",
".",
"lock",
"[",
":",
"]",
"=",
"False",
"normals",
",",
"values",
",",
"error",
"=",
"self",
".",
"_compute_equations",
"(",
"x",
")",
"[",
":",
"-",
"1",
"]",
"counter",
"+=",
"1",
"return",
"x",
",",
"normals",
",",
"values",
",",
"error"
] | Take a robust, but not very efficient step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function. | [
"Take",
"a",
"robust",
"but",
"not",
"very",
"efficient",
"step",
"towards",
"the",
"constraints",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1115-L1138 |
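
A toy sketch of the kind of correction these shake routines apply: a least-norm (Gauss-Newton) step that pulls a point onto the circle ``x0**2 + x1**2 = 1``, in the spirit of the ``_fast_shake`` record that follows. It is written independently of the class, so the constraint locking, sign handling and iteration control of the real code are left out.

```python
import numpy as np

def circle_constraint(x):
    # value and gradient (normal) of c(x) = x0**2 + x1**2 - 1
    return np.dot(x, x) - 1.0, 2*x

def least_norm_step(x, equations):
    # Solve normals . dx = -values in the least-norm sense.
    values, normals = zip(*(eq(x) for eq in equations))
    dx = np.linalg.lstsq(np.array(normals), -np.array(values), rcond=None)[0]
    return x + dx

x = np.array([1.3, 0.6])
for _ in range(5):
    x = least_norm_step(x, [circle_constraint])
print(x, np.dot(x, x) - 1.0)  # constraint value close to zero
```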
molmod/molmod | molmod/minimizer.py | Constraints._fast_shake | def _fast_shake(self, x, normals, values, error):
'''Take an efficient (not always robust) step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function.
'''
# filter out the degrees of freedom that do not feel the constraints.
mask = (normals!=0).any(axis=0) > 0
normals = normals[:,mask]
# Take a step to lower the constraint cost function. If the step is too
# large, it is reduced iteratively towards a small steepest descent
# step. This is very similar to the Levenberg-Marquardt algorithm.
# Singular Value decomposition is used to make this procedure
# numerically more stable and efficient.
U, S, Vt = np.linalg.svd(normals, full_matrices=False)
rcond = None
counter = 0
while True:
if rcond is None:
rcond = 0.0
elif rcond == 0.0:
rcond = self.rcond1
else:
rcond *= 10
# perform the least-norm correction
Sinv = (S**2+rcond)
if Sinv.max() == 0.0:
continue
Sinv = S/Sinv
# compute the step
dx = -np.dot(Vt.transpose(), np.dot(U.transpose(), values)*Sinv)
new_x = x.copy()
new_x[mask] += dx
# try the step
new_normals, new_values, new_error = self._compute_equations(new_x)[:-1]
if new_error < 0.9*error:
# Only if it decreases the constraint cost sufficiently, the
# step is accepted. This routine is pointless if it converges
# slowly.
return new_x, new_normals, new_values, new_error
elif abs(dx).sum() < self.threshold:
# If the step becomes too small, then give up.
break
elif counter > self.max_iter:
raise ConstraintError('Exceeded maximum number of shake iterations.')
counter += 1 | python | def _fast_shake(self, x, normals, values, error):
'''Take an efficient (not always robust) step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function.
'''
# filter out the degrees of freedom that do not feel the constraints.
mask = (normals!=0).any(axis=0) > 0
normals = normals[:,mask]
# Take a step to lower the constraint cost function. If the step is too
# large, it is reduced iteratively towards a small steepest descent
# step. This is very similar to the Levenberg-Marquardt algorithm.
# Singular Value decomposition is used to make this procedure
# numerically more stable and efficient.
U, S, Vt = np.linalg.svd(normals, full_matrices=False)
rcond = None
counter = 0
while True:
if rcond is None:
rcond = 0.0
elif rcond == 0.0:
rcond = self.rcond1
else:
rcond *= 10
# perform the least-norm correction
Sinv = (S**2+rcond)
if Sinv.max() == 0.0:
continue
Sinv = S/Sinv
# compute the step
dx = -np.dot(Vt.transpose(), np.dot(U.transpose(), values)*Sinv)
new_x = x.copy()
new_x[mask] += dx
# try the step
new_normals, new_values, new_error = self._compute_equations(new_x)[:-1]
if new_error < 0.9*error:
# Only if it decreases the constraint cost sufficiently, the
# step is accepted. This routine is pointless if it converges
# slowly.
return new_x, new_normals, new_values, new_error
elif abs(dx).sum() < self.threshold:
# If the step becomes too small, then give up.
break
elif counter > self.max_iter:
raise ConstraintError('Exceeded maximum number of shake iterations.')
counter += 1 | [
"def",
"_fast_shake",
"(",
"self",
",",
"x",
",",
"normals",
",",
"values",
",",
"error",
")",
":",
"# filter out the degrees of freedom that do not feel the constraints.",
"mask",
"=",
"(",
"normals",
"!=",
"0",
")",
".",
"any",
"(",
"axis",
"=",
"0",
")",
">",
"0",
"normals",
"=",
"normals",
"[",
":",
",",
"mask",
"]",
"# Take a step to lower the constraint cost function. If the step is too",
"# large, it is reduced iteratively towards a small steepest descent",
"# step. This is very similar to the Levenberg-Marquardt algorithm.",
"# Singular Value decomposition is used to make this procedure",
"# numerically more stable and efficient.",
"U",
",",
"S",
",",
"Vt",
"=",
"np",
".",
"linalg",
".",
"svd",
"(",
"normals",
",",
"full_matrices",
"=",
"False",
")",
"rcond",
"=",
"None",
"counter",
"=",
"0",
"while",
"True",
":",
"if",
"rcond",
"is",
"None",
":",
"rcond",
"=",
"0.0",
"elif",
"rcond",
"==",
"0.0",
":",
"rcond",
"=",
"self",
".",
"rcond1",
"else",
":",
"rcond",
"*=",
"10",
"# perform the least-norm correction",
"Sinv",
"=",
"(",
"S",
"**",
"2",
"+",
"rcond",
")",
"if",
"Sinv",
".",
"max",
"(",
")",
"==",
"0.0",
":",
"continue",
"Sinv",
"=",
"S",
"/",
"Sinv",
"# compute the step",
"dx",
"=",
"-",
"np",
".",
"dot",
"(",
"Vt",
".",
"transpose",
"(",
")",
",",
"np",
".",
"dot",
"(",
"U",
".",
"transpose",
"(",
")",
",",
"values",
")",
"*",
"Sinv",
")",
"new_x",
"=",
"x",
".",
"copy",
"(",
")",
"new_x",
"[",
"mask",
"]",
"+=",
"dx",
"# try the step",
"new_normals",
",",
"new_values",
",",
"new_error",
"=",
"self",
".",
"_compute_equations",
"(",
"new_x",
")",
"[",
":",
"-",
"1",
"]",
"if",
"new_error",
"<",
"0.9",
"*",
"error",
":",
"# Only if it decreases the constraint cost sufficiently, the",
"# step is accepted. This routine is pointless of it converges",
"# slowly.",
"return",
"new_x",
",",
"new_normals",
",",
"new_values",
",",
"new_error",
"elif",
"abs",
"(",
"dx",
")",
".",
"sum",
"(",
")",
"<",
"self",
".",
"threshold",
":",
"# If the step becomes too small, then give up.",
"break",
"elif",
"counter",
">",
"self",
".",
"max_iter",
":",
"raise",
"ConstraintError",
"(",
"'Exceeded maximum number of shake iterations.'",
")",
"counter",
"+=",
"1"
] | Take an efficient (not always robust) step towards the constraints.
Arguments:
| ``x`` -- The unknowns.
| ``normals`` -- A numpy array with the gradients of the active
constraints. Each row is one gradient.
| ``values`` -- A numpy array with the values of the constraint
functions.
| ``error`` -- The square root of the constraint cost function. | [
"Take",
"an",
"efficient",
"(",
"not",
"always",
"robust",
")",
"step",
"towards",
"the",
"constraints",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1140-L1190 |
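The core of the step above is a least-norm solve of normals . dx = -values with damped singular values. A standalone sketch of that damped pseudoinverse step (plain numpy, not the molmod code; the constraint rows below are invented for illustration):

import numpy as np

def damped_least_norm_step(normals, values, rcond=0.0):
    # least-norm dx with normals.dx = -values; rcond damps small singular
    # values in the same spirit as the rcond1 loop in the routine above
    U, S, Vt = np.linalg.svd(normals, full_matrices=False)
    Sinv = S/(S**2 + rcond)
    return -np.dot(Vt.T, np.dot(U.T, values)*Sinv)

normals = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 1.0]])   # two linearized constraints
values = np.array([0.5, -0.2])
dx = damped_least_norm_step(normals, values)
print(np.dot(normals, dx) + values)     # close to [0, 0]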
molmod/molmod | molmod/minimizer.py | Constraints.free_shake | def free_shake(self, x):
'''Brings unknowns to the constraints.
Arguments:
| ``x`` -- The unknowns.
'''
self.lock[:] = False
normals, values, error = self._compute_equations(x)[:-1]
counter = 0
while True:
if error <= self.threshold:
break
# try a well-behaved move to the constraints
result = self._fast_shake(x, normals, values, error)
counter += 1
if result is not None:
x, normals, values, error = result
else:
# well-behaved move is too slow.
# do a cumbersome move to satisfy constraints approximately.
x, normals, values, error = self._rough_shake(x, normals, values, error)
counter += 1
# When too many iterations are required, just give up.
if counter > self.max_iter:
raise ConstraintError('Exceeded maximum number of shake iterations.')
return x, counter, len(values) | python | def free_shake(self, x):
'''Brings unknowns to the constraints.
Arguments:
| ``x`` -- The unknowns.
'''
self.lock[:] = False
normals, values, error = self._compute_equations(x)[:-1]
counter = 0
while True:
if error <= self.threshold:
break
# try a well-behaved move to the constraints
result = self._fast_shake(x, normals, values, error)
counter += 1
if result is not None:
x, normals, values, error = result
else:
# well-behaved move is too slow.
# do a cumbersome move to satisfy constraints approximately.
x, normals, values, error = self._rough_shake(x, normals, values, error)
counter += 1
# When too many iterations are required, just give up.
if counter > self.max_iter:
raise ConstraintError('Exceeded maximum number of shake iterations.')
return x, counter, len(values) | [
"def",
"free_shake",
"(",
"self",
",",
"x",
")",
":",
"self",
".",
"lock",
"[",
":",
"]",
"=",
"False",
"normals",
",",
"values",
",",
"error",
"=",
"self",
".",
"_compute_equations",
"(",
"x",
")",
"[",
":",
"-",
"1",
"]",
"counter",
"=",
"0",
"while",
"True",
":",
"if",
"error",
"<=",
"self",
".",
"threshold",
":",
"break",
"# try a well-behaved move to the constrains",
"result",
"=",
"self",
".",
"_fast_shake",
"(",
"x",
",",
"normals",
",",
"values",
",",
"error",
")",
"counter",
"+=",
"1",
"if",
"result",
"is",
"not",
"None",
":",
"x",
",",
"normals",
",",
"values",
",",
"error",
"=",
"result",
"else",
":",
"# well-behaved move is too slow.",
"# do a cumbersome move to satisfy constraints approximately.",
"x",
",",
"normals",
",",
"values",
",",
"error",
"=",
"self",
".",
"_rough_shake",
"(",
"x",
",",
"normals",
",",
"values",
",",
"error",
")",
"counter",
"+=",
"1",
"# When too many iterations are required, just give up.",
"if",
"counter",
">",
"self",
".",
"max_iter",
":",
"raise",
"ConstraintError",
"(",
"'Exceeded maximum number of shake iterations.'",
")",
"return",
"x",
",",
"counter",
",",
"len",
"(",
"values",
")"
] | Brings unknowns to the constraints.
Arguments:
| ``x`` -- The unknowns. | [
"Brings",
"unknowns",
"to",
"the",
"constraints",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1193-L1218 |
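A sketch of how free_shake could be called. The Constraints constructor arguments shown here (a list of (sign, equation) pairs plus a threshold) are an assumption inferred from how self.equations and self.threshold are used above, not a documented signature:

import numpy as np
from molmod.minimizer import Constraints

def circle(x):
    # equality constraint x0**2 + x1**2 - 1 = 0; returns (value, gradient)
    return np.dot(x, x) - 1.0, 2*x

constraints = Constraints([(0, circle)], 1e-10)   # assumed constructor call
x0 = np.array([1.3, 0.4])
x1, n_shake, n_active = constraints.free_shake(x0)
print(np.dot(x1, x1))   # expected to end up close to 1.0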
molmod/molmod | molmod/minimizer.py | Constraints.safe_shake | def safe_shake(self, x, fun, fmax):
'''Brings unknowns to the constraints, without increasing fun above fmax.
Arguments:
| ``x`` -- The unknowns.
| ``fun`` -- The function being minimized.
| ``fmax`` -- The highest allowed value of the function being
minimized.
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- when False, only the function value is
returned. when True, a 2-tuple with the
function value and the gradient are returned
[default=False]
'''
self.lock[:] = False
def extra_equation(xx):
f, g = fun(xx, do_gradient=True)
return (f-fmax)/abs(fmax), g/abs(fmax)
self.equations.append((-1,extra_equation))
x, shake_counter, constraint_couter = self.free_shake(x)
del self.equations[-1]
return x, shake_counter, constraint_couter | python | def safe_shake(self, x, fun, fmax):
'''Brings unknowns to the constraints, without increasing fun above fmax.
Arguments:
| ``x`` -- The unknowns.
| ``fun`` -- The function being minimized.
| ``fmax`` -- The highest allowed value of the function being
minimized.
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- when False, only the function value is
returned. when True, a 2-tuple with the
function value and the gradient are returned
[default=False]
'''
self.lock[:] = False
def extra_equation(xx):
f, g = fun(xx, do_gradient=True)
return (f-fmax)/abs(fmax), g/abs(fmax)
self.equations.append((-1,extra_equation))
x, shake_counter, constraint_couter = self.free_shake(x)
del self.equations[-1]
return x, shake_counter, constraint_couter | [
"def",
"safe_shake",
"(",
"self",
",",
"x",
",",
"fun",
",",
"fmax",
")",
":",
"self",
".",
"lock",
"[",
":",
"]",
"=",
"False",
"def",
"extra_equation",
"(",
"xx",
")",
":",
"f",
",",
"g",
"=",
"fun",
"(",
"xx",
",",
"do_gradient",
"=",
"True",
")",
"return",
"(",
"f",
"-",
"fmax",
")",
"/",
"abs",
"(",
"fmax",
")",
",",
"g",
"/",
"abs",
"(",
"fmax",
")",
"self",
".",
"equations",
".",
"append",
"(",
"(",
"-",
"1",
",",
"extra_equation",
")",
")",
"x",
",",
"shake_counter",
",",
"constraint_couter",
"=",
"self",
".",
"free_shake",
"(",
"x",
")",
"del",
"self",
".",
"equations",
"[",
"-",
"1",
"]",
"return",
"x",
",",
"shake_counter",
",",
"constraint_couter"
] | Brings unknowns to the constraints, without increasing fun above fmax.
Arguments:
| ``x`` -- The unknowns.
| ``fun`` -- The function being minimized.
| ``fmax`` -- The highest allowed value of the function being
minimized.
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- when False, only the function value is
returned. when True, a 2-tuple with the
function value and the gradient are returned
[default=False] | [
"Brings",
"unknowns",
"to",
"the",
"constraints",
"without",
"increasing",
"fun",
"above",
"fmax",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1220-L1244 |
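The fun argument must follow the do_gradient call convention spelled out in the docstring above. A minimal example of such a function; the safe_shake call in the trailing comment assumes a Constraints instance c set up elsewhere:

import numpy as np

def quadratic(x, do_gradient=False):
    # follows the documented convention: return f, or (f, gradient)
    f = 0.5*np.dot(x, x)
    if do_gradient:
        return f, x
    return f

# x_new, n_shake, n_active = c.safe_shake(x, quadratic, fmax=1.0)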
molmod/molmod | molmod/minimizer.py | Constraints.project | def project(self, x, vector):
'''Project a vector (gradient or direction) on the active constraints.
Arguments:
| ``x`` -- The unknowns.
| ``vector`` -- A numpy array with a direction or a gradient.
The return value is a gradient or direction, where the components
that point away from the constraints are projected out. In case of
half-open constraints, the projection is only active if the vector
points into the infeasible region.
'''
scale = np.linalg.norm(vector)
if scale == 0.0:
return vector
self.lock[:] = False
normals, signs = self._compute_equations(x)[::3]
if len(normals) == 0:
return vector
vector = vector/scale
mask = signs == 0
result = vector.copy()
changed = True
counter = 0
while changed:
changed = False
y = np.dot(normals, result)
for i, sign in enumerate(signs):
if sign != 0:
if sign*y[i] < -self.threshold:
mask[i] = True
changed = True
elif mask[i] and np.dot(normals[i], result-vector) < 0:
mask[i] = False
changed = True
if mask.any():
normals_select = normals[mask]
y = np.dot(normals_select, vector)
U, S, Vt = np.linalg.svd(normals_select, full_matrices=False)
if S.min() == 0.0:
Sinv = S/(S**2+self.rcond1)
else:
Sinv = 1.0/S
result = vector - np.dot(Vt.transpose(), np.dot(U.transpose(), y)*Sinv)
else:
result = vector.copy()
if counter > self.max_iter:
raise ConstraintError('Exceeded maximum number of shake iterations.')
counter += 1
return result*scale | python | def project(self, x, vector):
'''Project a vector (gradient or direction) on the active constraints.
Arguments:
| ``x`` -- The unknowns.
| ``vector`` -- A numpy array with a direction or a gradient.
The return value is a gradient or direction, where the components
that point away from the constraints are projected out. In case of
half-open constraints, the projection is only active if the vector
points into the infeasible region.
'''
scale = np.linalg.norm(vector)
if scale == 0.0:
return vector
self.lock[:] = False
normals, signs = self._compute_equations(x)[::3]
if len(normals) == 0:
return vector
vector = vector/scale
mask = signs == 0
result = vector.copy()
changed = True
counter = 0
while changed:
changed = False
y = np.dot(normals, result)
for i, sign in enumerate(signs):
if sign != 0:
if sign*y[i] < -self.threshold:
mask[i] = True
changed = True
elif mask[i] and np.dot(normals[i], result-vector) < 0:
mask[i] = False
changed = True
if mask.any():
normals_select = normals[mask]
y = np.dot(normals_select, vector)
U, S, Vt = np.linalg.svd(normals_select, full_matrices=False)
if S.min() == 0.0:
Sinv = S/(S**2+self.rcond1)
else:
Sinv = 1.0/S
result = vector - np.dot(Vt.transpose(), np.dot(U.transpose(), y)*Sinv)
else:
result = vector.copy()
if counter > self.max_iter:
raise ConstraintError('Exceeded maximum number of shake iterations.')
counter += 1
return result*scale | [
"def",
"project",
"(",
"self",
",",
"x",
",",
"vector",
")",
":",
"scale",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"vector",
")",
"if",
"scale",
"==",
"0.0",
":",
"return",
"vector",
"self",
".",
"lock",
"[",
":",
"]",
"=",
"False",
"normals",
",",
"signs",
"=",
"self",
".",
"_compute_equations",
"(",
"x",
")",
"[",
":",
":",
"3",
"]",
"if",
"len",
"(",
"normals",
")",
"==",
"0",
":",
"return",
"vector",
"vector",
"=",
"vector",
"/",
"scale",
"mask",
"=",
"signs",
"==",
"0",
"result",
"=",
"vector",
".",
"copy",
"(",
")",
"changed",
"=",
"True",
"counter",
"=",
"0",
"while",
"changed",
":",
"changed",
"=",
"False",
"y",
"=",
"np",
".",
"dot",
"(",
"normals",
",",
"result",
")",
"for",
"i",
",",
"sign",
"in",
"enumerate",
"(",
"signs",
")",
":",
"if",
"sign",
"!=",
"0",
":",
"if",
"sign",
"*",
"y",
"[",
"i",
"]",
"<",
"-",
"self",
".",
"threshold",
":",
"mask",
"[",
"i",
"]",
"=",
"True",
"changed",
"=",
"True",
"elif",
"mask",
"[",
"i",
"]",
"and",
"np",
".",
"dot",
"(",
"normals",
"[",
"i",
"]",
",",
"result",
"-",
"vector",
")",
"<",
"0",
":",
"mask",
"[",
"i",
"]",
"=",
"False",
"changed",
"=",
"True",
"if",
"mask",
".",
"any",
"(",
")",
":",
"normals_select",
"=",
"normals",
"[",
"mask",
"]",
"y",
"=",
"np",
".",
"dot",
"(",
"normals_select",
",",
"vector",
")",
"U",
",",
"S",
",",
"Vt",
"=",
"np",
".",
"linalg",
".",
"svd",
"(",
"normals_select",
",",
"full_matrices",
"=",
"False",
")",
"if",
"S",
".",
"min",
"(",
")",
"==",
"0.0",
":",
"Sinv",
"=",
"S",
"/",
"(",
"S",
"**",
"2",
"+",
"self",
".",
"rcond1",
")",
"else",
":",
"Sinv",
"=",
"1.0",
"/",
"S",
"result",
"=",
"vector",
"-",
"np",
".",
"dot",
"(",
"Vt",
".",
"transpose",
"(",
")",
",",
"np",
".",
"dot",
"(",
"U",
".",
"transpose",
"(",
")",
",",
"y",
")",
"*",
"Sinv",
")",
"else",
":",
"result",
"=",
"vector",
".",
"copy",
"(",
")",
"if",
"counter",
">",
"self",
".",
"max_iter",
":",
"raise",
"ConstraintError",
"(",
"'Exceeded maximum number of shake iterations.'",
")",
"counter",
"+=",
"1",
"return",
"result",
"*",
"scale"
] | Project a vector (gradient or direction) on the active constraints.
Arguments:
| ``x`` -- The unknowns.
| ``vector`` -- A numpy array with a direction or a gradient.
The return value is a gradient or direction, where the components
that point away from the constraints are projected out. In case of
half-open constraints, the projection is only active if the vector
points into the infeasible region. | [
"Project",
"a",
"vector",
"(",
"gradient",
"or",
"direction",
")",
"on",
"the",
"active",
"constraints",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1246-L1299 |
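For a single equality constraint the projection reduces to removing the component of the vector along the constraint normal. A standalone sketch of that basic step (the full routine above additionally handles half-open constraints and rank-deficient normal sets):

import numpy as np

def project_out(vector, normal):
    # remove the component of `vector` along one active constraint normal
    unit = normal/np.linalg.norm(normal)
    return vector - unit*np.dot(unit, vector)

gradient = np.array([1.0, 2.0, 0.0])
normal = np.array([0.0, 1.0, 0.0])      # hypothetical constraint gradient
print(project_out(gradient, normal))    # [1.0, 0.0, 0.0]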
molmod/molmod | molmod/minimizer.py | Minimizer.get_final | def get_final(self):
"""Return the final solution in the original coordinates"""
if self.prec is None:
return self.x
else:
return self.prec.undo(self.x) | python | def get_final(self):
"""Return the final solution in the original coordinates"""
if self.prec is None:
return self.x
else:
return self.prec.undo(self.x) | [
"def",
"get_final",
"(",
"self",
")",
":",
"if",
"self",
".",
"prec",
"is",
"None",
":",
"return",
"self",
".",
"x",
"else",
":",
"return",
"self",
".",
"prec",
".",
"undo",
"(",
"self",
".",
"x",
")"
] | Return the final solution in the original coordinates | [
"Return",
"the",
"final",
"solution",
"in",
"the",
"original",
"coordinates"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1388-L1393 |
molmod/molmod | molmod/minimizer.py | Minimizer._run | def _run(self):
"""Run the iterative optimizer"""
success = self.initialize()
while success is None:
success = self.propagate()
return success | python | def _run(self):
"""Run the iterative optimizer"""
success = self.initialize()
while success is None:
success = self.propagate()
return success | [
"def",
"_run",
"(",
"self",
")",
":",
"success",
"=",
"self",
".",
"initialize",
"(",
")",
"while",
"success",
"is",
"None",
":",
"success",
"=",
"self",
".",
"propagate",
"(",
")",
"return",
"success"
] | Run the iterative optimizer | [
"Run",
"the",
"iterative",
"optimizer"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1395-L1400 |
molmod/molmod | molmod/minimizer.py | Minimizer._print_header | def _print_header(self):
"""Print the header for screen logging"""
header = " Iter Dir "
if self.constraints is not None:
header += ' SC CC'
header += " Function"
if self.convergence_condition is not None:
header += self.convergence_condition.get_header()
header += " Time"
self._screen("-"*(len(header)), newline=True)
self._screen(header, newline=True)
self._screen("-"*(len(header)), newline=True) | python | def _print_header(self):
"""Print the header for screen logging"""
header = " Iter Dir "
if self.constraints is not None:
header += ' SC CC'
header += " Function"
if self.convergence_condition is not None:
header += self.convergence_condition.get_header()
header += " Time"
self._screen("-"*(len(header)), newline=True)
self._screen(header, newline=True)
self._screen("-"*(len(header)), newline=True) | [
"def",
"_print_header",
"(",
"self",
")",
":",
"header",
"=",
"\" Iter Dir \"",
"if",
"self",
".",
"constraints",
"is",
"not",
"None",
":",
"header",
"+=",
"' SC CC'",
"header",
"+=",
"\" Function\"",
"if",
"self",
".",
"convergence_condition",
"is",
"not",
"None",
":",
"header",
"+=",
"self",
".",
"convergence_condition",
".",
"get_header",
"(",
")",
"header",
"+=",
"\" Time\"",
"self",
".",
"_screen",
"(",
"\"-\"",
"*",
"(",
"len",
"(",
"header",
")",
")",
",",
"newline",
"=",
"True",
")",
"self",
".",
"_screen",
"(",
"header",
",",
"newline",
"=",
"True",
")",
"self",
".",
"_screen",
"(",
"\"-\"",
"*",
"(",
"len",
"(",
"header",
")",
")",
",",
"newline",
"=",
"True",
")"
] | Print the header for screen logging | [
"Print",
"the",
"header",
"for",
"screen",
"logging"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1503-L1514 |
molmod/molmod | molmod/minimizer.py | Minimizer._screen | def _screen(self, s, newline=False):
"""Print something on screen when self.verbose == True"""
if self.verbose:
if newline:
print(s)
else:
print(s, end=' ') | python | def _screen(self, s, newline=False):
"""Print something on screen when self.verbose == True"""
if self.verbose:
if newline:
print(s)
else:
print(s, end=' ') | [
"def",
"_screen",
"(",
"self",
",",
"s",
",",
"newline",
"=",
"False",
")",
":",
"if",
"self",
".",
"verbose",
":",
"if",
"newline",
":",
"print",
"(",
"s",
")",
"else",
":",
"print",
"(",
"s",
",",
"end",
"=",
"' '",
")"
] | Print something on screen when self.verbose == True | [
"Print",
"something",
"on",
"screen",
"when",
"self",
".",
"verbose",
"==",
"True"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1516-L1522 |
molmod/molmod | molmod/minimizer.py | Minimizer._line_opt | def _line_opt(self):
"""Perform a line search along the current direction"""
direction = self.search_direction.direction
if self.constraints is not None:
try:
direction = self.constraints.project(self.x, direction)
except ConstraintError:
self._screen("CONSTRAINT PROJECT FAILED", newline=True)
return False
direction_norm = np.linalg.norm(direction)
if direction_norm == 0:
return False
self.line.configure(self.x, direction/direction_norm)
success, wolfe, qopt, fopt = \
self.line_search(self.line, self.initial_step_size, self.epsilon)
if success:
self.step = qopt*self.line.axis
self.initial_step_size = np.linalg.norm(self.step)
self.x = self.x + self.step
self.f = fopt
if wolfe:
self._screen("W")
else:
self._screen(" ")
self.search_direction.reset()
return True
else:
if self.debug_line:
import matplotlib.pyplot as pt
import datetime
pt.clf()
qs = np.arange(0.0, 100.1)*(5*self.initial_step_size/100.0)
fs = np.array([self.line(q) for q in qs])
pt.plot(qs, fs)
pt.xlim(qs[0], qs[-1])
fdelta = fs.max() - fs.min()
if fdelta == 0.0:
fdelta = fs.mean()
fmargin = fdelta*0.1
pt.ylim(fs.min() - fmargin, fs.max() + fmargin)
pt.title('fdelta = %.2e fmean = %.2e' % (fdelta, fs.mean()))
pt.xlabel('Line coordinate, q')
pt.ylabel('Function value, f')
pt.savefig('line_failed_%s.png' % (datetime.datetime.now().isoformat()))
self._reset_state()
return False | python | def _line_opt(self):
"""Perform a line search along the current direction"""
direction = self.search_direction.direction
if self.constraints is not None:
try:
direction = self.constraints.project(self.x, direction)
except ConstraintError:
self._screen("CONSTRAINT PROJECT FAILED", newline=True)
return False
direction_norm = np.linalg.norm(direction)
if direction_norm == 0:
return False
self.line.configure(self.x, direction/direction_norm)
success, wolfe, qopt, fopt = \
self.line_search(self.line, self.initial_step_size, self.epsilon)
if success:
self.step = qopt*self.line.axis
self.initial_step_size = np.linalg.norm(self.step)
self.x = self.x + self.step
self.f = fopt
if wolfe:
self._screen("W")
else:
self._screen(" ")
self.search_direction.reset()
return True
else:
if self.debug_line:
import matplotlib.pyplot as pt
import datetime
pt.clf()
qs = np.arange(0.0, 100.1)*(5*self.initial_step_size/100.0)
fs = np.array([self.line(q) for q in qs])
pt.plot(qs, fs)
pt.xlim(qs[0], qs[-1])
fdelta = fs.max() - fs.min()
if fdelta == 0.0:
fdelta = fs.mean()
fmargin = fdelta*0.1
pt.ylim(fs.min() - fmargin, fs.max() + fmargin)
pt.title('fdelta = %.2e fmean = %.2e' % (fdelta, fs.mean()))
pt.xlabel('Line coordinate, q')
pt.ylabel('Function value, f')
pt.savefig('line_failed_%s.png' % (datetime.datetime.now().isoformat()))
self._reset_state()
return False | [
"def",
"_line_opt",
"(",
"self",
")",
":",
"direction",
"=",
"self",
".",
"search_direction",
".",
"direction",
"if",
"self",
".",
"constraints",
"is",
"not",
"None",
":",
"try",
":",
"direction",
"=",
"self",
".",
"constraints",
".",
"project",
"(",
"self",
".",
"x",
",",
"direction",
")",
"except",
"ConstraintError",
":",
"self",
".",
"_screen",
"(",
"\"CONSTRAINT PROJECT FAILED\"",
",",
"newline",
"=",
"True",
")",
"return",
"False",
"direction_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"direction",
")",
"if",
"direction_norm",
"==",
"0",
":",
"return",
"False",
"self",
".",
"line",
".",
"configure",
"(",
"self",
".",
"x",
",",
"direction",
"/",
"direction_norm",
")",
"success",
",",
"wolfe",
",",
"qopt",
",",
"fopt",
"=",
"self",
".",
"line_search",
"(",
"self",
".",
"line",
",",
"self",
".",
"initial_step_size",
",",
"self",
".",
"epsilon",
")",
"if",
"success",
":",
"self",
".",
"step",
"=",
"qopt",
"*",
"self",
".",
"line",
".",
"axis",
"self",
".",
"initial_step_size",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"step",
")",
"self",
".",
"x",
"=",
"self",
".",
"x",
"+",
"self",
".",
"step",
"self",
".",
"f",
"=",
"fopt",
"if",
"wolfe",
":",
"self",
".",
"_screen",
"(",
"\"W\"",
")",
"else",
":",
"self",
".",
"_screen",
"(",
"\" \"",
")",
"self",
".",
"search_direction",
".",
"reset",
"(",
")",
"return",
"True",
"else",
":",
"if",
"self",
".",
"debug_line",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"pt",
"import",
"datetime",
"pt",
".",
"clf",
"(",
")",
"qs",
"=",
"np",
".",
"arange",
"(",
"0.0",
",",
"100.1",
")",
"*",
"(",
"5",
"*",
"self",
".",
"initial_step_size",
"/",
"100.0",
")",
"fs",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"line",
"(",
"q",
")",
"for",
"q",
"in",
"qs",
"]",
")",
"pt",
".",
"plot",
"(",
"qs",
",",
"fs",
")",
"pt",
".",
"xlim",
"(",
"qs",
"[",
"0",
"]",
",",
"qs",
"[",
"-",
"1",
"]",
")",
"fdelta",
"=",
"fs",
".",
"max",
"(",
")",
"-",
"fs",
".",
"min",
"(",
")",
"if",
"fdelta",
"==",
"0.0",
":",
"fdelta",
"=",
"fs",
".",
"mean",
"(",
")",
"fmargin",
"=",
"fdelta",
"*",
"0.1",
"pt",
".",
"ylim",
"(",
"fs",
".",
"min",
"(",
")",
"-",
"fmargin",
",",
"fs",
".",
"max",
"(",
")",
"+",
"fmargin",
")",
"pt",
".",
"title",
"(",
"'fdelta = %.2e fmean = %.2e'",
"%",
"(",
"fdelta",
",",
"fs",
".",
"mean",
"(",
")",
")",
")",
"pt",
".",
"xlabel",
"(",
"'Line coordinate, q'",
")",
"pt",
".",
"ylabel",
"(",
"'Function value, f'",
")",
"pt",
".",
"savefig",
"(",
"'line_failed_%s.png'",
"%",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
")",
")",
"self",
".",
"_reset_state",
"(",
")",
"return",
"False"
] | Perform a line search along the current direction | [
"Perform",
"a",
"line",
"search",
"along",
"the",
"current",
"direction"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L1524-L1570 |
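The line_search object used above is configured elsewhere in minimizer.py and its interface is not part of this record; as a rough stand-in, the sketch below shows a generic backtracking line search that produces the same kind of (success, step, function value) information:

import numpy as np

def backtracking(fun, x, direction, step, shrink=0.5, max_iter=30):
    # generic sketch, not the molmod line-search implementation
    f0 = fun(x)
    for _ in range(max_iter):
        x_new = x + step*direction
        f_new = fun(x_new)
        if f_new < f0:
            return True, step, f_new
        step *= shrink
    return False, step, f0

fun = lambda x: np.dot(x, x)
x = np.array([2.0, -1.0])
direction = -x/np.linalg.norm(x)
print(backtracking(fun, x, direction, step=1.0))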
molmod/molmod | molmod/graphs.py | Graph.edge_index | def edge_index(self):
"""A map to look up the index of a edge"""
return dict((edge, index) for index, edge in enumerate(self.edges)) | python | def edge_index(self):
"""A map to look up the index of a edge"""
return dict((edge, index) for index, edge in enumerate(self.edges)) | [
"def",
"edge_index",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"(",
"edge",
",",
"index",
")",
"for",
"index",
",",
"edge",
"in",
"enumerate",
"(",
"self",
".",
"edges",
")",
")"
] | A map to look up the index of an edge | [
"A",
"map",
"to",
"look",
"up",
"the",
"index",
"of",
"a",
"edge"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L196-L198 |
molmod/molmod | molmod/graphs.py | Graph.neighbors | def neighbors(self):
"""A dictionary with neighbors
The dictionary will have the following form:
``{vertexX: (vertexY1, vertexY2, ...), ...}``
This means that vertexX and vertexY1 are connected etc. This also
implies that the following elements are part of the dictionary:
``{vertexY1: (vertexX, ...), vertexY2: (vertexX, ...), ...}``.
"""
neighbors = dict(
(vertex, []) for vertex
in range(self.num_vertices)
)
for a, b in self.edges:
neighbors[a].append(b)
neighbors[b].append(a)
# turn lists into frozensets
neighbors = dict((key, frozenset(val)) for key, val in neighbors.items())
return neighbors | python | def neighbors(self):
"""A dictionary with neighbors
The dictionary will have the following form:
``{vertexX: (vertexY1, vertexY2, ...), ...}``
This means that vertexX and vertexY1 are connected etc. This also
implies that the following elements are part of the dictionary:
``{vertexY1: (vertexX, ...), vertexY2: (vertexX, ...), ...}``.
"""
neighbors = dict(
(vertex, []) for vertex
in range(self.num_vertices)
)
for a, b in self.edges:
neighbors[a].append(b)
neighbors[b].append(a)
# turn lists into frozensets
neighbors = dict((key, frozenset(val)) for key, val in neighbors.items())
return neighbors | [
"def",
"neighbors",
"(",
"self",
")",
":",
"neighbors",
"=",
"dict",
"(",
"(",
"vertex",
",",
"[",
"]",
")",
"for",
"vertex",
"in",
"range",
"(",
"self",
".",
"num_vertices",
")",
")",
"for",
"a",
",",
"b",
"in",
"self",
".",
"edges",
":",
"neighbors",
"[",
"a",
"]",
".",
"append",
"(",
"b",
")",
"neighbors",
"[",
"b",
"]",
".",
"append",
"(",
"a",
")",
"# turn lists into frozensets",
"neighbors",
"=",
"dict",
"(",
"(",
"key",
",",
"frozenset",
"(",
"val",
")",
")",
"for",
"key",
",",
"val",
"in",
"neighbors",
".",
"items",
"(",
")",
")",
"return",
"neighbors"
] | A dictionary with neighbors
The dictionary will have the following form:
``{vertexX: (vertexY1, vertexY2, ...), ...}``
This means that vertexX and vertexY1 are connected etc. This also
implies that the following elements are part of the dictionary:
``{vertexY1: (vertexX, ...), vertexY2: (vertexX, ...), ...}``. | [
"A",
"dictionary",
"with",
"neighbors"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L201-L219 |
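A small usage sketch. Only the attributes used above (edges, num_vertices, neighbors) are documented in this record; the Graph constructor call below, taking a list of edges, is an assumption:

from molmod.graphs import Graph

graph = Graph([(0, 1), (1, 2), (2, 3)])   # assumed constructor: a chain 0-1-2-3
print(graph.neighbors)
# expected form: {0: frozenset({1}), 1: frozenset({0, 2}),
#                 2: frozenset({1, 3}), 3: frozenset({2})}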
molmod/molmod | molmod/graphs.py | Graph.distances | def distances(self):
"""The matrix with the all-pairs shortest path lenghts"""
from molmod.ext import graphs_floyd_warshall
distances = np.zeros((self.num_vertices,)*2, dtype=int)
#distances[:] = -1 # set all -1, which is just a very big integer
#distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero
for i, j in self.edges: # set edges to one
distances[i, j] = 1
distances[j, i] = 1
graphs_floyd_warshall(distances)
return distances | python | def distances(self):
"""The matrix with the all-pairs shortest path lenghts"""
from molmod.ext import graphs_floyd_warshall
distances = np.zeros((self.num_vertices,)*2, dtype=int)
#distances[:] = -1 # set all -1, which is just a very big integer
#distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero
for i, j in self.edges: # set edges to one
distances[i, j] = 1
distances[j, i] = 1
graphs_floyd_warshall(distances)
return distances | [
"def",
"distances",
"(",
"self",
")",
":",
"from",
"molmod",
".",
"ext",
"import",
"graphs_floyd_warshall",
"distances",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"num_vertices",
",",
")",
"*",
"2",
",",
"dtype",
"=",
"int",
")",
"#distances[:] = -1 # set all -1, which is just a very big integer",
"#distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero",
"for",
"i",
",",
"j",
"in",
"self",
".",
"edges",
":",
"# set edges to one",
"distances",
"[",
"i",
",",
"j",
"]",
"=",
"1",
"distances",
"[",
"j",
",",
"i",
"]",
"=",
"1",
"graphs_floyd_warshall",
"(",
"distances",
")",
"return",
"distances"
] | The matrix with the all-pairs shortest path lengths | [
"The",
"matrix",
"with",
"the",
"all",
"-",
"pairs",
"shortest",
"path",
"lenghts"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L222-L232 |
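graphs_floyd_warshall is a compiled extension, so its body is not shown in this record. A pure-numpy stand-in with what appears to be the same convention (off-diagonal 0 meaning "no path found yet") is sketched below; that convention is an assumption based on how the matrix is initialized above:

import numpy as np

def floyd_warshall(distances):
    # stand-in for the compiled routine, not the molmod implementation
    n = len(distances)
    big = n + 1                                    # larger than any real path
    work = np.where(distances == 0, big, distances)
    np.fill_diagonal(work, 0)
    for k in range(n):
        work = np.minimum(work, work[:, k:k+1] + work[k:k+1, :])
    return np.where(work >= big, 0, work)

d = np.zeros((4, 4), dtype=int)
for i, j in [(0, 1), (1, 2), (2, 3)]:
    d[i, j] = d[j, i] = 1
print(floyd_warshall(d))    # all-pairs shortest path lengths of a chain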
molmod/molmod | molmod/graphs.py | Graph.central_vertices | def central_vertices(self):
"""Vertices that have the lowest maximum distance to any other vertex"""
max_distances = self.distances.max(0)
max_distances_min = max_distances[max_distances > 0].min()
return (max_distances == max_distances_min).nonzero()[0] | python | def central_vertices(self):
"""Vertices that have the lowest maximum distance to any other vertex"""
max_distances = self.distances.max(0)
max_distances_min = max_distances[max_distances > 0].min()
return (max_distances == max_distances_min).nonzero()[0] | [
"def",
"central_vertices",
"(",
"self",
")",
":",
"max_distances",
"=",
"self",
".",
"distances",
".",
"max",
"(",
"0",
")",
"max_distances_min",
"=",
"max_distances",
"[",
"max_distances",
">",
"0",
"]",
".",
"min",
"(",
")",
"return",
"(",
"max_distances",
"==",
"max_distances_min",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]"
] | Vertices that have the lowest maximum distance to any other vertex | [
"Vertices",
"that",
"have",
"the",
"lowest",
"maximum",
"distance",
"to",
"any",
"other",
"vertex"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L243-L247 |
molmod/molmod | molmod/graphs.py | Graph.independent_vertices | def independent_vertices(self):
"""Lists of vertices that are only interconnected within each list
This means that there is no path from a vertex in one list to a
vertex in another list. In case of a molecular graph, this would
yield the atoms that belong to individual molecules.
"""
candidates = set(range(self.num_vertices))
result = []
while len(candidates) > 0:
pivot = candidates.pop()
group = [
vertex for vertex, distance
in self.iter_breadth_first(pivot)
]
candidates.difference_update(group)
# this sort makes sure that the order of the vertices is respected
group.sort()
result.append(group)
return result | python | def independent_vertices(self):
"""Lists of vertices that are only interconnected within each list
This means that there is no path from a vertex in one list to a
vertex in another list. In case of a molecular graph, this would
yield the atoms that belong to individual molecules.
"""
candidates = set(range(self.num_vertices))
result = []
while len(candidates) > 0:
pivot = candidates.pop()
group = [
vertex for vertex, distance
in self.iter_breadth_first(pivot)
]
candidates.difference_update(group)
# this sort makes sure that the order of the vertices is respected
group.sort()
result.append(group)
return result | [
"def",
"independent_vertices",
"(",
"self",
")",
":",
"candidates",
"=",
"set",
"(",
"range",
"(",
"self",
".",
"num_vertices",
")",
")",
"result",
"=",
"[",
"]",
"while",
"len",
"(",
"candidates",
")",
">",
"0",
":",
"pivot",
"=",
"candidates",
".",
"pop",
"(",
")",
"group",
"=",
"[",
"vertex",
"for",
"vertex",
",",
"distance",
"in",
"self",
".",
"iter_breadth_first",
"(",
"pivot",
")",
"]",
"candidates",
".",
"difference_update",
"(",
"group",
")",
"# this sort makes sure that the order of the vertices is respected",
"group",
".",
"sort",
"(",
")",
"result",
".",
"append",
"(",
"group",
")",
"return",
"result"
] | Lists of vertices that are only interconnected within each list
This means that there is no path from a vertex in one list to a
vertex in another list. In case of a molecular graph, this would
yield the atoms that belong to individual molecules. | [
"Lists",
"of",
"vertices",
"that",
"are",
"only",
"interconnected",
"within",
"each",
"list"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L259-L280 |
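Usage sketch with two disconnected fragments (constructor call assumed, as in the earlier Graph examples; the result is accessed as an attribute on the assumption that it is a cached property like neighbors and distances above):

from molmod.graphs import Graph

graph = Graph([(0, 1), (2, 3), (3, 4), (4, 2)])   # a bond plus a triangle
print(graph.independent_vertices)
# typically [[0, 1], [2, 3, 4]]: one sorted list per connected fragment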
molmod/molmod | molmod/graphs.py | Graph.fingerprint | def fingerprint(self):
"""A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.)"""
if self.num_vertices == 0:
return np.zeros(20, np.ubyte)
else:
return sum(self.vertex_fingerprints) | python | def fingerprint(self):
"""A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.)"""
if self.num_vertices == 0:
return np.zeros(20, np.ubyte)
else:
return sum(self.vertex_fingerprints) | [
"def",
"fingerprint",
"(",
"self",
")",
":",
"if",
"self",
".",
"num_vertices",
"==",
"0",
":",
"return",
"np",
".",
"zeros",
"(",
"20",
",",
"np",
".",
"ubyte",
")",
"else",
":",
"return",
"sum",
"(",
"self",
".",
"vertex_fingerprints",
")"
] | A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.) | [
"A",
"total",
"graph",
"fingerprint"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L283-L292 |
molmod/molmod | molmod/graphs.py | Graph.vertex_fingerprints | def vertex_fingerprints(self):
"""A fingerprint for each vertex
The result is invariant under permutation of the vertex indexes.
Vertices that are symmetrically equivalent will get the same
fingerprint, e.g. the hydrogens in methane would get the same
fingerprint.
"""
return self.get_vertex_fingerprints(
[self.get_vertex_string(i) for i in range(self.num_vertices)],
[self.get_edge_string(i) for i in range(self.num_edges)],
) | python | def vertex_fingerprints(self):
"""A fingerprint for each vertex
The result is invariant under permutation of the vertex indexes.
Vertices that are symmetrically equivalent will get the same
fingerprint, e.g. the hydrogens in methane would get the same
fingerprint.
"""
return self.get_vertex_fingerprints(
[self.get_vertex_string(i) for i in range(self.num_vertices)],
[self.get_edge_string(i) for i in range(self.num_edges)],
) | [
"def",
"vertex_fingerprints",
"(",
"self",
")",
":",
"return",
"self",
".",
"get_vertex_fingerprints",
"(",
"[",
"self",
".",
"get_vertex_string",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_vertices",
")",
"]",
",",
"[",
"self",
".",
"get_edge_string",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_edges",
")",
"]",
",",
")"
] | A fingerprint for each vertex
The result is invariant under permutation of the vertex indexes.
Vertices that are symmetrically equivalent will get the same
fingerprint, e.g. the hydrogens in methane would get the same
fingerprint. | [
"A",
"fingerprint",
"for",
"each",
"vertex"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L295-L306 |
molmod/molmod | molmod/graphs.py | Graph.equivalent_vertices | def equivalent_vertices(self):
"""A dictionary with symmetrically equivalent vertices."""
level1 = {}
for i, row in enumerate(self.vertex_fingerprints):
key = row.tobytes()
l = level1.get(key)
if l is None:
l = set([i])
level1[key] = l
else:
l.add(i)
level2 = {}
for key, vertices in level1.items():
for vertex in vertices:
level2[vertex] = vertices
return level2 | python | def equivalent_vertices(self):
"""A dictionary with symmetrically equivalent vertices."""
level1 = {}
for i, row in enumerate(self.vertex_fingerprints):
key = row.tobytes()
l = level1.get(key)
if l is None:
l = set([i])
level1[key] = l
else:
l.add(i)
level2 = {}
for key, vertices in level1.items():
for vertex in vertices:
level2[vertex] = vertices
return level2 | [
"def",
"equivalent_vertices",
"(",
"self",
")",
":",
"level1",
"=",
"{",
"}",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"vertex_fingerprints",
")",
":",
"key",
"=",
"row",
".",
"tobytes",
"(",
")",
"l",
"=",
"level1",
".",
"get",
"(",
"key",
")",
"if",
"l",
"is",
"None",
":",
"l",
"=",
"set",
"(",
"[",
"i",
"]",
")",
"level1",
"[",
"key",
"]",
"=",
"l",
"else",
":",
"l",
".",
"add",
"(",
"i",
")",
"level2",
"=",
"{",
"}",
"for",
"key",
",",
"vertices",
"in",
"level1",
".",
"items",
"(",
")",
":",
"for",
"vertex",
"in",
"vertices",
":",
"level2",
"[",
"vertex",
"]",
"=",
"vertices",
"return",
"level2"
] | A dictionary with symmetrically equivalent vertices. | [
"A",
"dictionary",
"with",
"symmetrically",
"equivalent",
"vertices",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L309-L324 |
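The routine above is essentially a group-by-fingerprint pass. The same pattern written with a plain dictionary (standalone sketch, using strings in place of the fingerprint byte arrays):

def group_equivalent(fingerprints):
    by_key = {}
    for i, fp in enumerate(fingerprints):
        by_key.setdefault(fp, set()).add(i)
    # every index maps to the set of all indices sharing its fingerprint
    return {i: group for group in by_key.values() for i in group}

groups = group_equivalent(["H", "C", "H", "H", "H"])
print(groups[1])   # {1}
print(groups[0])   # {0, 2, 3, 4}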
molmod/molmod | molmod/graphs.py | Graph.symmetries | def symmetries(self):
"""Graph symmetries (permutations) that map the graph onto itself."""
symmetry_cycles = set([])
symmetries = set([])
for match in GraphSearch(EqualPattern(self))(self):
match.cycles = match.get_closed_cycles()
if match.cycles in symmetry_cycles:
raise RuntimeError("Duplicates in EqualMatch")
symmetry_cycles.add(match.cycles)
symmetries.add(match)
return symmetries | python | def symmetries(self):
"""Graph symmetries (permutations) that map the graph onto itself."""
symmetry_cycles = set([])
symmetries = set([])
for match in GraphSearch(EqualPattern(self))(self):
match.cycles = match.get_closed_cycles()
if match.cycles in symmetry_cycles:
raise RuntimeError("Duplicates in EqualMatch")
symmetry_cycles.add(match.cycles)
symmetries.add(match)
return symmetries | [
"def",
"symmetries",
"(",
"self",
")",
":",
"symmetry_cycles",
"=",
"set",
"(",
"[",
"]",
")",
"symmetries",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"match",
"in",
"GraphSearch",
"(",
"EqualPattern",
"(",
"self",
")",
")",
"(",
"self",
")",
":",
"match",
".",
"cycles",
"=",
"match",
".",
"get_closed_cycles",
"(",
")",
"if",
"match",
".",
"cycles",
"in",
"symmetry_cycles",
":",
"raise",
"RuntimeError",
"(",
"\"Duplicates in EqualMatch\"",
")",
"symmetry_cycles",
".",
"add",
"(",
"match",
".",
"cycles",
")",
"symmetries",
".",
"add",
"(",
"match",
")",
"return",
"symmetries"
] | Graph symmetries (permutations) that map the graph onto itself. | [
"Graph",
"symmetries",
"(",
"permutations",
")",
"that",
"map",
"the",
"graph",
"onto",
"itself",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L327-L338 |
molmod/molmod | molmod/graphs.py | Graph.symmetry_cycles | def symmetry_cycles(self):
"""The cycle representations of the graph symmetries"""
result = set([])
for symmetry in self.symmetries:
result.add(symmetry.cycles)
return result | python | def symmetry_cycles(self):
"""The cycle representations of the graph symmetries"""
result = set([])
for symmetry in self.symmetries:
result.add(symmetry.cycles)
return result | [
"def",
"symmetry_cycles",
"(",
"self",
")",
":",
"result",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"symmetry",
"in",
"self",
".",
"symmetries",
":",
"result",
".",
"add",
"(",
"symmetry",
".",
"cycles",
")",
"return",
"result"
] | The cycle representations of the graph symmetries | [
"The",
"cycle",
"representations",
"of",
"the",
"graph",
"symmetries"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L341-L346 |
molmod/molmod | molmod/graphs.py | Graph.canonical_order | def canonical_order(self):
"""The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels like natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort.
"""
# A) find an appropriate starting vertex.
# Here we take a central vertex that has a minimal number of symmetrical
# equivalents, 'the highest atom number', and the highest fingerprint.
# Note that the symmetrical equivalents are computed from the vertex
# fingerprints, i.e. without the GraphSearch.
starting_vertex = max(
(
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
) for vertex in self.central_vertices
)[-1]
# B) sort all vertices based on
# 1) distance from central vertex
# 2) number of equivalent vertices
# 3) vertex string, (higher atom numbers come first)
# 4) fingerprint
# 5) vertex index
# The last field is only included to collect the result of the sort.
# The fingerprint on itself would be sufficient, but the three first are
# there to have a naturally appealing result.
l = [
[
-distance,
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
] for vertex, distance in self.iter_breadth_first(starting_vertex)
if len(self.neighbors[vertex]) > 0
]
l.sort(reverse=True)
# C) The order of some vertices is still not completely set. e.g.
# consider the case of allene. The four hydrogen atoms are equivalent,
# but one can have two different orders: make geminals consecutive or
# don't. It is more tricky than one would think at first sight. In the
# case of allene, geminality could easily solve the problem. Consider a
# big flat rotationally symmetric molecule (order 2). The first five
# shells are order 4 and one would just give a random order to four
# segments in the first shell. Only when one reaches the outer part that
# has order two, it turns out that the arbitrary choices in the inner
# shell play a role. So it does not help to look at relations with
# vertices at inner or current shells only. One has to consider the
# whole picture. (unit testing reveals troubles like these)
# I need some sleep now. The code below checks for potential fuzz and
# will raise an error if the ordering is not fully determined yet. One
# day, I'll need this code more than I do now, and I'll fix things up.
# I know how to do this, but I don't care enough right now.
# -- Toon
for i in range(1, len(l)):
if l[i][:-1] == l[i-1][:-1]:
raise NotImplementedError
# D) Return only the vertex indexes.
return [record[-1] for record in l] | python | def canonical_order(self):
"""The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels like natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort.
"""
# A) find an appropriate starting vertex.
# Here we take a central vertex that has a minimal number of symmetrical
# equivalents, 'the highest atom number', and the highest fingerprint.
# Note that the symmetrical equivalents are computed from the vertex
# fingerprints, i.e. without the GraphSearch.
starting_vertex = max(
(
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
) for vertex in self.central_vertices
)[-1]
# B) sort all vertices based on
# 1) distance from central vertex
# 2) number of equivalent vertices
# 3) vertex string, (higher atom numbers come first)
# 4) fingerprint
# 5) vertex index
# The last field is only included to collect the result of the sort.
# The fingerprint on itself would be sufficient, but the three first are
# there to have a naturally appealing result.
l = [
[
-distance,
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
] for vertex, distance in self.iter_breadth_first(starting_vertex)
if len(self.neighbors[vertex]) > 0
]
l.sort(reverse=True)
# C) The order of some vertices is still not completely set. e.g.
# consider the case of allene. The four hydrogen atoms are equivalent,
# but one can have two different orders: make geminals consecutive or
# don't. It is more tricky than one would think at first sight. In the
# case of allene, geminality could easily solve the problem. Consider a
# big flat rotationally symmetric molecule (order 2). The first five
# shells are order 4 and one would just give a random order to four
# segments in the first shell. Only when one reaches the outer part that
# has order two, it turns out that the arbitrary choices in the inner
# shell play a role. So it does not help to look at relations with
# vertices at inner or current shells only. One has to consider the
# whole picture. (unit testing reveals troubles like these)
# I need some sleep now. The code below checks for potential fuzz and
# will raise an error if the ordering is not fully determined yet. One
# day, I'll need this code more than I do now, and I'll fix things up.
# I know how to do this, but I don't care enough right now.
# -- Toon
for i in range(1, len(l)):
if l[i][:-1] == l[i-1][:-1]:
raise NotImplementedError
# D) Return only the vertex indexes.
return [record[-1] for record in l] | [
"def",
"canonical_order",
"(",
"self",
")",
":",
"# A) find an appropriate starting vertex.",
"# Here we take a central vertex that has a minimal number of symmetrical",
"# equivalents, 'the highest atom number', and the highest fingerprint.",
"# Note that the symmetrical equivalents are computed from the vertex",
"# fingerprints, i.e. without the GraphSearch.",
"starting_vertex",
"=",
"max",
"(",
"(",
"-",
"len",
"(",
"self",
".",
"equivalent_vertices",
"[",
"vertex",
"]",
")",
",",
"self",
".",
"get_vertex_string",
"(",
"vertex",
")",
",",
"self",
".",
"vertex_fingerprints",
"[",
"vertex",
"]",
".",
"tobytes",
"(",
")",
",",
"vertex",
")",
"for",
"vertex",
"in",
"self",
".",
"central_vertices",
")",
"[",
"-",
"1",
"]",
"# B) sort all vertices based on",
"# 1) distance from central vertex",
"# 2) number of equivalent vertices",
"# 3) vertex string, (higher atom numbers come first)",
"# 4) fingerprint",
"# 5) vertex index",
"# The last field is only included to collect the result of the sort.",
"# The fingerprint on itself would be sufficient, but the three first are",
"# there to have a naturally appealing result.",
"l",
"=",
"[",
"[",
"-",
"distance",
",",
"-",
"len",
"(",
"self",
".",
"equivalent_vertices",
"[",
"vertex",
"]",
")",
",",
"self",
".",
"get_vertex_string",
"(",
"vertex",
")",
",",
"self",
".",
"vertex_fingerprints",
"[",
"vertex",
"]",
".",
"tobytes",
"(",
")",
",",
"vertex",
"]",
"for",
"vertex",
",",
"distance",
"in",
"self",
".",
"iter_breadth_first",
"(",
"starting_vertex",
")",
"if",
"len",
"(",
"self",
".",
"neighbors",
"[",
"vertex",
"]",
")",
">",
"0",
"]",
"l",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"# C) The order of some vertices is still not completely set. e.g.",
"# consider the case of allene. The four hydrogen atoms are equivalent,",
"# but one can have two different orders: make geminiles consecutive or",
"# don't. It is more trikcy than one would think at first sight. In the",
"# case of allene, geminility could easily solve the problem. Consider a",
"# big flat rotationally symmetric molecule (order 2). The first five",
"# shells are order 4 and one would just give a random order to four",
"# segemnts in the first shell. Only when one reaches the outer part that",
"# has order two, it turns out that the arbitrary choices in the inner",
"# shell play a role. So it does not help to look at relations with",
"# vertices at inner or current shells only. One has to consider the",
"# whole picture. (unit testing reveals troubles like these)",
"# I need some sleep now. The code below checks for potential fuzz and",
"# will raise an error if the ordering is not fully determined yet. One",
"# day, I'll need this code more than I do now, and I'll fix things up.",
"# I know how to do this, but I don't care enough right now.",
"# -- Toon",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"l",
")",
")",
":",
"if",
"l",
"[",
"i",
"]",
"[",
":",
"-",
"1",
"]",
"==",
"l",
"[",
"i",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
":",
"raise",
"NotImplementedError",
"# D) Return only the vertex indexes.",
"return",
"[",
"record",
"[",
"-",
"1",
"]",
"for",
"record",
"in",
"l",
"]"
] | The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels like natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort. | [
"The",
"vertices",
"in",
"a",
"canonical",
"or",
"normalized",
"order",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L349-L426 |
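Usage sketch (constructor call assumed, as before). Because the routine raises NotImplementedError when the ordering is not fully determined, which can happen for graphs with symmetrically equivalent vertices, the call is wrapped in a try/except:

from molmod.graphs import Graph

graph = Graph([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (2, 6)])
try:
    order = graph.canonical_order
    print(order)   # vertices in a canonical order, starting near the center
except NotImplementedError:
    print("ordering not fully determined for this graph")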
molmod/molmod | molmod/graphs.py | Graph.iter_breadth_first | def iter_breadth_first(self, start=None, do_paths=False, do_duplicates=False):
"""Iterate over the vertices with the breadth first algorithm.
See http://en.wikipedia.org/wiki/Breadth-first_search for more info.
If no start vertex is given, the central vertex is taken.
By default, the distance to the starting vertex is also computed. If
the path to the starting vertex should be computed instead, set path
to True.
When duplicate is True, then vertices that can be reached through
different paths of equal length, will be iterated twice. This
typically only makes sense when path==True.
"""
if start is None:
start = self.central_vertex
else:
try:
start = int(start)
except ValueError:
raise TypeError("First argument (start) must be an integer.")
if start < 0 or start >= self.num_vertices:
raise ValueError("start must be in the range [0, %i[" %
self.num_vertices)
from collections import deque
work = np.zeros(self.num_vertices, int)
work[:] = -1
work[start] = 0
if do_paths:
result = (start, 0, (start, ))
else:
result = (start, 0)
yield result
todo = deque([result])
while len(todo) > 0:
if do_paths:
parent, parent_length, parent_path = todo.popleft()
else:
parent, parent_length = todo.popleft()
current_length = parent_length + 1
for current in self.neighbors[parent]:
visited = work[current]
if visited == -1 or (do_duplicates and visited == current_length):
work[current] = current_length
if do_paths:
current_path = parent_path + (current, )
result = (current, current_length, current_path)
else:
result = (current, current_length)
#print "iter_breadth_first", result
yield result
todo.append(result) | python | def iter_breadth_first(self, start=None, do_paths=False, do_duplicates=False):
"""Iterate over the vertices with the breadth first algorithm.
See http://en.wikipedia.org/wiki/Breadth-first_search for more info.
If no start vertex is given, the central vertex is taken.
By default, the distance to the starting vertex is also computed. If
the path to the starting vertex should be computed instead, set path
to True.
When duplicate is True, then vertices that can be reached through
different paths of equal length, will be iterated twice. This
typically only makes sense when path==True.
"""
if start is None:
start = self.central_vertex
else:
try:
start = int(start)
except ValueError:
raise TypeError("First argument (start) must be an integer.")
if start < 0 or start >= self.num_vertices:
raise ValueError("start must be in the range [0, %i[" %
self.num_vertices)
from collections import deque
work = np.zeros(self.num_vertices, int)
work[:] = -1
work[start] = 0
if do_paths:
result = (start, 0, (start, ))
else:
result = (start, 0)
yield result
todo = deque([result])
while len(todo) > 0:
if do_paths:
parent, parent_length, parent_path = todo.popleft()
else:
parent, parent_length = todo.popleft()
current_length = parent_length + 1
for current in self.neighbors[parent]:
visited = work[current]
if visited == -1 or (do_duplicates and visited == current_length):
work[current] = current_length
if do_paths:
current_path = parent_path + (current, )
result = (current, current_length, current_path)
else:
result = (current, current_length)
#print "iter_breadth_first", result
yield result
todo.append(result) | [
"def",
"iter_breadth_first",
"(",
"self",
",",
"start",
"=",
"None",
",",
"do_paths",
"=",
"False",
",",
"do_duplicates",
"=",
"False",
")",
":",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"self",
".",
"central_vertex",
"else",
":",
"try",
":",
"start",
"=",
"int",
"(",
"start",
")",
"except",
"ValueError",
":",
"raise",
"TypeError",
"(",
"\"First argument (start) must be an integer.\"",
")",
"if",
"start",
"<",
"0",
"or",
"start",
">=",
"self",
".",
"num_vertices",
":",
"raise",
"ValueError",
"(",
"\"start must be in the range [0, %i[\"",
"%",
"self",
".",
"num_vertices",
")",
"from",
"collections",
"import",
"deque",
"work",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"num_vertices",
",",
"int",
")",
"work",
"[",
":",
"]",
"=",
"-",
"1",
"work",
"[",
"start",
"]",
"=",
"0",
"if",
"do_paths",
":",
"result",
"=",
"(",
"start",
",",
"0",
",",
"(",
"start",
",",
")",
")",
"else",
":",
"result",
"=",
"(",
"start",
",",
"0",
")",
"yield",
"result",
"todo",
"=",
"deque",
"(",
"[",
"result",
"]",
")",
"while",
"len",
"(",
"todo",
")",
">",
"0",
":",
"if",
"do_paths",
":",
"parent",
",",
"parent_length",
",",
"parent_path",
"=",
"todo",
".",
"popleft",
"(",
")",
"else",
":",
"parent",
",",
"parent_length",
"=",
"todo",
".",
"popleft",
"(",
")",
"current_length",
"=",
"parent_length",
"+",
"1",
"for",
"current",
"in",
"self",
".",
"neighbors",
"[",
"parent",
"]",
":",
"visited",
"=",
"work",
"[",
"current",
"]",
"if",
"visited",
"==",
"-",
"1",
"or",
"(",
"do_duplicates",
"and",
"visited",
"==",
"current_length",
")",
":",
"work",
"[",
"current",
"]",
"=",
"current_length",
"if",
"do_paths",
":",
"current_path",
"=",
"parent_path",
"+",
"(",
"current",
",",
")",
"result",
"=",
"(",
"current",
",",
"current_length",
",",
"current_path",
")",
"else",
":",
"result",
"=",
"(",
"current",
",",
"current_length",
")",
"#print \"iter_breadth_first\", result",
"yield",
"result",
"todo",
".",
"append",
"(",
"result",
")"
] | Iterate over the vertices with the breadth first algorithm.
See http://en.wikipedia.org/wiki/Breadth-first_search for more info.
If no start vertex is given, the central vertex is taken.
By default, the distance to the starting vertex is also computed. If
the path to the starting vertex should also be returned, set do_paths
to True.
When do_duplicates is True, vertices that can be reached through
different paths of equal length will be iterated twice. This
typically only makes sense when do_paths==True. | [
"Iterate",
"over",
"the",
"vertices",
"with",
"the",
"breadth",
"first",
"algorithm",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L430-L481 |
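A hedged usage sketch for Graph.iter_breadth_first (not part of the dataset record above). It assumes molmod is installed and that Graph accepts an iterable of vertex-index pairs, as the Graph.get_subgraph record below constructs Graph(new_edges, num_vertices=...).

from molmod.graphs import Graph

graph = Graph([(0, 1), (1, 2), (1, 3), (3, 4)])
# Each item is (vertex, distance_from_start); vertices come out in BFS order.
for vertex, distance in graph.iter_breadth_first(start=0):
    print(vertex, distance)
# With do_paths=True each item also carries the path back to the start vertex.
for vertex, distance, path in graph.iter_breadth_first(start=0, do_paths=True):
    print(vertex, distance, path)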
molmod/molmod | molmod/graphs.py | Graph.iter_breadth_first_edges | def iter_breadth_first_edges(self, start=None):
"""Iterate over the edges with the breadth first convention.
We need this for the pattern matching algorithms, but a quick look at
Wikipedia did not result in a known and named algorithm.
The edges are yielded one by one, together with the distance of the
edge from the starting vertex and a flag that indicates whether the
yielded edge connects two vertices that are at the same distance from
the starting vertex. If that flag is False, the distance from the
starting vertex to edge[0] is equal to the distance variable and the
distance from edge[1] to the starting vertex is equal to distance+1.
One item has the following format: ((i, j), distance, flag)
"""
if start is None:
start = self.central_vertex
else:
try:
start = int(start)
except ValueError:
raise TypeError("First argument (start) must be an integer.")
if start < 0 or start >= self.num_vertices:
raise ValueError("start must be in the range [0, %i[" %
self.num_vertices)
from collections import deque
work = np.zeros(self.num_vertices, int)
work[:] = -1
work[start] = 0
todo = deque([start])
while len(todo) > 0:
parent = todo.popleft()
distance = work[parent]
for current in self.neighbors[parent]:
if work[current] == -1:
yield (parent, current), distance, False
work[current] = distance+1
todo.append(current)
elif work[current] == distance and current > parent:
# second equation in elif avoids duplicates
yield (parent, current), distance, True
elif work[current] == distance+1:
yield (parent, current), distance, False | python | def iter_breadth_first_edges(self, start=None):
"""Iterate over the edges with the breadth first convention.
We need this for the pattern matching algorithms, but a quick look at
Wikipedia did not result in a known and named algorithm.
The edges are yielded one by one, together with the distance of the
edge from the starting vertex and a flag that indicates whether the
yielded edge connects two vertices that are at the same distance from
the starting vertex. If that flag is False, the distance from the
starting vertex to edge[0] is equal to the distance variable and the
distance from edge[1] to the starting vertex is equal to distance+1.
One item has the following format: ((i, j), distance, flag)
"""
if start is None:
start = self.central_vertex
else:
try:
start = int(start)
except ValueError:
raise TypeError("First argument (start) must be an integer.")
if start < 0 or start >= self.num_vertices:
raise ValueError("start must be in the range [0, %i[" %
self.num_vertices)
from collections import deque
work = np.zeros(self.num_vertices, int)
work[:] = -1
work[start] = 0
todo = deque([start])
while len(todo) > 0:
parent = todo.popleft()
distance = work[parent]
for current in self.neighbors[parent]:
if work[current] == -1:
yield (parent, current), distance, False
work[current] = distance+1
todo.append(current)
elif work[current] == distance and current > parent:
# second equation in elif avoids duplicates
yield (parent, current), distance, True
elif work[current] == distance+1:
yield (parent, current), distance, False | [
"def",
"iter_breadth_first_edges",
"(",
"self",
",",
"start",
"=",
"None",
")",
":",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"self",
".",
"central_vertex",
"else",
":",
"try",
":",
"start",
"=",
"int",
"(",
"start",
")",
"except",
"ValueError",
":",
"raise",
"TypeError",
"(",
"\"First argument (start) must be an integer.\"",
")",
"if",
"start",
"<",
"0",
"or",
"start",
">=",
"self",
".",
"num_vertices",
":",
"raise",
"ValueError",
"(",
"\"start must be in the range [0, %i[\"",
"%",
"self",
".",
"num_vertices",
")",
"from",
"collections",
"import",
"deque",
"work",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"num_vertices",
",",
"int",
")",
"work",
"[",
":",
"]",
"=",
"-",
"1",
"work",
"[",
"start",
"]",
"=",
"0",
"todo",
"=",
"deque",
"(",
"[",
"start",
"]",
")",
"while",
"len",
"(",
"todo",
")",
">",
"0",
":",
"parent",
"=",
"todo",
".",
"popleft",
"(",
")",
"distance",
"=",
"work",
"[",
"parent",
"]",
"for",
"current",
"in",
"self",
".",
"neighbors",
"[",
"parent",
"]",
":",
"if",
"work",
"[",
"current",
"]",
"==",
"-",
"1",
":",
"yield",
"(",
"parent",
",",
"current",
")",
",",
"distance",
",",
"False",
"work",
"[",
"current",
"]",
"=",
"distance",
"+",
"1",
"todo",
".",
"append",
"(",
"current",
")",
"elif",
"work",
"[",
"current",
"]",
"==",
"distance",
"and",
"current",
">",
"parent",
":",
"# second equation in elif avoids duplicates",
"yield",
"(",
"parent",
",",
"current",
")",
",",
"distance",
",",
"True",
"elif",
"work",
"[",
"current",
"]",
"==",
"distance",
"+",
"1",
":",
"yield",
"(",
"parent",
",",
"current",
")",
",",
"distance",
",",
"False"
] | Iterate over the edges with the breadth first convention.
We need this for the pattern matching algorithms, but a quick look at
Wikipedia did not result in a known and named algorithm.
The edges are yielded one by one, together with the distance of the
edge from the starting vertex and a flag that indicates whether the
yielded edge connects two vertices that are at the same distance from
the starting vertex. If that flag is False, the distance from the
starting vertex to edge[0] is equal to the distance variable and the
distance from edge[1] to the starting vertex is equal to distance+1.
One item has the following format: ((i, j), distance, flag) | [
"Iterate",
"over",
"the",
"edges",
"with",
"the",
"breadth",
"first",
"convention",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L495-L536 |
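A hedged usage sketch for Graph.iter_breadth_first_edges (not part of the dataset record above), assuming the same Graph construction as in the previous sketch.

from molmod.graphs import Graph

graph = Graph([(0, 1), (0, 2), (1, 2), (2, 3)])
for (i, j), distance, flag in graph.iter_breadth_first_edges(start=0):
    # flag is True when i and j lie at the same distance from the start vertex
    print((i, j), distance, flag)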
molmod/molmod | molmod/graphs.py | Graph.get_subgraph | def get_subgraph(self, subvertices, normalize=False):
"""Constructs a subgraph of the current graph
Arguments:
| ``subvertices`` -- The vertices that should be retained.
| ``normalize`` -- Whether or not the vertices should be renumbered and
reduced to the given set of subvertices. When True, also the
edges are sorted. In the end, this means that the new order of the
edges does not depend on the original order, but only on the
order of the argument subvertices.
This option is False by default. When False, only edges will be
discarded, but the retained data remain unchanged. Also the
parameter num_vertices is not affected.
The returned graph will have an attribute ``old_edge_indexes`` that
relates the positions of the new and the old edges as follows::
>>> self.edges[result._old_edge_indexes[i]] = result.edges[i]
In derived classes, the following should be supported::
>>> self.edge_property[result._old_edge_indexes[i]] = result.edge_property[i]
When ``normalize==True``, also the vertices are affected and the
derived classes should make sure that the following works::
>>> self.vertex_property[result._old_vertex_indexes[i]] = result.vertex_property[i]
The attribute ``old_vertex_indexes`` is only constructed when
``normalize==True``.
"""
if normalize:
revorder = dict((j, i) for i, j in enumerate(subvertices))
new_edges = []
old_edge_indexes = []
for counter, (i, j) in enumerate(self.edges):
new_i = revorder.get(i)
if new_i is None:
continue
new_j = revorder.get(j)
if new_j is None:
continue
new_edges.append((new_i, new_j))
old_edge_indexes.append(counter)
# sort the edges
order = list(range(len(new_edges)))
# argsort in pure python
order.sort( key=(lambda i: tuple(sorted(new_edges[i]))) )
new_edges = [new_edges[i] for i in order]
old_edge_indexes = [old_edge_indexes[i] for i in order]
result = Graph(new_edges, num_vertices=len(subvertices))
result._old_vertex_indexes = np.array(subvertices, dtype=int)
#result.new_vertex_indexes = revorder
result._old_edge_indexes = np.array(old_edge_indexes, dtype=int)
else:
subvertices = set(subvertices)
old_edge_indexes = np.array([
i for i, edge in enumerate(self.edges)
if edge.issubset(subvertices)
], dtype=int)
new_edges = tuple(self.edges[i] for i in old_edge_indexes)
result = Graph(new_edges, self.num_vertices)
result._old_edge_indexes = old_edge_indexes
# no need for old and new vertex_indexes because they remain the
# same.
return result | python | def get_subgraph(self, subvertices, normalize=False):
"""Constructs a subgraph of the current graph
Arguments:
| ``subvertices`` -- The vertices that should be retained.
| ``normalize`` -- Whether or not the vertices should be renumbered and
reduced to the given set of subvertices. When True, also the
edges are sorted. In the end, this means that the new order of the
edges does not depend on the original order, but only on the
order of the argument subvertices.
This option is False by default. When False, only edges will be
discarded, but the retained data remain unchanged. Also the
parameter num_vertices is not affected.
The returned graph will have an attribute ``old_edge_indexes`` that
relates the positions of the new and the old edges as follows::
>>> self.edges[result._old_edge_indexes[i]] = result.edges[i]
In derived classes, the following should be supported::
>>> self.edge_property[result._old_edge_indexes[i]] = result.edge_property[i]
When ``normalize==True``, also the vertices are affected and the
derived classes should make sure that the following works::
>>> self.vertex_property[result._old_vertex_indexes[i]] = result.vertex_property[i]
The attribute ``old_vertex_indexes`` is only constructed when
``normalize==True``.
"""
if normalize:
revorder = dict((j, i) for i, j in enumerate(subvertices))
new_edges = []
old_edge_indexes = []
for counter, (i, j) in enumerate(self.edges):
new_i = revorder.get(i)
if new_i is None:
continue
new_j = revorder.get(j)
if new_j is None:
continue
new_edges.append((new_i, new_j))
old_edge_indexes.append(counter)
# sort the edges
order = list(range(len(new_edges)))
# argsort in pure python
order.sort( key=(lambda i: tuple(sorted(new_edges[i]))) )
new_edges = [new_edges[i] for i in order]
old_edge_indexes = [old_edge_indexes[i] for i in order]
result = Graph(new_edges, num_vertices=len(subvertices))
result._old_vertex_indexes = np.array(subvertices, dtype=int)
#result.new_vertex_indexes = revorder
result._old_edge_indexes = np.array(old_edge_indexes, dtype=int)
else:
subvertices = set(subvertices)
old_edge_indexes = np.array([
i for i, edge in enumerate(self.edges)
if edge.issubset(subvertices)
], dtype=int)
new_edges = tuple(self.edges[i] for i in old_edge_indexes)
result = Graph(new_edges, self.num_vertices)
result._old_edge_indexes = old_edge_indexes
# no need for old and new vertex_indexes because they remain the
# same.
return result | [
"def",
"get_subgraph",
"(",
"self",
",",
"subvertices",
",",
"normalize",
"=",
"False",
")",
":",
"if",
"normalize",
":",
"revorder",
"=",
"dict",
"(",
"(",
"j",
",",
"i",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"subvertices",
")",
")",
"new_edges",
"=",
"[",
"]",
"old_edge_indexes",
"=",
"[",
"]",
"for",
"counter",
",",
"(",
"i",
",",
"j",
")",
"in",
"enumerate",
"(",
"self",
".",
"edges",
")",
":",
"new_i",
"=",
"revorder",
".",
"get",
"(",
"i",
")",
"if",
"new_i",
"is",
"None",
":",
"continue",
"new_j",
"=",
"revorder",
".",
"get",
"(",
"j",
")",
"if",
"new_j",
"is",
"None",
":",
"continue",
"new_edges",
".",
"append",
"(",
"(",
"new_i",
",",
"new_j",
")",
")",
"old_edge_indexes",
".",
"append",
"(",
"counter",
")",
"# sort the edges",
"order",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"new_edges",
")",
")",
")",
"# argsort in pure python",
"order",
".",
"sort",
"(",
"key",
"=",
"(",
"lambda",
"i",
":",
"tuple",
"(",
"sorted",
"(",
"new_edges",
"[",
"i",
"]",
")",
")",
")",
")",
"new_edges",
"=",
"[",
"new_edges",
"[",
"i",
"]",
"for",
"i",
"in",
"order",
"]",
"old_edge_indexes",
"=",
"[",
"old_edge_indexes",
"[",
"i",
"]",
"for",
"i",
"in",
"order",
"]",
"result",
"=",
"Graph",
"(",
"new_edges",
",",
"num_vertices",
"=",
"len",
"(",
"subvertices",
")",
")",
"result",
".",
"_old_vertex_indexes",
"=",
"np",
".",
"array",
"(",
"subvertices",
",",
"dtype",
"=",
"int",
")",
"#result.new_vertex_indexes = revorder",
"result",
".",
"_old_edge_indexes",
"=",
"np",
".",
"array",
"(",
"old_edge_indexes",
",",
"dtype",
"=",
"int",
")",
"else",
":",
"subvertices",
"=",
"set",
"(",
"subvertices",
")",
"old_edge_indexes",
"=",
"np",
".",
"array",
"(",
"[",
"i",
"for",
"i",
",",
"edge",
"in",
"enumerate",
"(",
"self",
".",
"edges",
")",
"if",
"edge",
".",
"issubset",
"(",
"subvertices",
")",
"]",
",",
"dtype",
"=",
"int",
")",
"new_edges",
"=",
"tuple",
"(",
"self",
".",
"edges",
"[",
"i",
"]",
"for",
"i",
"in",
"old_edge_indexes",
")",
"result",
"=",
"Graph",
"(",
"new_edges",
",",
"self",
".",
"num_vertices",
")",
"result",
".",
"_old_edge_indexes",
"=",
"old_edge_indexes",
"# no need for old and new vertex_indexes because they remain the",
"# same.",
"return",
"result"
] | Constructs a subgraph of the current graph
Arguments:
| ``subvertices`` -- The vertices that should be retained.
| ``normalize`` -- Whether or not the vertices should be renumbered and
reduced to the given set of subvertices. When True, also the
edges are sorted. In the end, this means that the new order of the
edges does not depend on the original order, but only on the
order of the argument subvertices.
This option is False by default. When False, only edges will be
discarded, but the retained data remain unchanged. Also the
parameter num_vertices is not affected.
The returned graph will have an attribute ``old_edge_indexes`` that
relates the positions of the new and the old edges as follows::
>>> self.edges[result._old_edge_indexes[i]] = result.edges[i]
In derived classes, the following should be supported::
>>> self.edge_property[result._old_edge_indexes[i]] = result.edge_property[i]
When ``normalize==True``, also the vertices are affected and the
derived classes should make sure that the following works::
>>> self.vertex_property[result._old_vertex_indexes[i]] = result.vertex_property[i]
The attribute ``old_vertex_indexes`` is only constructed when
``normalize==True``. | [
"Constructs",
"a",
"subgraph",
"of",
"the",
"current",
"graph"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L538-L604 |
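A hedged usage sketch for Graph.get_subgraph (not part of the dataset record above). With normalize=True the retained vertices are renumbered 0..len(subvertices)-1, and the _old_vertex_indexes and _old_edge_indexes attributes map the result back to the parent graph, as the docstring describes.

from molmod.graphs import Graph

parent = Graph([(0, 1), (1, 2), (2, 3), (3, 4)])
sub = parent.get_subgraph([1, 2, 3], normalize=True)
print(sub.num_vertices)         # 3
print(sub.edges)                # edges expressed in the new vertex numbering
print(sub._old_vertex_indexes)  # [1 2 3]
print(sub._old_edge_indexes)    # positions of the kept edges in parent.edges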
molmod/molmod | molmod/graphs.py | Graph.get_vertex_fingerprints | def get_vertex_fingerprints(self, vertex_strings, edge_strings, num_iter=None):
"""Return an array with fingerprints for each vertex"""
import hashlib
def str2array(x):
"""convert a hash string to a numpy array of bytes"""
if len(x) == 0:
return np.zeros(0, np.ubyte)
elif sys.version_info[0] == 2:
return np.frombuffer(x, np.ubyte)
else:
return np.frombuffer(x.encode(), np.ubyte)
hashrow = lambda x: np.frombuffer(hashlib.sha1(x.data).digest(), np.ubyte)
# initialization
result = np.zeros((self.num_vertices, 20), np.ubyte)
for i in range(self.num_vertices):
result[i] = hashrow(str2array(vertex_strings[i]))
for i in range(self.num_edges):
a, b = self.edges[i]
tmp = hashrow(str2array(edge_strings[i]))
result[a] += tmp
result[b] += tmp
work = result.copy()
# iterations
if num_iter is None:
num_iter = self.max_distance
for i in range(num_iter):
for a, b in self.edges:
work[a] += result[b]
work[b] += result[a]
#for a in xrange(self.num_vertices):
# for b in xrange(self.num_vertices):
# work[a] += hashrow(result[b]*self.distances[a, b])
for a in range(self.num_vertices):
result[a] = hashrow(work[a])
return result | python | def get_vertex_fingerprints(self, vertex_strings, edge_strings, num_iter=None):
"""Return an array with fingerprints for each vertex"""
import hashlib
def str2array(x):
"""convert a hash string to a numpy array of bytes"""
if len(x) == 0:
return np.zeros(0, np.ubyte)
elif sys.version_info[0] == 2:
return np.frombuffer(x, np.ubyte)
else:
return np.frombuffer(x.encode(), np.ubyte)
hashrow = lambda x: np.frombuffer(hashlib.sha1(x.data).digest(), np.ubyte)
# initialization
result = np.zeros((self.num_vertices, 20), np.ubyte)
for i in range(self.num_vertices):
result[i] = hashrow(str2array(vertex_strings[i]))
for i in range(self.num_edges):
a, b = self.edges[i]
tmp = hashrow(str2array(edge_strings[i]))
result[a] += tmp
result[b] += tmp
work = result.copy()
# iterations
if num_iter is None:
num_iter = self.max_distance
for i in range(num_iter):
for a, b in self.edges:
work[a] += result[b]
work[b] += result[a]
#for a in xrange(self.num_vertices):
# for b in xrange(self.num_vertices):
# work[a] += hashrow(result[b]*self.distances[a, b])
for a in range(self.num_vertices):
result[a] = hashrow(work[a])
return result | [
"def",
"get_vertex_fingerprints",
"(",
"self",
",",
"vertex_strings",
",",
"edge_strings",
",",
"num_iter",
"=",
"None",
")",
":",
"import",
"hashlib",
"def",
"str2array",
"(",
"x",
")",
":",
"\"\"\"convert a hash string to a numpy array of bytes\"\"\"",
"if",
"len",
"(",
"x",
")",
"==",
"0",
":",
"return",
"np",
".",
"zeros",
"(",
"0",
",",
"np",
".",
"ubyte",
")",
"elif",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"return",
"np",
".",
"frombuffer",
"(",
"x",
",",
"np",
".",
"ubyte",
")",
"else",
":",
"return",
"np",
".",
"frombuffer",
"(",
"x",
".",
"encode",
"(",
")",
",",
"np",
".",
"ubyte",
")",
"hashrow",
"=",
"lambda",
"x",
":",
"np",
".",
"frombuffer",
"(",
"hashlib",
".",
"sha1",
"(",
"x",
".",
"data",
")",
".",
"digest",
"(",
")",
",",
"np",
".",
"ubyte",
")",
"# initialization",
"result",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"num_vertices",
",",
"20",
")",
",",
"np",
".",
"ubyte",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_vertices",
")",
":",
"result",
"[",
"i",
"]",
"=",
"hashrow",
"(",
"str2array",
"(",
"vertex_strings",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_edges",
")",
":",
"a",
",",
"b",
"=",
"self",
".",
"edges",
"[",
"i",
"]",
"tmp",
"=",
"hashrow",
"(",
"str2array",
"(",
"edge_strings",
"[",
"i",
"]",
")",
")",
"result",
"[",
"a",
"]",
"+=",
"tmp",
"result",
"[",
"b",
"]",
"+=",
"tmp",
"work",
"=",
"result",
".",
"copy",
"(",
")",
"# iterations",
"if",
"num_iter",
"is",
"None",
":",
"num_iter",
"=",
"self",
".",
"max_distance",
"for",
"i",
"in",
"range",
"(",
"num_iter",
")",
":",
"for",
"a",
",",
"b",
"in",
"self",
".",
"edges",
":",
"work",
"[",
"a",
"]",
"+=",
"result",
"[",
"b",
"]",
"work",
"[",
"b",
"]",
"+=",
"result",
"[",
"a",
"]",
"#for a in xrange(self.num_vertices):",
"# for b in xrange(self.num_vertices):",
"# work[a] += hashrow(result[b]*self.distances[a, b])",
"for",
"a",
"in",
"range",
"(",
"self",
".",
"num_vertices",
")",
":",
"result",
"[",
"a",
"]",
"=",
"hashrow",
"(",
"work",
"[",
"a",
"]",
")",
"return",
"result"
] | Return an array with fingerprints for each vertex | [
"Return",
"an",
"array",
"with",
"fingerprints",
"for",
"each",
"vertex"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L606-L640 |
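A hedged usage sketch for Graph.get_vertex_fingerprints (not part of the dataset record above). The vertex and edge labels are arbitrary strings chosen here for illustration; the routine returns a (num_vertices, 20) numpy array of unsigned bytes.

from molmod.graphs import Graph

graph = Graph([(0, 1), (1, 2), (2, 0), (2, 3)])
vertex_strings = ["C", "C", "C", "H"]        # one label per vertex
edge_strings = ["single"] * graph.num_edges  # one label per edge
fingerprints = graph.get_vertex_fingerprints(vertex_strings, edge_strings)
print(fingerprints.shape)  # (4, 20)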
molmod/molmod | molmod/graphs.py | Graph.get_halfs | def get_halfs(self, vertex1, vertex2):
"""Split the graph in two halfs by cutting the edge: vertex1-vertex2
If this is not possible (due to loops connecting both ends), a
GraphError is raised.
Returns the vertices in both halfs.
"""
def grow(origin, other):
frontier = set(self.neighbors[origin])
frontier.discard(other)
result = set([origin])
while len(frontier) > 0:
pivot = frontier.pop()
if pivot == other:
raise GraphError("The graph can not be separated in two halfs "
"by disconnecting vertex1 and vertex2.")
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= result
frontier |= pivot_neighbors
result.add(pivot)
return result
vertex1_part = grow(vertex1, vertex2)
vertex2_part = grow(vertex2, vertex1)
return vertex1_part, vertex2_part | python | def get_halfs(self, vertex1, vertex2):
"""Split the graph in two halfs by cutting the edge: vertex1-vertex2
If this is not possible (due to loops connecting both ends), a
GraphError is raised.
Returns the vertices in both halfs.
"""
def grow(origin, other):
frontier = set(self.neighbors[origin])
frontier.discard(other)
result = set([origin])
while len(frontier) > 0:
pivot = frontier.pop()
if pivot == other:
raise GraphError("The graph can not be separated in two halfs "
"by disconnecting vertex1 and vertex2.")
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= result
frontier |= pivot_neighbors
result.add(pivot)
return result
vertex1_part = grow(vertex1, vertex2)
vertex2_part = grow(vertex2, vertex1)
return vertex1_part, vertex2_part | [
"def",
"get_halfs",
"(",
"self",
",",
"vertex1",
",",
"vertex2",
")",
":",
"def",
"grow",
"(",
"origin",
",",
"other",
")",
":",
"frontier",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"origin",
"]",
")",
"frontier",
".",
"discard",
"(",
"other",
")",
"result",
"=",
"set",
"(",
"[",
"origin",
"]",
")",
"while",
"len",
"(",
"frontier",
")",
">",
"0",
":",
"pivot",
"=",
"frontier",
".",
"pop",
"(",
")",
"if",
"pivot",
"==",
"other",
":",
"raise",
"GraphError",
"(",
"\"The graph can not be separated in two halfs \"",
"\"by disconnecting vertex1 and vertex2.\"",
")",
"pivot_neighbors",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"pivot",
"]",
")",
"pivot_neighbors",
"-=",
"result",
"frontier",
"|=",
"pivot_neighbors",
"result",
".",
"add",
"(",
"pivot",
")",
"return",
"result",
"vertex1_part",
"=",
"grow",
"(",
"vertex1",
",",
"vertex2",
")",
"vertex2_part",
"=",
"grow",
"(",
"vertex2",
",",
"vertex1",
")",
"return",
"vertex1_part",
",",
"vertex2_part"
] | Split the graph in two halfs by cutting the edge: vertex1-vertex2
If this is not possible (due to loops connecting both ends), a
GraphError is raised.
Returns the vertices in both halfs. | [
"Split",
"the",
"graph",
"in",
"two",
"halfs",
"by",
"cutting",
"the",
"edge",
":",
"vertex1",
"-",
"vertex2"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L642-L667 |
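A hedged usage sketch for Graph.get_halfs (not part of the dataset record above): cutting the edge 1-2 of a tree splits it into two vertex sets; a GraphError is raised when a cycle connects both sides.

from molmod.graphs import Graph

graph = Graph([(0, 1), (1, 2), (2, 3), (2, 4)])
side1, side2 = graph.get_halfs(1, 2)
print(sorted(side1))  # [0, 1]
print(sorted(side2))  # [2, 3, 4]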
molmod/molmod | molmod/graphs.py | Graph.get_part | def get_part(self, vertex_in, vertices_border):
"""List all vertices that are connected to vertex_in, but are not
included in or 'behind' vertices_border.
"""
vertices_new = set(self.neighbors[vertex_in])
vertices_part = set([vertex_in])
while len(vertices_new) > 0:
pivot = vertices_new.pop()
if pivot in vertices_border:
continue
vertices_part.add(pivot)
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertices_part
vertices_new |= pivot_neighbors
return vertices_part | python | def get_part(self, vertex_in, vertices_border):
"""List all vertices that are connected to vertex_in, but are not
included in or 'behind' vertices_border.
"""
vertices_new = set(self.neighbors[vertex_in])
vertices_part = set([vertex_in])
while len(vertices_new) > 0:
pivot = vertices_new.pop()
if pivot in vertices_border:
continue
vertices_part.add(pivot)
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertices_part
vertices_new |= pivot_neighbors
return vertices_part | [
"def",
"get_part",
"(",
"self",
",",
"vertex_in",
",",
"vertices_border",
")",
":",
"vertices_new",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"vertex_in",
"]",
")",
"vertices_part",
"=",
"set",
"(",
"[",
"vertex_in",
"]",
")",
"while",
"len",
"(",
"vertices_new",
")",
">",
"0",
":",
"pivot",
"=",
"vertices_new",
".",
"pop",
"(",
")",
"if",
"pivot",
"in",
"vertices_border",
":",
"continue",
"vertices_part",
".",
"add",
"(",
"pivot",
")",
"pivot_neighbors",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"pivot",
"]",
")",
"pivot_neighbors",
"-=",
"vertices_part",
"vertices_new",
"|=",
"pivot_neighbors",
"return",
"vertices_part"
] | List all vertices that are connected to vertex_in, but are not
included in or 'behind' vertices_border. | [
"List",
"all",
"vertices",
"that",
"are",
"connected",
"to",
"vertex_in",
"but",
"are",
"not",
"included",
"in",
"or",
"behind",
"vertices_border",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L669-L685 |
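A hedged usage sketch for Graph.get_part (not part of the dataset record above): collect everything reachable from vertex 3 without crossing the border vertex 2.

from molmod.graphs import Graph

graph = Graph([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)])
part = graph.get_part(3, set([2]))
print(sorted(part))  # [3, 4, 5]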
molmod/molmod | molmod/graphs.py | Graph.get_halfs_double | def get_halfs_double(self, vertex_a1, vertex_b1, vertex_a2, vertex_b2):
"""Compute the two parts separated by ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)``
Raise a GraphError when ``(vertex_a1, vertex_b1)`` and
``(vertex_a2, vertex_b2)`` do not separate the graph in two
disconnected parts. The edges must be neighbors. If not, a GraphError
is raised. The four vertices must not coincide, or a GraphError is
raised.
Returns the vertices of the two halfs and the four 'hinge' vertices
in the correct order, i.e. both ``vertex_a1`` and ``vertex_a2`` are
in the first half and both ``vertex_b1`` and ``vertex_b2`` are in the
second half.
"""
if vertex_a1 not in self.neighbors[vertex_b1]:
raise GraphError("vertex_a1 must be a neighbor of vertex_b1.")
if vertex_a2 not in self.neighbors[vertex_b2]:
raise GraphError("vertex_a2 must be a neighbor of vertex_b2.")
# find vertex_a_part (and possibly switch vertex_a2, vertex_b2)
vertex_a_new = set(self.neighbors[vertex_a1])
vertex_a_new.discard(vertex_b1)
if vertex_a1 == vertex_b2:
# we know that we have to swap vertex_a2 and vertex_b2. The algo
# below will fail otherwise in this 'exotic' case.
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
#vertex_a_new.discard(vertex_a2) # in case there is overlap
if vertex_a1 == vertex_a2:
vertex_a_new.discard(vertex_b2) # in case there is overlap
vertex_a_part = set([vertex_a1])
touched = False # True if (the switched) vertex_a2 has been reached.
while len(vertex_a_new) > 0:
pivot = vertex_a_new.pop()
if pivot == vertex_b1:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 reached by vertex_a1.")
vertex_a_part.add(pivot)
# create a new set that we can modify
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_a_part
if pivot == vertex_a2 or pivot == vertex_b2:
if pivot == vertex_b2:
if touched:
raise GraphError("The graph can not be separated in "
"two halfs. vertex_b2 reached by "
"vertex_a1.")
else:
# put them in the correct order
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
pivot_neighbors.discard(vertex_b2)
touched = True
vertex_a_new |= pivot_neighbors
if vertex_a2 not in vertex_a_part:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_a1 can not reach vertex_a2 trough "
"vertex_a_part")
# find vertex_b_part: easy, is just the rest ...
#vertex_b_part = set(xrange(self.num_vertices)) - vertex_a_part
# ... but we also want that there is a path in vertex_b_part from
# vertex_b1 to vertex_b2
if vertex_b1 == vertex_b2:
closed = True
else:
vertex_b_new = set(self.neighbors[vertex_b1])
vertex_b_new.discard(vertex_a1)
vertex_b_part = set([vertex_b1])
closed = False
while len(vertex_b_new) > 0:
pivot = vertex_b_new.pop()
if pivot == vertex_b2:
closed = True
break
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_b_part
vertex_b_new |= pivot_neighbors
vertex_b_part.add(pivot)
if not closed:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 can not reach vertex_b2 trough "
"vertex_b_part")
# finally compute the real vertex_b_part, the former loop might break
# early for efficiency.
vertex_b_part = set(range(self.num_vertices)) - vertex_a_part
# done!
return vertex_a_part, vertex_b_part, \
(vertex_a1, vertex_b1, vertex_a2, vertex_b2) | python | def get_halfs_double(self, vertex_a1, vertex_b1, vertex_a2, vertex_b2):
"""Compute the two parts separated by ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)``
Raise a GraphError when ``(vertex_a1, vertex_b1)`` and
``(vertex_a2, vertex_b2)`` do not separate the graph in two
disconnected parts. The edges must be neighbors. If not, a GraphError
is raised. The four vertices must not coincide, or a GraphError is
raised.
Returns the vertices of the two halfs and the four 'hinge' vertices
in the correct order, i.e. both ``vertex_a1`` and ``vertex_a2`` are
in the first half and both ``vertex_b1`` and ``vertex_b2`` are in the
second half.
"""
if vertex_a1 not in self.neighbors[vertex_b1]:
raise GraphError("vertex_a1 must be a neighbor of vertex_b1.")
if vertex_a2 not in self.neighbors[vertex_b2]:
raise GraphError("vertex_a2 must be a neighbor of vertex_b2.")
# find vertex_a_part (and possibly switch vertex_a2, vertex_b2)
vertex_a_new = set(self.neighbors[vertex_a1])
vertex_a_new.discard(vertex_b1)
if vertex_a1 == vertex_b2:
# we know that we have to swap vertex_a2 and vertex_b2. The algo
# below will fail otherwise in this 'exotic' case.
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
#vertex_a_new.discard(vertex_a2) # in case there is overlap
if vertex_a1 == vertex_a2:
vertex_a_new.discard(vertex_b2) # in case there is overlap
vertex_a_part = set([vertex_a1])
touched = False # True if (the switched) vertex_a2 has been reached.
while len(vertex_a_new) > 0:
pivot = vertex_a_new.pop()
if pivot == vertex_b1:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 reached by vertex_a1.")
vertex_a_part.add(pivot)
# create a new set that we can modify
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_a_part
if pivot == vertex_a2 or pivot == vertex_b2:
if pivot == vertex_b2:
if touched:
raise GraphError("The graph can not be separated in "
"two halfs. vertex_b2 reached by "
"vertex_a1.")
else:
# put them in the correct order
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
pivot_neighbors.discard(vertex_b2)
touched = True
vertex_a_new |= pivot_neighbors
if vertex_a2 not in vertex_a_part:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_a1 can not reach vertex_a2 trough "
"vertex_a_part")
# find vertex_b_part: easy, is just the rest ...
#vertex_b_part = set(xrange(self.num_vertices)) - vertex_a_part
# ... but we also want that there is a path in vertex_b_part from
# vertex_b1 to vertex_b2
if vertex_b1 == vertex_b2:
closed = True
else:
vertex_b_new = set(self.neighbors[vertex_b1])
vertex_b_new.discard(vertex_a1)
vertex_b_part = set([vertex_b1])
closed = False
while len(vertex_b_new) > 0:
pivot = vertex_b_new.pop()
if pivot == vertex_b2:
closed = True
break
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_b_part
vertex_b_new |= pivot_neighbors
vertex_b_part.add(pivot)
if not closed:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 can not reach vertex_b2 trough "
"vertex_b_part")
# finally compute the real vertex_b_part, the former loop might break
# early for efficiency.
vertex_b_part = set(range(self.num_vertices)) - vertex_a_part
# done!
return vertex_a_part, vertex_b_part, \
(vertex_a1, vertex_b1, vertex_a2, vertex_b2) | [
"def",
"get_halfs_double",
"(",
"self",
",",
"vertex_a1",
",",
"vertex_b1",
",",
"vertex_a2",
",",
"vertex_b2",
")",
":",
"if",
"vertex_a1",
"not",
"in",
"self",
".",
"neighbors",
"[",
"vertex_b1",
"]",
":",
"raise",
"GraphError",
"(",
"\"vertex_a1 must be a neighbor of vertex_b1.\"",
")",
"if",
"vertex_a2",
"not",
"in",
"self",
".",
"neighbors",
"[",
"vertex_b2",
"]",
":",
"raise",
"GraphError",
"(",
"\"vertex_a2 must be a neighbor of vertex_b2.\"",
")",
"# find vertex_a_part (and possibly switch vertex_a2, vertex_b2)",
"vertex_a_new",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"vertex_a1",
"]",
")",
"vertex_a_new",
".",
"discard",
"(",
"vertex_b1",
")",
"if",
"vertex_a1",
"==",
"vertex_b2",
":",
"# we now that we have to swap vertex_a2 and vertex_b2. The algo",
"# below will fail otherwise in this 'exotic' case.",
"vertex_a2",
",",
"vertex_b2",
"=",
"vertex_b2",
",",
"vertex_a2",
"#vertex_a_new.discard(vertex_a2) # in case there is overlap",
"if",
"vertex_a1",
"==",
"vertex_a2",
":",
"vertex_a_new",
".",
"discard",
"(",
"vertex_b2",
")",
"# in case there is overlap",
"vertex_a_part",
"=",
"set",
"(",
"[",
"vertex_a1",
"]",
")",
"touched",
"=",
"False",
"# True if (the switched) vertex_a2 has been reached.",
"while",
"len",
"(",
"vertex_a_new",
")",
">",
"0",
":",
"pivot",
"=",
"vertex_a_new",
".",
"pop",
"(",
")",
"if",
"pivot",
"==",
"vertex_b1",
":",
"raise",
"GraphError",
"(",
"\"The graph can not be separated in two halfs. \"",
"\"vertex_b1 reached by vertex_a1.\"",
")",
"vertex_a_part",
".",
"add",
"(",
"pivot",
")",
"# create a new set that we can modify",
"pivot_neighbors",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"pivot",
"]",
")",
"pivot_neighbors",
"-=",
"vertex_a_part",
"if",
"pivot",
"==",
"vertex_a2",
"or",
"pivot",
"==",
"vertex_b2",
":",
"if",
"pivot",
"==",
"vertex_b2",
":",
"if",
"touched",
":",
"raise",
"GraphError",
"(",
"\"The graph can not be separated in \"",
"\"two halfs. vertex_b2 reached by \"",
"\"vertex_a1.\"",
")",
"else",
":",
"# put them in the correct order",
"vertex_a2",
",",
"vertex_b2",
"=",
"vertex_b2",
",",
"vertex_a2",
"pivot_neighbors",
".",
"discard",
"(",
"vertex_b2",
")",
"touched",
"=",
"True",
"vertex_a_new",
"|=",
"pivot_neighbors",
"if",
"vertex_a2",
"not",
"in",
"vertex_a_part",
":",
"raise",
"GraphError",
"(",
"\"The graph can not be separated in two halfs. \"",
"\"vertex_a1 can not reach vertex_a2 trough \"",
"\"vertex_a_part\"",
")",
"# find vertex_b_part: easy, is just the rest ...",
"#vertex_b_part = set(xrange(self.num_vertices)) - vertex_a_part",
"# ... but we also want that there is a path in vertex_b_part from",
"# vertex_b1 to vertex_b2",
"if",
"vertex_b1",
"==",
"vertex_b2",
":",
"closed",
"=",
"True",
"else",
":",
"vertex_b_new",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"vertex_b1",
"]",
")",
"vertex_b_new",
".",
"discard",
"(",
"vertex_a1",
")",
"vertex_b_part",
"=",
"set",
"(",
"[",
"vertex_b1",
"]",
")",
"closed",
"=",
"False",
"while",
"len",
"(",
"vertex_b_new",
")",
">",
"0",
":",
"pivot",
"=",
"vertex_b_new",
".",
"pop",
"(",
")",
"if",
"pivot",
"==",
"vertex_b2",
":",
"closed",
"=",
"True",
"break",
"pivot_neighbors",
"=",
"set",
"(",
"self",
".",
"neighbors",
"[",
"pivot",
"]",
")",
"pivot_neighbors",
"-=",
"vertex_b_part",
"vertex_b_new",
"|=",
"pivot_neighbors",
"vertex_b_part",
".",
"add",
"(",
"pivot",
")",
"if",
"not",
"closed",
":",
"raise",
"GraphError",
"(",
"\"The graph can not be separated in two halfs. \"",
"\"vertex_b1 can not reach vertex_b2 trough \"",
"\"vertex_b_part\"",
")",
"# finaly compute the real vertex_b_part, the former loop might break",
"# early for efficiency.",
"vertex_b_part",
"=",
"set",
"(",
"range",
"(",
"self",
".",
"num_vertices",
")",
")",
"-",
"vertex_a_part",
"# done!",
"return",
"vertex_a_part",
",",
"vertex_b_part",
",",
"(",
"vertex_a1",
",",
"vertex_b1",
",",
"vertex_a2",
",",
"vertex_b2",
")"
] | Compute the two parts separated by ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)``
Raise a GraphError when ``(vertex_a1, vertex_b1)`` and
``(vertex_a2, vertex_b2)`` do not separate the graph in two
disconnected parts. The edges must be neighbors. If not, a GraphError
is raised. The four vertices must not coincide, or a GraphError is
raised.
Returns the vertices of the two halfs and the four 'hinge' vertices
in the correct order, i.e. both ``vertex_a1`` and ``vertex_a2`` are
in the first half and both ``vertex_b1`` and ``vertex_b2`` are in the
second half. | [
"Compute",
"the",
"two",
"parts",
"separated",
"by",
"(",
"vertex_a1",
"vertex_b1",
")",
"and",
"(",
"vertex_a2",
"vertex_b2",
")"
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L687-L780 |
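A hedged usage sketch for Graph.get_halfs_double (not part of the dataset record above): cutting the two edges (0, 1) and (3, 2) of the 4-cycle 0-1-2-3-0 returns the half containing vertex_a1 and vertex_a2, the half containing vertex_b1 and vertex_b2, and the four hinge vertices in the documented order.

from molmod.graphs import Graph

ring = Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
half_a, half_b, hinges = ring.get_halfs_double(0, 1, 3, 2)
print(sorted(half_a))  # [0, 3]
print(sorted(half_b))  # [1, 2]
print(hinges)          # (0, 1, 3, 2)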