code | signature | docstring | loss_without_docstring | loss_with_docstring | factor |
---|---|---|---|---|---|
string | string | string | float64 | float64 | float64 |
url = self._build_url(constants.LIST_JOBS_ENDPOINT)
params = {'project': project}
jobs = self.client.get(url, params=params, timeout=self.timeout)
return jobs | def list_jobs(self, project) | Lists all known jobs for a project. First class, maps to Scrapyd's
list jobs endpoint. | 3.530026 | 3.320344 | 1.063151 |
url = self._build_url(constants.LIST_PROJECTS_ENDPOINT)
json = self.client.get(url, timeout=self.timeout)
return json['projects'] | def list_projects(self) | Lists all deployed projects. First class, maps to Scrapyd's
list projects endpoint. | 4.668737 | 4.137352 | 1.128436 |
url = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
params = {'project': project}
json = self.client.get(url, params=params, timeout=self.timeout)
return json['spiders'] | def list_spiders(self, project) | Lists all known spiders for a specific project. First class, maps
to Scrapyd's list spiders endpoint. | 3.220605 | 3.044485 | 1.057849 |
url = self._build_url(constants.LIST_VERSIONS_ENDPOINT)
params = {'project': project}
json = self.client.get(url, params=params, timeout=self.timeout)
return json['versions'] | def list_versions(self, project) | Lists all deployed versions of a specific project. First class, maps
to Scrapyd's list versions endpoint. | 3.547071 | 3.246618 | 1.092544 |
url = self._build_url(constants.SCHEDULE_ENDPOINT)
data = {
'project': project,
'spider': spider
}
data.update(kwargs)
if settings:
setting_params = []
for setting_name, value in iteritems(settings):
setting_params.append('{0}={1}'.format(setting_name, value))
data['setting'] = setting_params
json = self.client.post(url, data=data, timeout=self.timeout)
return json['jobid'] | def schedule(self, project, spider, settings=None, **kwargs) | Schedules a spider from a specific project to run. First class, maps
to Scrapyd's scheduling endpoint. | 2.862345 | 2.705502 | 1.057972 |
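Below is a minimal, self-contained sketch of the payload construction that `schedule` performs before its POST; the helper name `build_schedule_payload` is hypothetical, but the 'setting' flattening mirrors the code above.

def build_schedule_payload(project, spider, settings=None, **kwargs):
    # Mirrors `schedule` above: spider settings are flattened into
    # 'NAME=value' strings under the 'setting' key of the POST data.
    data = {'project': project, 'spider': spider}
    data.update(kwargs)
    if settings:
        data['setting'] = ['{0}={1}'.format(k, v) for k, v in settings.items()]
    return data

build_schedule_payload('myproject', 'myspider',
                       settings={'DOWNLOAD_DELAY': 2}, jobid='abc123')
# -> {'project': 'myproject', 'spider': 'myspider', 'jobid': 'abc123',
#     'setting': ['DOWNLOAD_DELAY=2']}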
url = self._build_url(constants.DAEMON_STATUS_ENDPOINT)
json = self.client.get(url, timeout=self.timeout)
return json | def daemon_status(self) | Displays the load status of a service.
:rtype: dict | 4.80408 | 4.812163 | 0.99832 |
if not response.ok:
raise ScrapydResponseError(
"Scrapyd returned a {0} error: {1}".format(
response.status_code,
response.text))
try:
json = response.json()
except ValueError:
raise ScrapydResponseError("Scrapyd returned an invalid JSON "
"response: {0}".format(response.text))
if json['status'] == 'ok':
json.pop('status')
return json
elif json['status'] == 'error':
raise ScrapydResponseError(json['message']) | def _handle_response(self, response) | Handles the response received from Scrapyd. | 2.492706 | 2.175476 | 1.145821 |
for child in self.expr.all:
if isinstance(child, TexExpr):
node = TexNode(child)
node.parent = self
yield node
else:
yield child | def all(self) | r"""Returns all content in this node, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \newcommand{reverseconcat}[3]{#3#2#1}
... ''')
>>> list(soup.all)
['\n', \newcommand{reverseconcat}[3]{#3#2#1}, '\n'] | 8.428982 | 7.937995 | 1.061853 |
for child in self.expr.children:
node = TexNode(child)
node.parent = self
yield node | def children(self) | r"""Immediate children of this TeX element that are valid TeX objects.
This is equivalent to contents, excluding text elements and keeping only
Tex expressions.
:return: generator of all children
:rtype: Iterator[TexNode]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... Random text!
... \item Hello
... \end{itemize}''')
>>> next(soup.itemize.children)
\item Hello
<BLANKLINE> | 12.137951 | 12.036999 | 1.008387 |
if isinstance(self.expr, TexCmd) and len(self.expr.args) == 1:
return self.expr.args[0].value | def string(self) | r"""This is valid if and only if
1. the expression is a :class:`.TexCmd` AND
2. the command has only one argument.
:rtype: Union[None,str]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textbf{Hello}''')
>>> soup.textbf.string
'Hello'
>>> soup.textbf.string = 'Hello World'
>>> soup.textbf.string
'Hello World'
>>> soup.textbf
\textbf{Hello World} | 7.197481 | 3.938958 | 1.827255 |
for descendant in self.contents:
if isinstance(descendant, TokenWithPosition):
yield descendant
elif hasattr(descendant, 'text'):
yield from descendant.text | def text(self) | r"""All text in descendant nodes.
This is equivalent to contents, keeping text elements and excluding
Tex expressions.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... \begin{itemize}
... \item Nested
... \end{itemize}
... \end{itemize}''')
>>> next(soup.text)
'Nested\n ' | 7.27577 | 6.416265 | 1.133957 |
assert isinstance(i, int), (
'Provided index "{}" is not an integer! Did you switch your '
'arguments? The first argument to `insert` is the '
'index.'.format(i))
self.expr.insert(i, *nodes) | def insert(self, i, *nodes) | r"""Add node(s) to this node's list of children, inserted at position i.
:param int i: Position to add nodes to
:param TexNode nodes: List of nodes to add
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... \item Hello
... \item Bye
... \end{itemize}''')
>>> item = soup.item
>>> soup.item.delete()
>>> soup.itemize.insert(1, item)
>>> soup.itemize
\begin{itemize}
\item Hello
\item Bye
\end{itemize} | 10.029455 | 13.731786 | 0.730382 |
return len(list(self.find_all(name, **attrs))) | def count(self, name=None, **attrs) | r"""Number of descendants matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: number of matching expressions
:rtype: int
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Hey}
... \textit{Silly}
... \textit{Willy}''')
>>> soup.count('section')
1
>>> soup.count('textit')
2 | 10.528328 | 26.086269 | 0.403597 |
# TODO: needs better abstraction for supports contents
parent = self.parent
if parent.expr._supports_contents():
parent.remove(self)
return
# TODO: needs abstraction for removing from arg
for arg in parent.args:
if self.expr in arg.contents:
arg.contents.remove(self.expr) | def delete(self) | r"""Delete this node from the parse tree.
Where applicable, this will remove all descendants of this node from
the parse tree.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textit{\color{blue}{Silly}}\textit{keep me!}''')
>>> soup.textit.color.delete()
>>> soup
\textit{}\textit{keep me!}
>>> soup.textit.delete()
>>> soup
\textit{keep me!} | 10.98651 | 11.030652 | 0.995998 |
try:
return next(self.find_all(name, **attrs))
except StopIteration:
return None | def find(self, name=None, **attrs) | r"""First descendant node matching criteria.
Returns None if no descendant node found.
:return: descendant node matching criteria
:rtype: Union[None,TexExpr]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> soup.find('textit')
\textit{eee}
>>> soup.find('textbf') | 5.179988 | 10.588073 | 0.489229 |
for descendant in self.__descendants():
if hasattr(descendant, '__match__') and \
descendant.__match__(name, attrs):
yield descendant | def find_all(self, name=None, **attrs) | r"""Return all descendant nodes matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: All descendant nodes matching criteria
:rtype: Iterator[TexNode]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> gen = soup.find_all('textit')
>>> next(gen)
\textit{eee}
>>> next(gen)
\textit{ooo}
>>> next(soup.find_all('textbf'))
Traceback (most recent call last):
...
StopIteration | 5.950149 | 7.04757 | 0.844284 |
self.expr.insert(
self.expr.remove(child.expr),
*nodes) | def replace(self, child, *nodes) | r"""Replace provided node with node(s).
:param TexNode child: Child node to replace
:param TexNode nodes: List of nodes to substitute in
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... \item Hello
... \item Bye
... \end{itemize}''')
>>> items = list(soup.find_all('item'))
>>> bye = items[1]
>>> soup.itemize.replace(soup.item, bye)
>>> soup.itemize
\begin{itemize}
\item Bye
\item Bye
\end{itemize} | 19.533968 | 40.371136 | 0.48386 |
return itertools.chain(self.contents,
*[c.descendants for c in self.children]) | def __descendants(self) | Implementation for descendants, hacky workaround for __getattr__
issues. | 7.199171 | 6.224135 | 1.156654 |
for arg in self.args:
for expr in arg:
yield expr
for content in self._contents:
yield content | def all(self) | r"""Returns all content in this expression, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr1.all) == list(expr2.all)
True | 11.966186 | 13.018222 | 0.919187 |
for content in self.all:
is_whitespace = isinstance(content, str) and content.isspace()
if not is_whitespace or self.preserve_whitespace:
yield content | def contents(self) | r"""Returns all contents in this expression.
Optionally includes whitespace if set when node was created.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> list(expr1.contents)
['hi']
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr2.contents)
['\n', 'hi'] | 7.981812 | 6.465816 | 1.234463 |
for content in self.contents:
if isinstance(content, TokenWithPosition):
for word in content.split():
yield word
else:
yield content | def tokens(self) | Further breaks down all tokens for a particular expression into
words and other expressions.
>>> tex = TexEnv('lstlisting', ('var x = 10',))
>>> list(tex.tokens)
['var x = 10'] | 6.247718 | 7.448231 | 0.838819 |
self._assert_supports_contents()
for j, expr in enumerate(exprs):
self._contents.insert(i + j, expr) | def insert(self, i, *exprs) | Insert content at specified position into expression.
:param int i: Position to add content to
:param Union[TexExpr,str] exprs: List of contents to add
>>> expr = TexExpr('textbf', ('hello',))
>>> expr
TexExpr('textbf', ['hello'])
>>> expr.insert(0, 'world')
>>> expr
TexExpr('textbf', ['world', 'hello']) | 5.908981 | 10.340433 | 0.571444 |
self._assert_supports_contents()
index = self._contents.index(expr)
self._contents.remove(expr)
return index | def remove(self, expr) | Remove a provided expression from its list of contents.
:param Union[TexExpr,str] expr: Content to remove
:return: index of the expression removed
:rtype: int
>>> expr = TexExpr('textbf', ('hello',))
>>> expr.remove('hello')
0
>>> expr
TexExpr('textbf', []) | 6.378564 | 8.928995 | 0.714365 |
if isinstance(s, arg_type):
return s
if isinstance(s, (list, tuple)):
for arg in arg_type:
if [s[0], s[-1]] == arg.delims():
return arg(*s[1:-1])
raise TypeError('Malformed argument. First and last elements must '
'match a valid argument format. In this case, TexSoup'
' could not find matching punctuation for: %s.\n'
'Common issues include: Unescaped special characters,'
' mistyped closing punctuation, misalignment.' % (str(s)))
for arg in arg_type:
if arg.__is__(s):
return arg(arg.__strip__(s))
raise TypeError('Malformed argument. Must be an Arg or a string in '
'either brackets or curly braces.') | def parse(s) | Parse a string or list and return an Argument object
:param Union[str,iterable] s: Either a string or a list, where the first and
last elements are valid argument delimiters.
>>> Arg.parse(RArg('arg0'))
RArg('arg0')
>>> Arg.parse('[arg0]')
OArg('arg0') | 7.572085 | 6.647281 | 1.139125 |
arg = self.__coerce(arg)
if isinstance(arg, Arg):
super().insert(i, arg)
if len(self) <= 1:
self.all.append(arg)
else:
if i > len(self):
i = len(self) - 1
before = self[i - 1]
index_before = self.all.index(before)
self.all.insert(index_before + 1, arg) | def insert(self, i, arg) | r"""Insert whitespace, an unparsed argument string, or an argument
object.
:param int i: Index to insert argument into
:param Arg arg: Argument to insert
>>> arguments = TexArgs(['\n', RArg('arg0'), '[arg2]'])
>>> arguments.insert(1, '[arg1]')
>>> len(arguments)
3
>>> arguments
[RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.all
['\n', RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.insert(10, '[arg3]')
>>> arguments[3]
OArg('arg3') | 3.768437 | 4.340316 | 0.86824 |
item = self.__coerce(item)
self.all.remove(item)
super().remove(item) | def remove(self, item) | Remove either an unparsed argument string or an argument object.
:param Union[str,Arg] item: Item to remove
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.remove('{arg0}')
>>> len(arguments)
2
>>> arguments[0]
OArg('arg2') | 7.594739 | 14.810404 | 0.512798 |
item = super().pop(i)
j = self.all.index(item)
return self.all.pop(j) | def pop(self, i) | Pop argument object at provided index.
:param int i: Index to pop from the list
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.pop(1)
OArg('arg2')
>>> len(arguments)
2
>>> arguments[0]
RArg('arg0') | 5.087618 | 10.688807 | 0.475976 |
@functools.wraps(f)
def wrap(*args, **kwargs):
iterator = kwargs.get('iterator', args[0])
if not isinstance(iterator, Buffer):
iterator = Buffer(iterator)
return f(iterator, *args[1:], **kwargs)
return wrap | def to_buffer(f) | Decorator converting all strings and iterators/iterables into Buffers. | 2.647245 | 2.180748 | 1.213916 |
if j < 0:
return self.backward(-j)
self.__i += j
return self[self.__i-j:self.__i] | def forward(self, j=1) | Move forward by j steps.
>>> b = Buffer('abcdef')
>>> b.forward(3)
'abc'
>>> b.forward(-2)
'bc' | 5.812663 | 5.680748 | 1.023221 |
i, c = 0, ''
while self.hasNext() and not condition(self.peek()):
c += self.forward(1)
i += 1
assert self.backward(i) == c
return i | def num_forward_until(self, condition) | Forward until one of the provided matches is found.
:param condition: predicate called on the next token; counting stops once it returns True | 4.642958 | 6.196098 | 0.749336 |
c = TokenWithPosition('', self.peek().position)
while self.hasNext() and not condition(self.peek()):
c += self.forward(1)
return c | def forward_until(self, condition) | Forward until one of the provided matches is found.
The returned string contains all characters found *before* the condition
was met. In other words, the condition will be true for the remainder
of the buffer.
:param condition: predicate called on the next token; the buffer advances until it returns True | 8.988616 | 13.74484 | 0.653963 |
try:
if isinstance(j, int):
return self[self.__i+j]
return self[self.__i + j[0]:self.__i + j[1]]
except IndexError:
return None | def peek(self, j=(0, 1)) | Peek at the next value(s), without advancing the Buffer.
Return None if index is out of range. | 3.295739 | 2.840222 | 1.160381 |
if not isinstance(tex, str):
tex = ''.join(itertools.chain(*tex))
buf, children = Buffer(tokenize(tex)), []
while buf.hasNext():
content = read_tex(buf)
if content is not None:
children.append(content)
return TexEnv('[tex]', children), tex | def read(tex) | Read and parse all LaTeX source
:param Union[str,iterable] tex: LaTeX source
:return TexEnv: the global environment | 6.31156 | 6.01088 | 1.050023 |
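A short usage sketch for `read` (it relies on `tokenize`, `Buffer`, `read_tex`, and `TexEnv` from the surrounding module):

env, src = read(r'\textbf{hi} world')
# env is the global '[tex]' TexEnv whose children are the parsed
# expressions; src is the original source string.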
# soupify
soup = TexSoup(tex)
# resolve subimports
for subimport in soup.find_all('subimport'):
path = subimport.args[0] + subimport.args[1]
subimport.replace_with(*resolve(open(path)).contents)
# resolve imports
for _import in soup.find_all('import'):
_import.replace_with(*resolve(open(_import.args[0])).contents)
# resolve includes
for include in soup.find_all('include'):
include.replace_with(*resolve(open(include.args[0])).contents)
return soup | def resolve(tex) | Resolve all imports and update the parse tree.
Takes LaTeX source (a string or file buffer), replaces all \subimport, \import, and \include commands with the contents of the files they reference, and returns the updated soup. | 2.84927 | 2.633973 | 1.081738 |
return sum(len(a.string) for a in TexSoup(tex).find_all(command)) | def sollen(tex, command) | r"""Measure solution length
:param Union[str,buffer] tex: the LaTeX source as a string or file buffer
:param str command: the command denoting a solution i.e., if the tex file
uses '\answer{<answer here>}', then the command is 'answer'.
:return int: the solution length | 8.991314 | 10.817654 | 0.83117 |
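A small usage sketch for `sollen`; the `\answer` command and its contents here are made up for illustration.

tex = r'Compute: \answer{42}. Simplify: \answer{x+1}.'
sollen(tex, 'answer')  # len('42') + len('x+1') == 5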
parsed, src = read(tex_code)
return TexNode(parsed, src=src) | def TexSoup(tex_code) | r"""
At a high-level, parses provided Tex into a navigable, searchable structure.
This is accomplished in two steps:
1. Tex is parsed, cleaned, and packaged.
2. Structure fed to TexNodes for a searchable, coder-friendly interface.
:param Union[str,iterable] tex_code: the Tex source
:return: :class:`TexSoup.data.TexNode` object representing tex document
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{document}
...
... \section{Hello \textit{world}.}
...
... \subsection{Watermelon}
...
... (n.) A sacred fruit. Also known as:
...
... \begin{itemize}
... \item red lemon
... \item life
... \end{itemize}
...
... Here is the prevalence of each synonym.
...
... \begin{tabular}{c c}
... red lemon & uncommon \\ \n
... life & common
... \end{tabular}
...
... \end{document}
... ''')
>>> soup.section
\section{Hello \textit{world}.}
>>> soup.section.name
'section'
>>> soup.section.string
'Hello \\textit{world}.'
>>> soup.section.parent.name
'document'
>>> soup.tabular
\begin{tabular}{c c}
red lemon & uncommon \\ \n
life & common
\end{tabular}
>>> soup.tabular.args[0].value
'c c'
>>> soup.itemize
\begin{itemize}
\item red lemon
\item life
\end{itemize}
>>> soup.item
\item red lemon
...
>>> list(soup.find_all('item'))
[\item red lemon
, \item life
]
>>> soup = TexSoup(r'''\textbf{'Hello'}\textit{'Y'}O\textit{'U'}''')
>>> soup.textbf.delete()
>>> 'Hello' not in repr(soup)
True
>>> soup.textit.replace_with('S')
>>> soup.textit.replace_with('U', 'P')
>>> soup
SOUP | 20.70517 | 39.257076 | 0.527425 |
# soupify
soup = TexSoup(tex)
# extract all unique labels
labels = set(label.string for label in soup.find_all('label'))
# create dictionary mapping label to number of references
return dict((label, soup.count(r'\ref{%s}' % label)) for label in labels) | def count(tex) | Extract all labels, then count the number of times each is referenced in
the provided file. Does not follow \includes. | 6.285995 | 4.72041 | 1.331663 |
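A usage sketch for the fixed `count` above, assuming `count`/`find_all` match a full command string such as `\ref{eq:x}` (which the original lookup relies on); the label name is made up.

tex = r'\label{eq:x} As shown in \ref{eq:x} and \ref{eq:x}.'
count(tex)  # -> {'eq:x': 2}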
while text.hasNext():
for name, f in tokenizers:
current_token = f(text)
if current_token is not None:
return current_token | def next_token(text) | r"""Returns the next possible token, advancing the iterator to the next
position to start processing from.
:param Union[str,iterator,Buffer] text: LaTeX to process
:return str: the token
>>> b = Buffer(r'\textbf{Do play\textit{nice}.} $$\min_w \|w\|_2^2$$')
>>> print(next_token(b), next_token(b), next_token(b), next_token(b))
\textbf { Do play \textit
>>> print(next_token(b), next_token(b), next_token(b), next_token(b))
{ nice } .
>>> print(next_token(b))
}
>>> print(next_token(Buffer('.}')))
.
>>> next_token(b)
' '
>>> next_token(b)
'$$'
>>> b2 = Buffer(r'\gamma = \beta')
>>> print(next_token(b2), next_token(b2), next_token(b2))
\gamma = \beta | 7.481994 | 10.882093 | 0.687551 |
current_token = next_token(text)
while current_token is not None:
yield current_token
current_token = next_token(text) | def tokenize(text) | r"""Generator for LaTeX tokens on text, ignoring comments.
:param Union[str,iterator,Buffer] text: LaTeX to process
>>> print(*tokenize(r'\textbf{Do play \textit{nice}.}'))
\textbf { Do play \textit { nice } . }
>>> print(*tokenize(r'\begin{tabular} 0 & 1 \\ 2 & 0 \end{tabular}'))
\begin { tabular } 0 & 1 \\ 2 & 0 \end { tabular } | 3.822971 | 4.812232 | 0.794428 |
def wrap(f):
tokenizers.append((name, f))
return f
return wrap | def token(name) | Marker for a token
:param str name: Name of tokenizer | 7.946814 | 8.498227 | 0.935114 |
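A registration sketch for the `@token` decorator above. The tab tokenizer is hypothetical and only illustrates the pattern; it assumes the module-level `tokenizers` registry that `token` appends to and `next_token` iterates over.

tokenizers = []  # module-level registry consumed by next_token above

@token('tab')
def tokenize_tab(text):
    # Hypothetical tokenizer: emit a literal tab as its own token.
    if text.peek() == '\t':
        return text.forward(1)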
if text.peek() == '\\':
for point in PUNCTUATION_COMMANDS:
if text.peek((1, len(point) + 1)) == point:
return text.forward(len(point) + 1) | def tokenize_punctuation_command(text) | Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position | 5.447432 | 5.766193 | 0.944719 |
if text.peek() == '\\':
c = text.forward(1)
tokens = set(string.punctuation + string.whitespace) - {'*'}
while text.hasNext() and (c == '\\' or text.peek() not in tokens) and c not in MATH_TOKENS:
c += text.forward(1)
return c | def tokenize_command(text) | Process command, but ignore line breaks. (double backslash)
:param Buffer text: iterator over line, with current position | 6.638646 | 6.310711 | 1.051965 |
result = TokenWithPosition('', text.position)
if text.peek() == '%' and text.peek(-1) != '\\':
result += text.forward(1)
while text.peek() != '\n' and text.hasNext():
result += text.forward(1)
return result | def tokenize_line_comment(text) | r"""Process a line comment
:param Buffer text: iterator over line, with current position
>>> tokenize_line_comment(Buffer('hello %world'))
>>> tokenize_line_comment(Buffer('%hello world'))
'%hello world'
>>> tokenize_line_comment(Buffer('%hello\n world'))
'%hello' | 6.118985 | 6.444227 | 0.94953 |
for delim in ARG_TOKENS:
if text.startswith(delim):
return text.forward(len(delim)) | def tokenize_argument(text) | Process both optional and required arguments.
:param Buffer text: iterator over line, with current position | 9.260691 | 13.067533 | 0.708679 |
if text.startswith('$') and (
text.position == 0 or text.peek(-1) != '\\' or text.endswith(r'\\')):
starter = '$$' if text.startswith('$$') else '$'
return TokenWithPosition(text.forward(len(starter)), text.position) | def tokenize_math(text) | r"""Prevents math from being tokenized.
:param Buffer text: iterator over line, with current position
>>> b = Buffer(r'$\min_x$ \command')
>>> tokenize_math(b)
'$'
>>> b = Buffer(r'$$\min_x$$ \command')
>>> tokenize_math(b)
'$$' | 9.870281 | 9.588457 | 1.029392 |
if delimiters is None:
delimiters = ALL_TOKENS
result = TokenWithPosition('', text.position)
for c in text:
if c == '\\' and str(text.peek()) in delimiters and str(c + text.peek()) not in delimiters:
c += next(text)
elif str(c) in delimiters: # assumes all tokens are single characters
text.backward(1)
return result
result += c
if text.peek((0, 2)) == '\\\\':
result += text.forward(2)
if text.peek((0, 2)) == '\n\n':
result += text.forward(2)
return result
return result | def tokenize_string(text, delimiters=None) | r"""Process a string of text
:param Buffer text: iterator over line, with current position
:param Union[None,iterable,str] delimiters: defines the delimiters
>>> tokenize_string(Buffer('hello'))
'hello'
>>> b = Buffer(r'hello again\command')
>>> tokenize_string(b)
'hello again'
>>> print(b.peek())
\
>>> print(tokenize_string(Buffer(r'0 & 1 \\\command')))
0 & 1 \\ | 4.759193 | 4.680061 | 1.016908 |
c = next(src)
if c.startswith('%'):
return c
elif c.startswith('$'):
name = '$$' if c.startswith('$$') else '$'
expr = TexEnv(name, [], nobegin=True)
return read_math_env(src, expr)
elif c.startswith(r'\[') or c.startswith(r'\('):
if c.startswith(r'\['):
name = 'displaymath'
begin = r'\['
end = r'\]'
else:
name = 'math'
begin = r'\('
end = r'\)'
expr = TexEnv(name, [], nobegin=True, begin=begin, end=end)
return read_math_env(src, expr)
elif c.startswith('\\'):
command = TokenWithPosition(c[1:], src.position)
if command == 'item':
contents, arg = read_item(src)
mode, expr = 'command', TexCmd(command, contents, arg)
elif command == 'begin':
mode, expr, _ = 'begin', TexEnv(src.peek(1)), src.forward(3)
else:
mode, expr = 'command', TexCmd(command)
expr.args = read_args(src, expr.args)
if mode == 'begin':
read_env(src, expr)
return expr
if c in ARG_START_TOKENS:
return read_arg(src, c)
return c | def read_tex(src) | r"""Read next expression from buffer
:param Buffer src: a buffer of tokens | 4.166256 | 4.063146 | 1.025377 |
def stringify(s):
return TokenWithPosition.join(s.split(' '), glue=' ')
def forward_until_new(s):
t = TokenWithPosition('', s.peek().position)
while (s.hasNext() and
any([s.peek().startswith(substr) for substr in string.whitespace]) and
not t.strip(" ").endswith('\n')):
t += s.forward(1)
return t
# Item argument such as in description environment
arg = []
extra = []
if src.peek() in ARG_START_TOKENS:
c = next(src)
a = read_arg(src, c)
arg.append(a)
if not src.hasNext():
return extra, arg
last = stringify(forward_until_new(src))
extra.append(last.lstrip(" "))
while (src.hasNext() and not str(src).strip(" ").startswith('\n\n') and
not src.startswith(r'\item') and
not src.startswith(r'\end') and
not (isinstance(last, TokenWithPosition) and last.strip(" ").endswith('\n\n') and len(extra) > 1)):
last = read_tex(src)
extra.append(last)
return extra, arg | def read_item(src) | r"""Read the item content.
There can be any number of whitespace characters between \item and the first
non-whitespace character. However, after that first non-whitespace
character, the item can only tolerate one successive line break at a time.
\item can also take an argument.
:param Buffer src: a buffer of tokens
:return: contents of the item and any item arguments | 7.29919 | 6.820169 | 1.070236 |
content = src.forward_until(lambda s: s == expr.end)
if not src.startswith(expr.end):
end = src.peek()
explanation = 'Instead got %s' % end if end else 'Reached end of file.'
raise EOFError('Expecting %s. %s' % (expr.end, explanation))
else:
src.forward(1)
expr.append(content)
return expr | def read_math_env(src, expr) | r"""Read the environment from buffer.
Advances the buffer until right after the end of the environment. Adds
parsed content to the expression automatically.
:param Buffer src: a buffer of tokens
:param TexExpr expr: expression for the environment
:rtype: TexExpr | 7.302614 | 6.832764 | 1.068764 |
contents = []
if expr.name in SKIP_ENVS:
contents = [src.forward_until(lambda s: s == '\\end')]
while src.hasNext() and not src.startswith('\\end{%s}' % expr.name):
contents.append(read_tex(src))
if not src.startswith('\\end{%s}' % expr.name):
end = src.peek((0, 5))
explanation = 'Instead got %s' % end if end else 'Reached end of file.'
raise EOFError('Expecting \\end{%s}. %s' % (expr.name, explanation))
else:
src.forward(4)
expr.append(*contents)
return expr | def read_env(src, expr) | r"""Read the environment from buffer.
Advances the buffer until right after the end of the environment. Adds
parsed content to the expression automatically.
:param Buffer src: a buffer of tokens
:param TexExpr expr: expression for the environment
:rtype: TexExpr | 6.643965 | 5.826166 | 1.140367 |
args = args or TexArgs()
# Unlimited whitespace before first argument
candidate_index = src.num_forward_until(lambda s: not s.isspace())
while src.peek().isspace():
args.append(read_tex(src))
# Restricted to only one line break after first argument
line_breaks = 0
while src.peek() in ARG_START_TOKENS or \
(src.peek().isspace() and line_breaks == 0):
space_index = src.num_forward_until(lambda s: not s.isspace())
if space_index > 0:
line_breaks += 1
if src.peek((0, space_index)).count("\n") <= 1 and src.peek(space_index) in ARG_START_TOKENS:
args.append(read_tex(src))
else:
line_breaks = 0
tex_text = read_tex(src)
args.append(tex_text)
if not args:
src.backward(candidate_index)
return args | def read_args(src, args=None) | r"""Read all arguments from buffer.
Advances buffer until end of last valid arguments. There can be any number
of whitespace characters between command and the first argument.
However, after that first argument, the command can only tolerate one
successive line break, before discontinuing the chain of arguments.
:param TexArgs args: existing arguments to extend
:return: parsed arguments
:rtype: TexArgs | 4.307753 | 3.971709 | 1.08461 |
content = [c]
while src.hasNext():
if src.peek() in ARG_END_TOKENS:
content.append(next(src))
break
else:
content.append(read_tex(src))
return Arg.parse(content) | def read_arg(src, c) | Read the argument from buffer.
Advances buffer until right before the end of the argument.
:param Buffer src: a buffer of tokens
:param str c: argument token (starting token)
:return: the parsed argument
:rtype: Arg | 5.54202 | 6.156846 | 0.900139 |
if self._pareto_front is None:
self._pareto_front = self._calc_pareto_front(*args, **kwargs)
return self._pareto_front | def pareto_front(self, *args, **kwargs) | Returns
-------
P : np.array
The Pareto front of a given problem. It is only loaded or calculated the first time and then cached.
For a single-objective problem only one point is returned but still in a two dimensional array. | 2.373275 | 2.428561 | 0.977235 |
if self._pareto_set is None:
self._pareto_set = self._calc_pareto_set(*args, **kwargs)
return self._pareto_set | def pareto_set(self, *args, **kwargs) | Returns
-------
S : np.array
Returns the Pareto set for a problem: the points in the X (design) space that are known to be optimal. | 2.376363 | 2.661328 | 0.892924 |
for_committors = committors([source], [sink], msm)
cond_committors = conditional_committors(source, sink, waypoint, msm)
if hasattr(msm, 'all_transmats_'):
frac_visited = np.zeros((msm.n_states,))
for i, tprob in enumerate(msm.all_transmats_):
frac_visited[i] = _fraction_visited(source, sink, waypoint,
tprob, for_committors,
cond_committors)
return np.median(frac_visited, axis=0)
return _fraction_visited(source, sink, waypoint, msm.transmat_,
for_committors, cond_committors) | def fraction_visited(source, sink, waypoint, msm) | Calculate the fraction of times a walker on `tprob` going from `sources`
to `sinks` will travel through the set of states `waypoints` en route.
Computes the conditional committors q^{ABC^+} and uses them to find the
fraction of paths mentioned above.
Note that in the notation of Dickson et al. this computes h_c(A,B), with
sources = A
sinks = B
waypoint = C
Parameters
----------
source : int
The index of the source state
sink : int
The index of the sink state
waypoint : int
The index of the intermediate state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
fraction_visited : float
The fraction of times a walker going from `sources` -> `sinks` stops
by `waypoints` on its way.
See Also
--------
msmbuilder.tpt.conditional_committors
Calculate the probability of visiting a waypoint while on a path
between a source and sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | 3.431543 | 2.962518 | 1.15832 |
n_states = msm.n_states_
if isinstance(waypoints, int):
waypoints = [waypoints]
elif waypoints is None:
waypoints = xrange(n_states)
elif not (isinstance(waypoints, list) or
isinstance(waypoints, np.ndarray)):
raise ValueError("waypoints (%s) must be an int, a list, or None" %
str(waypoints))
hub_scores = []
for waypoint in waypoints:
other_states = (i for i in xrange(n_states) if i != waypoint)
# calculate the hub score for this waypoint
hub_score = 0.0
for (source, sink) in itertools.permutations(other_states, 2):
hub_score += fraction_visited(source, sink, waypoint, msm)
hub_score /= float((n_states - 1) * (n_states - 2))
hub_scores.append(hub_score)
return np.array(hub_scores) | def hub_scores(msm, waypoints=None) | Calculate the hub score for one or more waypoints
The "hub score" is a measure of how well traveled a certain state or
set of states is in a network. Specifically, it is the fraction of
times that a walker visits a state en route from some state A to another
state B, averaged over all combinations of A and B.
Parameters
----------
msm : msmbuilder.MarkovStateModel
MSM to analyze
waypoints : array_like, int, optional
The index of the intermediate state (or more than one).
If None, then all waypoints will be used
Returns
-------
hub_score : float
The hub score for the waypoint
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | 2.405271 | 2.291564 | 1.04962 |
fraction_visited = (float(tprob[source, :].dot(cond_committors)) /
float(tprob[source, :].dot(for_committors)))
return fraction_visited | def _fraction_visited(source, sink, waypoint, tprob, for_committors,
cond_committors) | Calculate the fraction of times a walker on `tprob` going from `sources`
to `sinks` will travel through the set of states `waypoints` en route.
Computes the conditional committors q^{ABC^+} and uses them to find the
fraction of paths mentioned above.
Note that in the notation of Dickson et al. this computes h_c(A,B), with
sources = A
sinks = B
waypoint = C
Parameters
----------
source : int
The index of the source state
sink : int
The index of the sink state
waypoint : int
The index of the intermediate state
tprob : np.ndarray
Transition matrix
for_committors : np.ndarray
The forward committors for the reaction sources -> sinks
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
Returns
-------
fraction_visited : float
The fraction of times a walker going from `sources` -> `sinks` stops
by `waypoints` on its way.
See Also
--------
msmbuilder.tpt.conditional_committors
Calculate the probability of visiting a waypoint while on a path
between a source and sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | 3.753615 | 4.360282 | 0.860865 |
super(BACE, self).fit(sequences, y=y)
if self.n_macrostates is not None:
self._do_lumping()
else:
raise RuntimeError('n_macrostates must not be None to fit')
return self | def fit(self, sequences, y=None) | Fit a BACE lumping model using a sequence of cluster assignments.
Parameters
----------
sequences : list(np.ndarray(dtype='int'))
List of arrays of cluster assignments
y : None
Unused, present for sklearn compatibility only.
Returns
-------
self | 6.977251 | 6.067203 | 1.149995 |
c = copy.deepcopy(self.countsmat_)
if self.sliding_window:
c *= self.lag_time
c, macro_map, statesKeep = self._filterFunc(c)
w = np.array(c.sum(axis=1)).flatten()
w[statesKeep] += 1
unmerged = np.zeros(w.shape[0], dtype=np.int8)
unmerged[statesKeep] = 1
# get nonzero indices in upper triangle
indRecalc = self._getInds(c, statesKeep)
dMat = np.zeros(c.shape, dtype=np.float32)
i = 0
nCurrentStates = statesKeep.shape[0]
self.bayesFactors = {}
dMat, minX, minY = self._calcDMat(c, w, indRecalc, dMat,
statesKeep, unmerged)
while nCurrentStates > self.n_macrostates:
c, w, indRecalc, dMat, macro_map, statesKeep, unmerged, minX, minY = self._mergeTwoClosestStates(
c, w, indRecalc, dMat, macro_map,
statesKeep, minX, minY, unmerged)
nCurrentStates -= 1
if self.save_all_maps:
saved_map = copy.deepcopy(macro_map)
self.map_dict[nCurrentStates] = saved_map
if nCurrentStates - 1 == self.n_macrostates:
self.microstate_mapping_ = macro_map | def _do_lumping(self) | Do the BACE lumping. | 5.250536 | 5.132033 | 1.023091 |
params = msm.get_params()
lumper = cls(n_macrostates, filter, save_all_maps, n_proc,
chunk_size, **params)
lumper.transmat_ = msm.transmat_
lumper.populations_ = msm.populations_
lumper.mapping_ = msm.mapping_
lumper.countsmat_ = msm.countsmat_
lumper.n_states_ = msm.n_states_
if n_macrostates is not None:
lumper._do_lumping()
return lumper | def from_msm(cls, msm, n_macrostates, filter=1.1, save_all_maps=True,
n_proc=1, chunk_size=100) | Create and fit lumped model from pre-existing MSM.
Parameters
----------
msm : MarkovStateModel
The input microstate msm to use.
n_macrostates : int
The number of macrostates
Returns
-------
lumper : cls
The fit BACE object. | 2.639792 | 2.856491 | 0.924138 |
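A usage sketch (assumes `msm` is an already-fitted `MarkovStateModel`):

lumper = BACE.from_msm(msm, n_macrostates=5)
macro_assignments = lumper.microstate_mapping_  # microstate -> macrostate labels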
if self.currval >= self.maxval:
return 100.0
return self.currval * 100.0 / self.maxval | def percentage(self) | Returns the progress as a percentage. | 3.597317 | 2.930399 | 1.227586 |
return _brownian_eigs(n_grid, lag_time, DOUBLEWELL_GRAD_POTENTIAL,
-np.pi, np.pi, reflect_bc=True) | def doublewell_eigs(n_grid, lag_time=1) | Analytic eigenvalues/eigenvectors for the doublewell system
TODO: DOCUMENT ME | 10.467216 | 12.158859 | 0.860872 |
transmat = _brownian_transmat(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc)
u, lv, rv = _solve_msm_eigensystem(transmat, k=len(transmat) - 1)
return u, rv | def _brownian_eigs(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc) | Analytic eigenvalues/eigenvectors for 1D Brownian dynamics | 5.121587 | 5.07596 | 1.008989 |
seq_id = [k for k, v in self.all_sequences.items()
if v == traj.top.to_fasta(chain=0)][0]
return self.feat_dict[seq_id].partial_transform(traj) | def partial_transform(self, traj) | Featurize an MD trajectory into a vector space derived from
residue-residue distances
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
Warning
-------
Only works for chain 0 for now. | 7.430648 | 7.397398 | 1.004495 |
packages = ['mdtraj.scripts']
for dir,subdirs,files in os.walk('MDTraj'):
package = dir.replace(os.path.sep, '.')
if '__init__.py' not in files:
# not a package
continue
packages.append(package.replace('MDTraj', 'mdtraj'))
return packages | def find_packages() | Find all of mdtraj's python packages.
Adapted from IPython's setupbase.py. Copyright IPython
contributors, licensed under the BSD license. | 3.857608 | 3.130775 | 1.232157 |
"Does this compiler support SSE3 intrinsics?"
self._print_support_start('SSE3')
result = self.hasfunction('__m128 v; _mm_hadd_ps(v,v)',
include='<pmmintrin.h>',
extra_postargs=['-msse3'])
self._print_support_end('SSE3', result)
return result | def _detect_sse3(self) | Does this compiler support SSE3 intrinsics? | 9.8557 | 8.562344 | 1.151052 |
"Does this compiler support SSE4.1 intrinsics?"
self._print_support_start('SSE4.1')
result = self.hasfunction( '__m128 v; _mm_round_ps(v,0x00)',
include='<smmintrin.h>',
extra_postargs=['-msse4'])
self._print_support_end('SSE4.1', result)
return result | def _detect_sse41(self) | Does this compiler support SSE4.1 intrinsics? | 10.176908 | 8.875016 | 1.146692 |
if self.information_ is None:
self._build_information()
sigma_K = _ratematrix.sigma_K(
self.information_, theta=self.theta_, n=self.n_states_)
return sigma_K | def uncertainty_K(self) | Estimate of the element-wise asymptotic standard deviation
in the rate matrix | 9.041026 | 8.71819 | 1.03703 |
if self.information_ is None:
self._build_information()
sigma_pi = _ratematrix.sigma_pi(
self.information_, theta=self.theta_, n=self.n_states_)
return sigma_pi | def uncertainty_pi(self) | Estimate of the element-wise asymptotic standard deviation
in the stationary distribution. | 8.993578 | 9.047277 | 0.994065 |
if self.information_ is None:
self._build_information()
sigma_eigenvalues = _ratematrix.sigma_eigenvalues(
self.information_, theta=self.theta_, n=self.n_states_)
if self.n_timescales is None:
return sigma_eigenvalues
return np.nan_to_num(sigma_eigenvalues[:self.n_timescales+1]) | def uncertainty_eigenvalues(self) | Estimate of the element-wise asymptotic standard deviation
in the model eigenvalues | 5.613792 | 5.883111 | 0.954222 |
if self.information_ is None:
self._build_information()
sigma_timescales = _ratematrix.sigma_timescales(
self.information_, theta=self.theta_, n=self.n_states_)
if self.n_timescales is None:
return sigma_timescales
return sigma_timescales[:self.n_timescales] | def uncertainty_timescales(self) | Estimate of the element-wise asymptotic standard deviation
in the model relaxation timescales. | 5.243085 | 5.422495 | 0.966914 |
if self.theta_ is not None:
return self.theta_
if self.guess == 'log':
transmat, pi = _transmat_mle_prinz(countsmat)
K = np.real(scipy.linalg.logm(transmat)) / self.lag_time
elif self.guess == 'pseudo':
transmat, pi = _transmat_mle_prinz(countsmat)
K = (transmat - np.eye(self.n_states_)) / self.lag_time
elif isinstance(self.guess, np.ndarray):
pi = _solve_ratemat_eigensystem(self.guess)[1][:, 0]
K = self.guess
S = np.multiply(np.sqrt(np.outer(pi, 1/pi)), K)
sflat = np.maximum(S[np.triu_indices_from(countsmat, k=1)], 0)
theta0 = np.concatenate((sflat, np.log(pi)))
return theta0 | def _initial_guess(self, countsmat) | Generate an initial guess for \theta. | 4.705449 | 4.521624 | 1.040655 |
lag_time = float(self.lag_time)
# only the "active set" of variables not at the bounds of the
# feasible set.
inds = np.where(self.theta_ != 0)[0]
hessian = _ratematrix.hessian(
self.theta_, self.countsmat_, t=lag_time, inds=inds)
self.information_ = np.zeros((len(self.theta_), len(self.theta_)))
self.information_[np.ix_(inds, inds)] = scipy.linalg.pinv(-hessian) | def _build_information(self) | Build the inverse of hessian of the log likelihood at theta_ | 7.168124 | 6.315843 | 1.134943 |
# eigenvectors from the model we're scoring, `self`
V = self.right_eigenvectors_
m2 = self.__class__(**self.get_params())
m2.fit(sequences)
if self.mapping_ != m2.mapping_:
V = self._map_eigenvectors(V, m2.mapping_)
S = np.diag(m2.populations_)
C = S.dot(m2.transmat_)
try:
trace = np.trace(V.T.dot(C.dot(V)).dot(np.linalg.inv(V.T.dot(S.dot(V)))))
except np.linalg.LinAlgError:
trace = np.nan
return trace | def score(self, sequences, y=None) | Score the model on new data using the generalized matrix Rayleigh
quotient
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each sequence should be a
1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
Returns
-------
gmrq : float
Generalized matrix Rayleigh quotient. This number indicates how
well the top ``n_timescales+1`` eigenvectors of this model perform
as slowly decorrelating collective variables for the new data in
``sequences``.
References
----------
.. [1] McGibbon, R. T. and V. S. Pande, "Variational cross-validation
of slow dynamical modes in molecular kinetics" J. Chem. Phys. 142,
124105 (2015) | 4.759985 | 5.127392 | 0.928344 |
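A self-contained numeric sketch of the GMRQ trace computed in `score`, on a toy 2-state chain (all numbers are illustrative, not from the library):

import numpy as np

T = np.array([[0.9, 0.1],
              [0.2, 0.8]])      # toy transition matrix; eigenvalues 1 and 0.7
pi = np.array([2. / 3, 1. / 3])  # stationary distribution of T
S = np.diag(pi)
C = S.dot(T)
V = np.array([[1., 1.],
              [1., -2.]])       # columns are the right eigenvectors of T
gmrq = np.trace(V.T.dot(C).dot(V).dot(np.linalg.inv(V.T.dot(S).dot(V))))
# With the true eigenvectors, gmrq == 1 + 0.7 == 1.7 (sum of eigenvalues).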
# likelihood + grad
logp1, grad = loglikelihood(theta, counts)
# exponential prior on s_{ij}
logp2 = lexponential(theta[:-n], beta, grad=grad[:-n])
# dirichlet prior on \pi
logp3 = ldirichlet_softmax(theta[-n:], alpha=alpha, grad=grad[-n:])
logp = logp1 + logp2 + logp3
return logp, grad | def _log_posterior(theta, counts, alpha, beta, n) | Log of the posterior probability and gradient
Parameters
----------
theta : ndarray, shape=(n_params,)
The free parameters of the reversible rate matrix
counts : ndarray, shape=(n, n)
The count matrix (sufficient statistics for the likielihood)
alpha : ndarray, shape=(n,)
Dirichlet concentration parameters
beta : ndarray, shape=(n_params-n,)
Scale parameter for the exponential prior on the symmetric rate
matrix. | 5.569827 | 5.223208 | 1.066361 |
us, lvs, rvs = self._get_eigensystem()
# make sure to leave off equilibrium distribution
timescales = -1 / us[:,1:]
return timescales | def all_timescales_(self) | Implied relaxation timescales for each sample in the ensemble
Returns
-------
timescales : array-like, shape = (n_samples, n_timescales,)
The longest implied relaxation timescales of each sample in
the ensemble. | 27.625971 | 23.476265 | 1.176762 |
cumsum = np.cumsum(pvals)
if size is None:
size = (1,)
axis = 0
elif isinstance(size, tuple):
size = size + (1,)
axis = len(size) - 1
else:
raise TypeError('size must be an int or tuple of ints')
random_state = check_random_state(random_state)
return np.sum(cumsum < random_state.random_sample(size), axis=axis) | def categorical(pvals, size=None, random_state=None) | Return random integer from a categorical distribution
Parameters
----------
pvals : sequence of floats, length p
Probabilities of each of the ``p`` different outcomes. These
should sum to 1.
size : int or tuple of ints, optional
Defines the shape of the returned array of random integers. If None
(the default), returns a single float.
random_state: RandomState or an int seed, optional
A random number generator instance. | 2.378471 | 2.641203 | 0.900525 |
if hasattr(msm, 'all_transmats_'):
commits = np.zeros(msm.all_transmats_.shape[:2])
for i, tprob in enumerate(msm.all_transmats_):
commits[i, :] = _committors(sources, sinks, tprob)
return np.median(commits, axis=0)
return _committors(sources, sinks, msm.transmat_) | def committors(sources, sinks, msm) | Get the forward committors of the reaction sources -> sinks.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to the data.
Returns
-------
forward_committors : np.ndarray
The forward committors for the reaction sources -> sinks
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016. | 3.004132 | 3.197969 | 0.939387 |
# typecheck
for data in [source, sink, waypoint]:
if not isinstance(data, int):
raise ValueError("source, sink, and waypoint must be integers.")
if (source == waypoint) or (sink == waypoint) or (sink == source):
raise ValueError('source, sink, waypoint must all be disjoint!')
if hasattr(msm, 'all_transmats_'):
cond_committors = np.zeros(msm.all_transmats_.shape[:2])
for i, tprob in enumerate(msm.all_transmats_):
cond_committors[i, :] = _conditional_committors(source, sink,
waypoint, tprob)
return np.median(cond_committors, axis=0)
return _conditional_committors(source, sink, waypoint, msm.transmat_) | def conditional_committors(source, sink, waypoint, msm) | Computes the conditional committors :math:`q^{ABC^+}`, which give the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
See Also
--------
msmbuilder.tpt.fraction_visited : function
Calculate the fraction of visits to a waypoint from a given
source to a sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
Notes
-----
Employs dense linear algebra, memory use scales as N^2,
and cycle use scales as N^3
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | 3.035148 | 3.179837 | 0.954498 |
n_states = np.shape(tprob)[0]
forward_committors = _committors([source], [sink], tprob)
# permute the transition matrix into canonical form - send waypoint to the
# last row, and source + sink to the end after that
Bsink_indices = [source, sink, waypoint]
perm = np.array([i for i in xrange(n_states) if i not in Bsink_indices],
dtype=int)
perm = np.concatenate([perm, Bsink_indices])
permuted_tprob = tprob[perm, :][:, perm]
# extract P, R
n = n_states - len(Bsink_indices)
P = permuted_tprob[:n, :n]
R = permuted_tprob[:n, n:]
# calculate the conditional committors ( B = N*R ), B[i,j] is the prob
# state i ends in j, where j runs over the source + sink + waypoint
# (waypoint is position -1)
B = np.dot(np.linalg.inv(np.eye(n) - P), R)
# add probs for the sinks, waypoint / b[i] is P( i --> {C & not A, B} )
b = np.append(B[:, -1].flatten(), [0.0] * (len(Bsink_indices) - 1) + [1.0])
cond_committors = b * forward_committors[waypoint]
# get the original order
cond_committors = cond_committors[np.argsort(perm)]
return cond_committors | def _conditional_committors(source, sink, waypoint, tprob) | Computes the conditional committors :math:`q^{ABC^+}`, which give the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
tprob : np.ndarray
Transition matrix
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
Notes
-----
Employs dense linear algebra, memory use scales as N^2,
and cycle use scales as N^3
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | 5.85895 | 5.909542 | 0.991439 |
n_states = np.shape(tprob)[0]
sources = np.array(sources, dtype=int).reshape((-1, 1))
sinks = np.array(sinks, dtype=int).reshape((-1, 1))
# construct the committor problem
lhs = np.eye(n_states) - tprob
for a in sources:
lhs[a, :] = 0.0 # np.zeros(n)
lhs[:, a] = 0.0
lhs[a, a] = 1.0
for b in sinks:
lhs[b, :] = 0.0 # np.zeros(n)
lhs[:, b] = 0.0
lhs[b, b] = 1.0
ident_sinks = np.zeros(n_states)
ident_sinks[sinks] = 1.0
rhs = np.dot(tprob, ident_sinks)
rhs[sources] = 0.0
rhs[sinks] = 1.0
forward_committors = np.linalg.solve(lhs, rhs)
return forward_committors | def _committors(sources, sinks, tprob) | Get the forward committors of the reaction sources -> sinks.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
tprob : np.ndarray
Transition matrix
Returns
-------
forward_committors : np.ndarray
The forward committors for the reaction sources -> sinks
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016. | 2.29444 | 2.25843 | 1.015945 |
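A worked example for the dense solve in `_committors`: a 3-state chain with state 0 as the source and state 2 as the sink; by symmetry the middle state's committor is 0.5.

import numpy as np

tprob = np.array([[0.8, 0.2, 0.0],
                  [0.1, 0.8, 0.1],
                  [0.0, 0.2, 0.8]])
_committors(sources=[0], sinks=[2], tprob=tprob)
# -> array([0. , 0.5, 1. ])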
frames_by_state = []
for state, pairs in enumerate(selected_pairs_by_state):
if isinstance(trajectories[0], str):
if top:
process = lambda x, frame: md.load_frame(x, frame, top=top)
else:
process = lambda x, frame: md.load_frame(x, frame)
else:
process = lambda x, frame: x[frame]
frames = [process(trajectories[trj], frame) for trj, frame in pairs]
try: # If frames are mdtraj Trajectories
# Get an empty trajectory with correct shape and call the join
# method on it to merge trajectories
state_trj = frames[0][0:0].join(frames)
except AttributeError:
state_trj = np.array(frames) # Just a bunch of np arrays
frames_by_state.append(state_trj)
return frames_by_state | def map_drawn_samples(selected_pairs_by_state, trajectories, top=None) | Lookup trajectory frames using pairs of (trajectory, frame) indices.
Parameters
----------
selected_pairs_by_state : array, dtype=int, shape=(n_states, n_samples, 2)
selected_pairs_by_state[state, sample] gives the (trajectory, frame)
index associated with a particular sample from that state.
trajectories : list(md.Trajectory) or list(np.ndarray) or list(filenames)
The trajectories associated with sequences,
which will be used to extract coordinates of the state centers
from the raw trajectory data. This can also be a list of np.ndarray
objects or filenames. If they are filenames, mdtraj will be used to
load them
top : md.Topology, optional, default=None
Use this topology object to help mdtraj load filenames
Returns
-------
frames_by_state : mdtraj.Trajectory
Output will be a list of trajectories such that frames_by_state[state]
is a trajectory drawn from `state` of length `n_samples`. If
trajectories are numpy arrays, the output will be numpy arrays instead
of md.Trajectories
Examples
--------
>>> selected_pairs_by_state = hmm.draw_samples(sequences, 3)
>>> samples = map_drawn_samples(selected_pairs_by_state, trajectories)
Notes
-----
YOU are responsible for ensuring that selected_pairs_by_state and
trajectories correspond to the same dataset!
See Also
--------
ghmm.GaussianHMM.draw_samples : Draw samples from GHMM
ghmm.GaussianHMM.draw_centroids : Draw centroids from GHMM | 3.918947 | 3.509178 | 1.116771 |
# Upper and lower bounds on the sum of the K matrix, to ensure proper
# proposal weights. See Eq. 17 of [1].
K_MINUS = 0.9
K_PLUS = 1.1
Z = np.asarray(Z)
n_states = Z.shape[0]
if Z.ndim != 2 or Z.shape[1] != n_states:
raise ValueError("Z must be square. Z.shape=%s" % str(Z.shape))
K = 0.5 * (Z + Z.T) / np.sum(Z, dtype=float)
random = check_random_state(random_state)
n_accept = 0
for t in range(n_samples):
# proposal
# Select two indices in [0...n_states). We draw them by drawing a
# random floats in [0,1) and then rounding to int so that this method
# is exactly analogous to `metzner_mcmc_fast`, which, for each MCMC
# iteration, draws 4 random floats in [0,1) from the same numpy PSRNG,
# and then inside the C step kernel (src/metzner_mcmc.c) uses two of
# them like this. This ensures that this function and
# `metzner_mcmc_fast` give _exactly_ the same sequence of transition
# matricies, given the same random seed.
i, j = (random.rand(2) * n_states).astype(int)
sc = np.sum(K)
if i == j:
a, b = max(-K[i,j], K_MINUS - sc), K_PLUS - sc
else:
a, b = max(-K[i,j], 0.5*(K_MINUS - sc)), 0.5*(K_PLUS - sc)
epsilon = random.uniform(a, b)
K_proposal = np.copy(K)
K_proposal[i, j] += epsilon
if i != j:
K_proposal[j, i] += epsilon
# acceptance?
cutoff = np.exp(_logprob_T(_K_to_T(K_proposal), Z) -
_logprob_T(_K_to_T(K), Z))
r = random.rand()
# print 'i', i, 'j', j
# print 'a', a, 'b', b
# print 'cutoff', cutoff
# print 'r', r
# print 'sc', sc
if r < cutoff:
n_accept += 1
K = K_proposal
if (t+1) % n_thin == 0:
yield _K_to_T(K) | def metzner_mcmc_slow(Z, n_samples, n_thin=1, random_state=None) | Metropolis Markov chain Monte Carlo sampler for reversible transition
matrices
Parameters
----------
Z : np.array, shape=(n_states, n_states)
The effective count matrix, the number of observed transitions
between states plus the number of prior counts
n_samples : int
Number of steps to iterate the chain for
n_thin : int
Yield every ``n_thin``-th sample from the MCMC chain
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Notes
-----
The transition matrix posterior distribution is ::
P(T | Z) \propto \prod_{ij} T_{ij}^{Z_{ij}}
and constrained to be reversible, such that there exists a \pi s.t. ::
\pi_i T_{ij} = \pi_j T_{ji}
Yields
------
T : np.array, shape=(n_states, n_states)
This generator yields samples from the transition matrix posterior
References
----------
.. [1] P. Metzner, F. Noe and C. Schutte, "Estimating the sampling error:
Distribution of transition matrices and functions of transition
matrices for given trajectory data." Phys. Rev. E 80 021106 (2009)
See Also
--------
metzner_mcmc_fast | 4.071599 | 4.145965 | 0.982063 |
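A usage sketch for `metzner_mcmc_slow` (it also needs the module's `_K_to_T` and `_logprob_T` helpers, which are not shown here); the count matrix is illustrative.

import numpy as np

Z = np.array([[10, 2, 0],
              [2, 26, 3],
              [0, 3, 20]])
samples = list(metzner_mcmc_slow(Z, n_samples=5000, n_thin=10, random_state=0))
T_mean = np.mean(samples, axis=0)  # posterior mean transition matrix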
import matplotlib.pyplot as pp
def _scatter(Ts, xi, xj, yi, yj):
pp.grid(False)
pp.hexbin(Ts[:, xi, xj], Ts[:, yi, yj], cmap='hot_r', vmin=0, vmax=100)
pp.xlabel('T_{%d,%d}' % (xi+1, xj+1))
pp.ylabel('T_{%d,%d}' % (yi+1, yj+1))
pp.plot([0,1], [1,0], c='k')
pp.ylim(0, 1)
pp.xlim(0, 1)
C = np.array([[1, 10, 2], [2, 26, 3], [15, 20, 20]])
Ts = np.array(list(metzner_mcmc_slow(C, 100000)))
pp.figure(figsize=(6, 6)); pp.subplot(axisbg=(0,0,0,0))
_scatter(Ts, 0, 1, 0, 2)
pp.figure(figsize=(6, 6)); pp.subplot(axisbg=(0,0,0,0))
_scatter(Ts, 1, 0, 1, 2)
pp.figure(figsize=(6, 6)); pp.subplot(axisbg=(0,0,0,0))
_scatter(Ts, 2, 0, 2, 1)
pp.show() | def _metzner_figure_4() | Generate figure 4 from Metzner's paper [1].
This can be used as a rough test of the sampler | 2.537697 | 2.471442 | 1.026808 |
'''
Computes transition probabilities and standard errors of the transition probabilities due to
finite sampling using the MSM counts matrix. First, the transition probabilities are computed
by dividing each element c_ij by the row-summed counts of row i. The standard errors are then
computed by first computing the standard deviation of the transition probability, treating each count
as a Bernoulli process with p = t_ij (std = (t_ij - t_ij^2)^0.5). This is then divided by the
square root of the row-summed counts of row i to obtain the standard error.
Parameters:
----------
countsmat: np.ndarray
The msm counts matrix
transmat: np.ndarray
If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This
function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations,
which will be divided by the row-summed counts in the original supplied counts matrix.
Returns:
-----------
transmat, np.ndarray:
The MSM transition matrix
scale, np.ndarray:
The matrix of standard errors for each transition probability
'''
norm = np.sum(countsmat, axis=1)
if transmat is None:
transmat = (countsmat.transpose() / norm).transpose()
counts = (np.ones((len(transmat), len(transmat))) * norm).transpose()
scale = ((transmat - transmat ** 2) ** 0.5 / counts ** 0.5) + 10 ** -15
return transmat, scale | def create_perturb_params(countsmat, transmat=None) | Computes transition probabilities and standard errors of the transition probabilities due to
finite sampling using the MSM counts matrix. First, the transition probabilities are computed
by dividing each element c_ij by the row-summed counts of row i. The standard errors are then
computed by first computing the standard deviation of the transition probability, treating each count
as a Bernoulli process with p = t_ij (std = (t_ij - t_ij^2)^0.5). This is then divided by the
square root of the row-summed counts of row i to obtain the standard error.
Parameters:
----------
countsmat: np.ndarray
The msm counts matrix
transmat: np.ndarray
If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This
function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations,
which will be divided by the row-summed counts in the original supplied counts matrix.
Returns:
-----------
transmat, np.ndarray:
The MSM transition matrix
scale, np.ndarray:
The matrix of standard errors for each transition probability | 6.558023 | 1.446754 | 4.532924 |
'''
Perturbs each nonzero entry in the MSM transition matrix by treating it as a Gaussian random variable
with mean t_ij and standard deviation equal to the standard error computed using "create_perturb_params".
Returns a sampled transition matrix that takes into consideration errors due to finite sampling
(useful for bootstrapping, etc.)
Parameters:
----------
transmat: np.ndarray:
The transition matrix, whose elements serve as the means of the Gaussian random variables
scale: np.ndarray:
The matrix of standard errors. For transition probability t_ij, this is assumed to be the standard
error of the mean of a binomial distribution with p = transition probability and number of observations
equal to the summed counts in row i.
'''
# Draw one Gaussian sample per matrix element, clip negatives to zero,
# then renormalize each row so it sums to one. (np.random.normal also
# broadcasts over arrays directly; np.vectorize is not strictly needed.)
output = np.vectorize(np.random.normal)(transmat, scale)
output[np.where(output < 0)] = 0
return (output.transpose() / np.sum(output, axis=1)).transpose()
with mean t_ij and standard deviation equal to the standard error computed using "create_perturb_params".
Returns a sampled transition matrix that takes into consideration errors due to finite sampling
(useful for bootstrapping, etc.)
Parameters:
----------
transmat: np.ndarray:
The transition matrix, whose elements serve as the means of the Gaussian random variables
scale: np.ndarray:
The matrix of standard errors. For transition probability t_ij, this is assumed to be the standard
error of the mean of a binomial distribution with p = transition probability and number of observations
equal to the summed counts in row i. | 9.284511 | 1.514304 | 6.131207 |
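A hedged bootstrap sketch combining the two helpers above: resample transition matrices and collect a statistic (here the second-largest eigenvalue, which controls the slowest relaxation) over the resamples:

import numpy as np

transmat, scale = create_perturb_params(countsmat)  # countsmat as above
stats = []
for _ in range(500):
    T = perturb_tmat(transmat, scale)
    eigvals = np.sort(np.linalg.eigvals(T).real)
    stats.append(eigvals[-2])
lo, hi = np.percentile(stats, [2.5, 97.5])  # rough 95% interval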
tica_msm = TemplateDir(
'tica',
[
'tica/tica.py',
'tica/tica-plot.py',
'tica/tica-sample-coordinate.py',
'tica/tica-sample-coordinate-plot.py',
],
[
TemplateDir(
'cluster',
[
'cluster/cluster.py',
'cluster/cluster-plot.py',
'cluster/sample-clusters.py',
'cluster/sample-clusters-plot.py',
],
[
TemplateDir(
'msm',
[
'msm/timescales.py',
'msm/timescales-plot.py',
'msm/microstate.py',
'msm/microstate-plot.py',
'msm/microstate-traj.py',
],
[],
)
]
)
]
)
layout = TemplateDir(
'',
[
'0-test-install.py',
'1-get-example-data.py',
'README.md',
],
[
TemplateDir(
'analysis',
[
'analysis/gather-metadata.py',
'analysis/gather-metadata-plot.py',
],
[
TemplateDir(
'rmsd',
[
'rmsd/rmsd.py',
'rmsd/rmsd-plot.py',
],
[],
),
TemplateDir(
'landmarks',
[
'landmarks/find-landmarks.py',
'landmarks/featurize.py',
'landmarks/featurize-plot.py',
],
[tica_msm],
),
TemplateDir(
'dihedrals',
[
'dihedrals/featurize.py',
'dihedrals/featurize-plot.py',
],
[tica_msm],
)
]
)
]
)
return layout | def get_layout() | Specify a hierarchy of our templates. | 2.369858 | 2.319826 | 1.021567 |
if name == self.name:
if limit is not None:
assert limit == 1
self.subdirs = []
return self
for subdir in self.subdirs:
res = subdir.find(name, limit)
if res is not None:
return res
return None | def find(self, name, limit=None) | Find the named TemplateDir in the hierarchy | 2.67486 | 2.413258 | 1.108402 |
if msm is None:
msm = MarkovStateModel()
param_grid = {'lag_time' : lag_times}
models = param_sweep(msm, sequences, param_grid, n_jobs=n_jobs,
verbose=verbose)
timescales = [m.timescales_ for m in models]
n_timescales = min(n_timescales, min(len(ts) for ts in timescales))
timescales = np.array([ts[:n_timescales] for ts in timescales])
return timescales | def implied_timescales(sequences, lag_times, n_timescales=10,
msm=None, n_jobs=1, verbose=0) | Calculate the implied timescales for a given MSM.
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each
sequence should be a 1D iterable of state
labels. Labels can be integers, strings, or
other orderable objects.
lag_times : array-like
Lag times to calculate implied timescales at.
n_timescales : int, optional
Number of timescales to calculate.
msm : msmbuilder.msm.MarkovStateModel, optional
Instance of an MSM to specify parameters other
than the lag time. If None, then the default
parameters (as implemented by msmbuilder.msm.MarkovStateModel)
will be used.
n_jobs : int, optional
Number of jobs to run in parallel
Returns
-------
timescales : np.ndarray, shape = [n_models, n_timescales]
The slowest timescales (in units of lag times) for each
model. | 2.535767 | 2.684185 | 0.944706 |
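A usage sketch with synthetic state sequences; real sequences would come from clustering MD data, and the timescales of random labels are of course meaningless:

import numpy as np

sequences = [np.random.randint(0, 4, size=1000) for _ in range(5)]
lag_times = [1, 2, 5, 10, 20]
ts = implied_timescales(sequences, lag_times, n_timescales=3)
# ts[i, j] is the j-th slowest implied timescale of the MSM fit at
# lag_times[i]; plotting ts against lag_times is the usual convergence
# check for choosing the lag time.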
def inner(func):
@functools.wraps(func)
def wrapper(*fargs, **kw):
fname = name
if name is None:
fname = func.__name__
warnings.warn("%s is experimental" % fname,
category=ExperimentalWarning, stacklevel=2)
return func(*fargs, **kw)
return wrapper
return inner | def experimental(name=None) | A simple decorator to mark functions and methods as experimental. | 2.596628 | 2.52154 | 1.029779 |
lines = doc.splitlines()
labelstart, labelend = None, None
foundattributes = False
for i, line in enumerate(lines):
stripped = line.strip()
if stripped == 'Attributes':
foundattributes = True
if foundattributes and not labelstart and stripped.startswith('labels_'):
labelstart = len('\n'.join(lines[:i])) + 1
if labelstart and not labelend and stripped == '':
labelend = len('\n'.join(lines[:i + 1]))
if labelstart is None or labelend is None:
return doc
replace = '\n'.join([
' labels_ : list of arrays, each of shape [sequence_length, ]',
' The label of each point is an integer in [0, n_clusters).',
'',
])
return doc[:labelstart] + replace + doc[labelend:] | def _replace_labels(doc) | Really hacky find-and-replace method that modifies one of the sklearn
docstrings to change the semantics of labels_ for the subclasses | 3.619166 | 3.322793 | 1.089194 |
if isinstance(X, np.ndarray):
if not (X.dtype == 'float32' or X.dtype == 'float64'):
X = X.astype('float64')
labels, inertia = libdistance.assign_nearest(
X, self.cluster_centers_, metric=self.metric)
return labels | def predict(self, X) | Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
New data to predict.
Returns
-------
Y : array, shape [n_samples,]
Index of the closest center each sample belongs to. | 4.384741 | 4.30265 | 1.019079 |
MultiSequenceClusterMixin.fit(self, sequences)
self.cluster_ids_ = self._split_indices(self.cluster_ids_)
return self | def fit(self, sequences, y=None) | Fit the kcenters clustering on the data
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries, or ``md.Trajectory``. Each
sequence may have a different length, but they all must have the
same number of features, or the same number of atoms if they are
``md.Trajectory``s.
Returns
-------
self | 7.745762 | 13.017882 | 0.595009 |
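A hedged sketch of the multi-sequence fit/predict pattern above, assuming a concrete subclass such as msmbuilder.cluster.KCenters exposes these methods; the exact input convention (a single array vs. a list of sequences) depends on which class in the hierarchy you call:

import numpy as np
from msmbuilder.cluster import KCenters

sequences = [np.random.randn(100, 3) for _ in range(4)]
model = KCenters(n_clusters=8).fit(sequences)
labels = model.predict(sequences)  # one label array per input sequence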
if compress is not None or cache_size is not None:
warnings.warn("compress and cache_size are no longer valid options")
with open(filename, 'wb') as f:
pickle.dump(value, f) | def dump(value, filename, compress=None, cache_size=None) | Save an arbitrary python object using pickle.
Parameters
-----------
value : any Python object
The object to store to disk using pickle.
filename : string
The name of the file in which it is to be stored
compress : None
No longer used
cache_size : positive number, optional
No longer used
See Also
--------
load : corresponding loader | 2.933469 | 3.859736 | 0.760018 |
try:
with open(filename, 'rb') as f:
return pickle.load(f)
except Exception as e1:
try:
return jl_load(filename)
except Exception as e2:
raise IOError(
"Unable to load {} using the pickle or joblib protocol.\n"
"Pickle: {}\n"
"Joblib: {}".format(filename, e1, e2)
) | def load(filename) | Load an object that has been saved with dump.
We try to open it using the pickle protocol. As a fallback, we
use joblib.load. Joblib was the default prior to msmbuilder v3.2
Parameters
----------
filename : string
The name of the file to load. | 2.937313 | 2.80873 | 1.04578 |
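A round-trip sketch for the two pickle helpers above:

import os
import tempfile

obj = {'centers': [1, 2, 3], 'lag_time': 10}
path = os.path.join(tempfile.mkdtemp(), 'model.pkl')
dump(obj, path)
assert load(path) == obj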
print('Saving "%s"... (%s)' % (fn, type(value)))
dump(value, fn, compress=compress) | def verbosedump(value, fn, compress=None) | Verbose wrapper around dump | 4.851748 | 4.937037 | 0.982725 |
if hasattr(msm, 'all_transmats_'):
fluxes = np.zeros_like(msm.all_transmats_)
for i, el in enumerate(zip(msm.all_transmats_, msm.all_populations_)):
tprob = el[0]
populations = el[1]
fluxes[i, :, :] = _fluxes(sources, sinks, tprob,
populations, for_committors)
return np.median(fluxes, axis=0)
return _fluxes(sources, sinks, msm.transmat_, msm.populations_,
for_committors) | def fluxes(sources, sinks, msm, for_committors=None) | Compute the transition path theory flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM that has been fit to data.
for_committors : np.ndarray, optional
The forward committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
Returns
-------
flux_matrix : np.ndarray
The flux matrix
See Also
--------
net_fluxes
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016. | 2.752641 | 2.73773 | 1.005447 |
flux_matrix = fluxes(sources, sinks, msm, for_committors=for_committors)
net_flux = flux_matrix - flux_matrix.T
net_flux[np.where(net_flux < 0)] = 0.0
return net_flux | def net_fluxes(sources, sinks, msm, for_committors=None) | Computes the transition path theory net flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to data.
for_committors : np.ndarray, optional
The forward committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
Returns
-------
net_flux : np.ndarray
The net flux matrix
See Also
--------
fluxes
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016. | 2.674048 | 3.162248 | 0.845616 |
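A hedged sketch tying the two flux functions together; it assumes msm is a fitted msmbuilder MarkovStateModel exposing n_states_, transmat_, and populations_:

sources, sinks = [0], [msm.n_states_ - 1]
flux = fluxes(sources, sinks, msm)     # gross flux matrix
net = net_fluxes(sources, sinks, msm)  # net (one-directional) flux
total_flux = net[sources, :].sum()     # net reactive flux leaving the source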
n_states = np.shape(populations)[0]
# check if we got the committors
if for_committors is None:
for_committors = _committors(sources, sinks, tprob)
else:
for_committors = np.array(for_committors)
if for_committors.shape != (n_states,):
raise ValueError("Shape of committors %s should be %s" %
(str(for_committors.shape), str((n_states,))))
sources = np.array(sources).reshape((-1,))
sinks = np.array(sinks).reshape((-1,))
X = np.zeros((n_states, n_states))
X[(np.arange(n_states), np.arange(n_states))] = (populations *
(1.0 - for_committors))
Y = np.zeros((n_states, n_states))
Y[(np.arange(n_states), np.arange(n_states))] = for_committors
fluxes = np.dot(np.dot(X, tprob), Y)
fluxes[(np.arange(n_states), np.arange(n_states))] = np.zeros(n_states)
return fluxes | def _fluxes(sources, sinks, tprob, populations, for_committors=None) | Compute the transition path theory flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
tprob : np.ndarray
Transition matrix
populations : np.ndarray, (n_states,)
MSM populations
for_committors : np.ndarray, optional
The forward committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
Returns
-------
flux_matrix : np.ndarray
The flux matrix
See Also
--------
net_fluxes
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016. | 2.045923 | 2.067445 | 0.98959 |
data = []
indices = []
fns = []
for file in filenames:
kwargs = {} if file.endswith('.h5') else {'top': topology}
count = 0
for t in md.iterload(file, chunk=chunk, stride=stride, **kwargs):
x = featurizer.partial_transform(t)
n_frames = len(x)
data.append(x)
indices.append(count + (stride * np.arange(n_frames)))
fns.extend([file] * n_frames)
count += (stride * n_frames)
if len(data) == 0:
raise ValueError("no frames were loaded from the given trajectory files")
return np.concatenate(data), np.concatenate(indices), np.array(fns) | def featurize_all(filenames, featurizer, topology, chunk=1000, stride=1) | Load and featurize many trajectory files.
Parameters
----------
filenames : list of strings
List of paths to MD trajectory files
featurizer : Featurizer
The featurizer to be invoked on each trajectory trajectory as
it is loaded
topology : str, Topology, Trajectory
Topology or path to a topology file, used to load trajectories with
MDTraj
chunk : {int, None}
If chunk is an int, load the trajectories up in chunks using
md.iterload for better memory efficiency (less trajectory data needs
to be in memory at once)
stride : int, default=1
Only read every stride-th frame.
Returns
-------
data : np.ndarray, shape=(total_length_of_all_trajectories, n_features)
indices : np.ndarray, shape=(total_length_of_all_trajectories)
fns : np.ndarray shape=(total_length_of_all_trajectories)
These three arrays all share the same indexing, such that data[i] is
the featurized version of indices[i]-th frame in the MD trajectory
with filename fns[i]. | 3.295015 | 2.802971 | 1.175544 |
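A usage sketch, assuming mdtraj and an msmbuilder featurizer are installed; the trajectory and topology file names are placeholders:

from msmbuilder.featurizer import DihedralFeaturizer

featurizer = DihedralFeaturizer(types=['phi', 'psi'])
data, indices, fns = featurize_all(
    ['traj-0.xtc', 'traj-1.xtc'], featurizer, 'top.pdb', stride=5)
# data[i] was computed from frame indices[i] of the trajectory fns[i]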