code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
logger.debug("download page: %r, %r", args, kwargs)
return self.__clientDefer(downloadPage(*args, **kwargs)) | def __doDownloadPage(self, *args, **kwargs) | Works like client.downloadPage(), but handles incoming headers | 9.274728 | 8.634883 | 1.0741 |
"Verify a user's credentials."
parser = txml.Users(delegate)
return self.__downloadPage('/account/verify_credentials.xml', parser) | def verify_credentials(self, delegate=None) | Verify a user's credentials. | 26.344419 | 26.810453 | 0.982617 |
"Update your status. Returns the ID of the new post."
params = params.copy()
params['status'] = status
if source:
params['source'] = source
return self.__parsed_post(self.__post('/statuses/update.xml', params),
txml.parseUpdateResponse) | def update(self, status, source=None, params={}) | Update your status. Returns the ID of the new post. | 9.848126 | 6.075469 | 1.620966 |
parser = txml.Statuses(delegate)
return self.__postPage('/statuses/retweet/%s.xml' % (id), parser) | def retweet(self, id, delegate) | Retweet a post
Returns the retweet status info back to the given delegate | 16.201429 | 18.569172 | 0.872491 |
return self.__get('/statuses/friends_timeline.xml', delegate, params,
txml.Statuses, extra_args=extra_args) | def friends(self, delegate, params={}, extra_args=None) | Get updates from friends.
Calls the delegate once for each status object received. | 10.582291 | 12.037911 | 0.87908 |
return self.__get('/statuses/home_timeline.xml', delegate, params,
txml.Statuses, extra_args=extra_args) | def home_timeline(self, delegate, params={}, extra_args=None) | Get updates from friends.
Calls the delegate once for each status object received. | 7.569693 | 10.001319 | 0.756869 |
if user:
params['id'] = user
return self.__get('/statuses/user_timeline.xml', delegate, params,
txml.Statuses, extra_args=extra_args) | def user_timeline(self, delegate, user=None, params={}, extra_args=None) | Get the most recent updates for a user.
If no user is specified, the statuses for the authenticating user are
returned.
See search for example of how results are returned. | 5.412481 | 7.040149 | 0.768802 |
"Get the most recent public timeline."
return self.__get('/statuses/public_timeline.atom', delegate, params,
extra_args=extra_args) | def public_timeline(self, delegate, params={}, extra_args=None) | Get the most recent public timeline. | 9.748096 | 7.606725 | 1.28151 |
return self.__get('/direct_messages.xml', delegate, params,
txml.Direct, extra_args=extra_args) | def direct_messages(self, delegate, params={}, extra_args=None) | Get direct messages for the authenticating user.
Search results are returned one message at a time as DirectMessage
objects. | 10.768832 | 14.484067 | 0.743495 |
params = params.copy()
if user is not None:
params['user'] = user
if user_id is not None:
params['user_id'] = user_id
if screen_name is not None:
params['screen_name'] = screen_name
params['text'] = text
parser = txml.Direct(delegate)
return self.__postPage('/direct_messages/new.xml', parser, params) | def send_direct_message(self, text, user=None, delegate=None, screen_name=None, user_id=None, params={}) | Send a direct message | 2.812298 | 2.934687 | 0.958296 |
return self.__get('/statuses/replies.atom', delegate, params,
extra_args=extra_args) | def replies(self, delegate, params={}, extra_args=None) | Get the most recent replies for the authenticating user.
See search for example of how results are returned. | 10.013626 | 10.771002 | 0.929684 |
parser = txml.Users(delegate)
return self.__postPage('/friendships/create/%s.xml' % (user), parser) | def follow_user(self, user, delegate) | Follow the given user.
Returns the user info back to the given delegate | 19.622629 | 25.458941 | 0.770756 |
parser = txml.Users(delegate)
return self.__postPage('/friendships/destroy/%s.xml' % (user), parser) | def unfollow_user(self, user, delegate) | Unfollow the given user.
Returns the user info back to the given delegate | 18.616955 | 23.450497 | 0.793883 |
if user:
url = '/statuses/friends/' + user + '.xml'
else:
url = '/statuses/friends.xml'
return self.__get_maybe_paging(url, delegate, params, txml.PagedUserList, extra_args, page_delegate) | def list_friends(self, delegate, user=None, params={}, extra_args=None, page_delegate=None) | Get the list of friends for a user.
Calls the delegate with each user object found. | 5.968116 | 6.42521 | 0.928859 |
url = '/users/show/%s.xml' % (user)
d = defer.Deferred()
self.__downloadPage(url, txml.Users(lambda u: d.callback(u))) \
.addErrback(lambda e: d.errback(e))
return d | def show_user(self, user) | Get the info for a specific user.
Returns a deferred that will receive the user in a callback. | 7.751983 | 7.091542 | 1.093131 |
if args is None:
args = {}
args['q'] = query
return self.__doDownloadPage(self.search_url + '?' + self._urlencode(args),
txml.Feed(delegate, extra_args), agent=self.agent) | def search(self, query, delegate, args=None, extra_args=None) | Perform a search query.
Results are given one at a time to the delegate. An example delegate
may look like this:
def exampleDelegate(entry):
print entry.title | 6.958767 | 8.503998 | 0.818294 |
service.Service.startService(self)
self._toState('idle')
try:
self.connect()
except NoConsumerError:
pass | def startService(self) | Start the service.
This causes a transition to the C{'idle'} state, and then calls
L{connect} to attempt an initial connection. | 11.356627 | 10.324114 | 1.10001 |
if self._state == 'stopped':
raise Error("This service is not running. Not connecting.")
if self._state == 'connected':
if forceReconnect:
self._toState('disconnecting')
return True
else:
raise ConnectError("Already connected.")
elif self._state == 'aborting':
raise ConnectError("Aborting connection in progress.")
elif self._state == 'disconnecting':
raise ConnectError("Disconnect in progress.")
elif self._state == 'connecting':
if forceReconnect:
self._toState('aborting')
return True
else:
raise ConnectError("Connect in progress.")
if self.delegate is None:
if self._state != 'idle':
self._toState('idle')
raise NoConsumerError()
if self._state == 'waiting':
if self._reconnectDelayedCall.called:
self._reconnectDelayedCall = None
pass
else:
self._reconnectDelayedCall.reset(0)
return True
self._toState('connecting')
return True | def connect(self, forceReconnect=False) | Check current conditions and initiate connection if possible.
This is called to check preconditions for starting a new connection,
and initiating the connection itself.
If the service is not running, this will do nothing.
@param forceReconnect: Drop an existing connection to reconnect.
@type forceReconnect: C{False}
@raises L{ConnectError}: When a connection (attempt) is already in
progress, unless C{forceReconnect} is set.
@raises L{NoConsumerError}: When there is no consumer for incoming
tweets. No further connection attempts will be made, unless L{connect}
is called again. | 3.621999 | 3.377448 | 1.072407 |
self._errorState = None
def cb(result):
self.protocol = None
if self._state == 'stopped':
# Don't transition to any other state. We are stopped.
pass
else:
if isinstance(result, failure.Failure):
reason = result
else:
reason = None
self._toState('disconnected', reason)
self.protocol = protocol
d = protocol.deferred
d.addBoth(cb) | def makeConnection(self, protocol) | Called when the connection has been established.
This method is called when an HTTP 200 response has been received,
with the protocol that decodes the individual Twitter stream elements.
That protocol will call the consumer for all Twitter entries received.
The protocol, stored in L{protocol}, has a deferred that fires when
the connection is closed, causing a transition to the
C{'disconnected'} state.
@param protocol: The Twitter stream protocol.
@type protocol: L{TwitterStream} | 5.170686 | 4.950514 | 1.044475 |
def connect():
if self.noisy:
log.msg("Reconnecting now.")
self.connect()
backOff = self.backOffs[errorState]
if self._errorState != errorState or self._delay is None:
self._errorState = errorState
self._delay = backOff['initial']
else:
self._delay = min(backOff['max'], self._delay * backOff['factor'])
if self._delay == 0:
connect()
else:
self._reconnectDelayedCall = self.reactor.callLater(self._delay,
connect)
self._toState('waiting') | def _reconnect(self, errorState) | Attempt to reconnect.
If the current back-off delay is 0, L{connect} is called. Otherwise,
it will cause a transition to the C{'waiting'} state, ultimately
causing a call to L{connect} when the delay expires. | 4.210004 | 3.713782 | 1.133616 |
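The delay schedule computed above is easier to see in isolation. A minimal standalone sketch of the same growth rule, assuming a backOffs entry with 'initial', 'factor' and 'max' keys as in the code; the numbers are illustrative, not the library's defaults:

```python
# Sketch of the reconnect delay growth in _reconnect (illustrative values only).
back_off = {'initial': 0.25, 'factor': 2, 'max': 16}

delay = None
for attempt in range(8):
    if delay is None:
        delay = back_off['initial']                              # first error of this kind
    else:
        delay = min(back_off['max'], delay * back_off['factor'])
    print(attempt, delay)
# 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16, 16 -> capped at 'max'
```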
try:
method = getattr(self, '_state_%s' % state)
except AttributeError:
raise ValueError("No such state %r" % state)
log.msg("%s: to state %r" % (self.__class__.__name__, state))
self._state = state
method(*args, **kwargs) | def _toState(self, state, *args, **kwargs) | Transition to the next state.
@param state: Name of the next state. | 3.158419 | 3.390367 | 0.931586 |
if self._reconnectDelayedCall:
self._reconnectDelayedCall.cancel()
self._reconnectDelayedCall = None
self.loseConnection() | def _state_stopped(self) | The service is not running.
This is the initial state, and the state after L{stopService} was
called. To get out of this state, call L{startService}. If there is a
current connection, we disconnect. | 8.07396 | 6.365485 | 1.268397 |
def responseReceived(protocol):
self.makeConnection(protocol)
if self._state == 'aborting':
self._toState('disconnecting')
else:
self._toState('connected')
def trapError(failure):
self._toState('error', failure)
def onEntry(entry):
if self.delegate:
try:
self.delegate(entry)
except:
log.err()
else:
pass
d = self.api(onEntry, self.args)
d.addCallback(responseReceived)
d.addErrback(trapError) | def _state_connecting(self) | A connection is being started.
A successful attempt results in the state C{'connected'} when the
first response from Twitter has been received. Transitioning
to the state C{'aborting'} will cause an immediate disconnect instead,
by transitioning to C{'disconnecting'}.
Errors will cause a transition to the C{'error'} state. | 4.84985 | 4.349496 | 1.115037 |
log.err(reason)
def matchException(failure):
for errorState, backOff in self.backOffs.iteritems():
if 'errorTypes' not in backOff:
continue
if failure.check(*backOff['errorTypes']):
return errorState
return 'other'
errorState = matchException(reason)
self._reconnect(errorState) | def _state_error(self, reason) | The connection attempt resulted in an error.
Attempt a reconnect with a back-off algorithm. | 6.880278 | 6.338443 | 1.085484 |
if line and line.isdigit():
self._expectedLength = int(line)
self._rawBuffer = []
self._rawBufferLength = 0
self.setRawMode()
else:
self.keepAliveReceived() | def lineReceived(self, line) | Called when a line is received.
We expect a length in bytes or an empty line for keep-alive. If
we got a length, switch to raw mode to receive that amount of bytes. | 6.696274 | 4.021614 | 1.665071 |
self._rawBuffer.append(data)
self._rawBufferLength += len(data)
if self._rawBufferLength >= self._expectedLength:
receivedData = ''.join(self._rawBuffer)
expectedData = receivedData[:self._expectedLength]
extraData = receivedData[self._expectedLength:]
self._rawBuffer = None
self._rawBufferLength = None
self._expectedLength = None
self.datagramReceived(expectedData)
self.setLineMode(extraData) | def rawDataReceived(self, data) | Called when raw data is received.
Fill the raw buffer C{_rawBuffer} until we have received at least
C{_expectedLength} bytes. Call C{datagramReceived} with the received
byte string of the expected size. Then switch back to line mode with
the remainder of the buffer. | 2.641829 | 2.131545 | 1.239396 |
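The two handlers above (lineReceived/rawDataReceived) implement length-prefixed framing: a digit line announces the payload size, the payload follows in raw mode, and an empty line is a keep-alive. A standalone sketch of that framing, independent of Twisted; the helper name and wire snippet are invented for illustration:

```python
# Standalone sketch of the length-prefixed framing handled above; this is not
# the Twisted protocol class, just the same idea on a plain string.
def split_frames(stream):
    frames = []
    while stream:
        line, _, stream = stream.partition('\r\n')
        if line.isdigit():
            n = int(line)
            frames.append(stream[:n])   # exactly n bytes of payload
            stream = stream[n:]         # remainder goes back to "line mode"
        # an empty line is a keep-alive: nothing to emit
    return frames

print(split_frames('5\r\nhello\r\n2\r\nhi'))   # ['hello', 'hi']
```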
obj = cls()
obj.raw = data
for name, value in data.iteritems():
if cls.SIMPLE_PROPS and name in cls.SIMPLE_PROPS:
setattr(obj, name, value)
elif cls.COMPLEX_PROPS and name in cls.COMPLEX_PROPS:
value = cls.COMPLEX_PROPS[name].fromDict(value)
setattr(obj, name, value)
elif cls.LIST_PROPS and name in cls.LIST_PROPS:
value = [cls.LIST_PROPS[name].fromDict(item)
for item in value]
setattr(obj, name, value)
return obj | def fromDict(cls, data) | Fill this object's attributes from a dict for known properties. | 2.002048 | 1.936767 | 1.033707 |
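fromDict above relies on three class-level tables to decide how each key is set. A hedged sketch of that pattern (class names and properties below are invented, not necessarily the library's real ones; .items() stands in for the Python 2 .iteritems()):

```python
# Illustration of the SIMPLE_PROPS / COMPLEX_PROPS / LIST_PROPS pattern used
# by fromDict above; all names below are invented for the example.
class FromDictMixin(object):
    SIMPLE_PROPS = set()
    COMPLEX_PROPS = {}
    LIST_PROPS = {}

    @classmethod
    def fromDict(cls, data):
        obj = cls()
        obj.raw = data
        for name, value in data.items():
            if name in cls.SIMPLE_PROPS:
                setattr(obj, name, value)
            elif name in cls.COMPLEX_PROPS:
                setattr(obj, name, cls.COMPLEX_PROPS[name].fromDict(value))
            elif name in cls.LIST_PROPS:
                setattr(obj, name,
                        [cls.LIST_PROPS[name].fromDict(i) for i in value])
        return obj

class User(FromDictMixin):
    SIMPLE_PROPS = {'screen_name'}

class Status(FromDictMixin):
    SIMPLE_PROPS = {'text'}
    COMPLEX_PROPS = {'user': User}

status = Status.fromDict({'text': 'hi', 'user': {'screen_name': 'alice'}})
print(status.text, status.user.screen_name)   # hi alice
```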
try:
obj = json.loads(data)
except ValueError, e:
log.err(e, 'Invalid JSON in stream: %r' % data)
return
if u'text' in obj:
obj = Status.fromDict(obj)
else:
log.msg('Unsupported object %r' % obj)
return
self.callback(obj) | def datagramReceived(self, data) | Decode the JSON-encoded datagram and call the callback. | 4.263481 | 3.911668 | 1.089939 |
self.setTimeout(None)
if reason.check(ResponseDone, PotentialDataLoss):
self.deferred.callback(None)
else:
self.deferred.errback(reason) | def connectionLost(self, reason) | Called when the body is complete or the connection was lost.
@note: As the body length is usually not known at the beginning of the
response we expect a L{PotentialDataLoss} when Twitter closes the
stream, instead of L{ResponseDone}. Other exceptions are treated
as error conditions. | 4.145854 | 3.376323 | 1.22792 |
def create(delegate, extra_args=None):
return listParser(list_type, delegate, extra_args)
return create | def simpleListFactory(list_type) | Used for simple parsers that support only one type of object | 10.231252 | 9.46826 | 1.080584 |
if len(namelist) > 1:
def set_sub(i):
i.setSubDelegates(namelist[1:], before, after)
self.setBeforeDelegate(namelist[0], set_sub)
elif len(namelist) == 1:
self.setDelegate(namelist[0], before, after) | def setSubDelegates(self, namelist, before=None, after=None) | Set a delegate for a sub-sub-item, according to a list of names | 2.790969 | 2.566319 | 1.087538 |
path = path.strip('/')
list_path = path.split('/')
sentinel = list_path.pop(0)
return sentinel, list_path, path | def _split_path(path) | Split a path returned by the API.
Returns:
- the sentinel
- the rest of the path as a list.
- the original path stripped of '/' for normalisation. | 5.380266 | 3.798178 | 1.416539 |
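A worked example of the splitting above; 'gdrive' is just an illustrative mountpoint name:

```python
# The three values _split_path returns, traced by hand on one path.
path = '/gdrive/notebooks/analysis.ipynb'.strip('/')
list_path = path.split('/')
sentinel = list_path.pop(0)
print(sentinel)     # 'gdrive'
print(list_path)    # ['notebooks', 'analysis.ipynb']
print(path)         # 'gdrive/notebooks/analysis.ipynb'
```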
def _wrapper_method(self, old_path, new_path):
old_path, _old_path, old_sentinel = _split_path(old_path);
new_path, _new_path, new_sentinel = _split_path(new_path);
if old_sentinel != new_sentinel:
raise ValueError('Does not know how to move things across contents manager mountpoints')
else:
sentinel = new_sentinel
man = self.managers.get(sentinel, None)
if man is not None:
rename_meth = getattr(man, rename_like_method.__name__)
sub = rename_meth('/'.join(_old_path), '/'.join(_new_path))
return sub
else:
return rename_like_method(self, old_path, new_path)
return _wrapper_method | def path_dispatch_rename(rename_like_method) | decorator for rename-like function, that need dispatch on 2 arguments | 4.517883 | 4.413498 | 1.023651 |
with jconfig(profile) as config:
deact = True;
if not getattr(config.NotebookApp.contents_manager_class, 'startswith',lambda x:False)('jupyterdrive'):
deact=False
if 'gdrive' not in getattr(config.NotebookApp.tornado_settings,'get', lambda _,__:'')('contents_js_source',''):
deact=False
if deact:
del config['NotebookApp']['tornado_settings']['contents_js_source']
del config['NotebookApp']['contents_manager_class'] | def deactivate(profile='default') | should be a matter of just unsetting the above keys | 5.706626 | 5.879306 | 0.970629 |
'''Validates a coral.sequence data type.
:param seq: input sequence (coral.DNA, coral.RNA, or coral.Peptide).
:type seq: any
:returns: The material - 'dna', 'rna', or 'peptide'.
:rtype: str
:raises: ValueError
'''
if isinstance(seq, coral.DNA):
material = 'dna'
elif isinstance(seq, coral.RNA):
material = 'rna'
elif isinstance(seq, coral.Peptide):
material = 'peptide'
else:
raise ValueError('Input was not a recognized coral.sequence object.')
return material | def sequence_type(seq) | Validates a coral.sequence data type.
:param seq: input sequence (coral.DNA, coral.RNA, or coral.Peptide).
:type seq: any
:returns: The material - 'dna', 'rna', or 'peptide'.
:rtype: str
:raises: ValueError | 3.815509 | 1.754184 | 2.175091 |
'''Restriction endonuclease reaction.
:param dna: DNA template to digest.
:type dna: coral.DNA
:param restriction_enzyme: Restriction enzyme to use.
:type restriction_enzyme: coral.RestrictionSite
:returns: list of digested DNA fragments.
:rtype: coral.DNA list
'''
pattern = restriction_enzyme.recognition_site
located = dna.locate(pattern)
if not located[0] and not located[1]:
return [dna]
# Bottom strand indices are relative to the bottom strand 5' end.
# Convert to same type as top strand
pattern_len = len(pattern)
r_indices = [len(dna) - index - pattern_len for index in
located[1]]
# If sequence is palindrome, remove redundant results
if pattern.is_palindrome():
r_indices = [index for index in r_indices if index not in
located[0]]
# Flatten cut site indices
cut_sites = sorted(located[0] + r_indices)
# Go through each cut site starting at highest one
# Cut remaining template once, generating remaining + new
current = [dna]
for cut_site in cut_sites[::-1]:
new = _cut(current, cut_site, restriction_enzyme)
current.append(new[1])
current.append(new[0])
current.reverse()
# Combine first and last back together if digest was circular
if dna.circular:
current[0] = current.pop() + current[0]
return current | def digest(dna, restriction_enzyme) | Restriction endonuclease reaction.
:param dna: DNA template to digest.
:type dna: coral.DNA
:param restriction_enzyme: Restriction enzyme to use.
:type restriction_enzyme: coral.RestrictionSite
:returns: list of digested DNA fragments.
:rtype: coral.DNA list | 5.556808 | 4.371573 | 1.271123 |
'''Cuts template once at the specified index.
:param dna: DNA to cut
:type dna: coral.DNA
:param index: index at which to cut
:type index: int
:param restriction_enzyme: Enzyme with which to cut
:type restriction_enzyme: coral.RestrictionSite
:returns: 2-element list of digested sequence, including any overhangs.
:rtype: list
'''
# TODO: handle case where cut site is outside of recognition sequence,
# for both circular and linear cases where site is at index 0
# Find absolute indices at which to cut
cut_site = restriction_enzyme.cut_site
top_cut = index + cut_site[0]
bottom_cut = index + cut_site[1]
# Isolate left and right sequences
to_cut = dna.pop()
max_cut = max(top_cut, bottom_cut)
min_cut = min(top_cut, bottom_cut)
left = to_cut[:max_cut]
right = to_cut[min_cut:]
# If applicable, leave overhangs
diff = top_cut - bottom_cut
if not diff:
# Blunt-end cutter, no adjustment necessary
pass
elif diff > 0:
# 3' overhangs
left = coral.reaction.five_resect(left.flip(), diff).flip()
right = coral.reaction.five_resect(right, diff)
else:
# 5' overhangs
left = coral.reaction.three_resect(left, abs(diff))
right = coral.reaction.three_resect(right.flip(), abs(diff)).flip()
return [left, right] | def _cut(dna, index, restriction_enzyme) | Cuts template once at the specified index.
:param dna: DNA to cut
:type dna: coral.DNA
:param index: index at which to cut
:type index: int
:param restriction_enzyme: Enzyme with which to cut
:type restriction_enzyme: coral.RestrictionSite
:returns: 2-element list of digested sequence, including any overhangs.
:rtype: list | 4.456177 | 3.300551 | 1.350131 |
print(filename)
os.chdir(directory)
subprocess.Popen(["ipython", "nbconvert", "--to", "rst",
filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory) | def ipynb_to_rst(directory, filename) | Converts a given file in a directory to an rst in the same directory. | 2.934473 | 3.115523 | 0.941888 |
# The ipython_examples dir has to be in the same dir as this script
for root, subfolders, files in os.walk(os.path.abspath(directory)):
for f in files:
if ".ipynb_checkpoints" not in root:
if f.endswith("ipynb"):
ipynb_to_rst(root, f) | def convert_ipynbs(directory) | Recursively converts all ipynb files in a directory into rst files in
the same directory. | 3.938568 | 3.845685 | 1.024152 |
'''Generate context-dependent 'non-boundedness' scores for a DNA sequence.
:param dna: Sequence to score.
:type dna: coral.DNA
:param window_size: Window size in base pairs.
:type window_size: int
:param context_len: The number of bases of context to use when analyzing
each window.
:type context_len: int
:param step: The number of base pairs to move for each new window.
:type step: int
'''
# Generate window indices
window_start_ceiling = len(dna) - context_len - window_size
window_starts = range(context_len - 1, window_start_ceiling, step)
window_ends = [start + window_size for start in window_starts]
# Generate left and right in-context subsequences
l_starts = [step * i for i in range(len(window_starts))]
l_seqs = [dna[start:end] for start, end in zip(l_starts, window_ends)]
r_ends = [x + window_size + context_len for x in window_starts]
r_seqs = [dna[start:end].reverse_complement() for start, end in
zip(window_starts, r_ends)]
# Combine and calculate nupack pair probabilities
seqs = l_seqs + r_seqs
pairs_run = coral.analysis.nupack_multi(seqs, 'dna', 'pairs', {'index': 0})
# Focus on pair probabilities that matter - those in the window
pairs = [run[-window_size:] for run in pairs_run]
# Score by average pair probability
lr_scores = [sum(pair) / len(pair) for pair in pairs]
# Split into left-right contexts again and sum for each window
l_scores = lr_scores[0:len(seqs) / 2]
r_scores = lr_scores[len(seqs) / 2:]
scores = [(l + r) / 2 for l, r in zip(l_scores, r_scores)]
# Summarize and return window indices and score
summary = zip(window_starts, window_ends, scores)
return summary | def _context_walk(dna, window_size, context_len, step) | Generate context-dependent 'non-boundedness' scores for a DNA sequence.
:param dna: Sequence to score.
:type dna: coral.DNA
:param window_size: Window size in base pairs.
:type window_size: int
:param context_len: The number of bases of context to use when analyzing
each window.
:type context_len: int
:param step: The number of base pairs to move for each new window.
:type step: int | 3.805051 | 2.917796 | 1.304084 |
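The index arithmetic in _context_walk is easiest to check on toy numbers; a standalone sketch (the values below are made up, while the defaults shown in windows() are window_size=60, context_len=90, step=10):

```python
# Toy trace of the window index arithmetic used by _context_walk.
seq_len, window_size, context_len, step = 30, 6, 4, 5

window_start_ceiling = seq_len - context_len - window_size          # 20
window_starts = list(range(context_len - 1, window_start_ceiling, step))
window_ends = [start + window_size for start in window_starts]

print(window_starts)   # [3, 8, 13, 18]
print(window_ends)     # [9, 14, 19, 24]
```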
'''Walk through the sequence of interest in windows of window_size,
evaluate free (unbound) pair probabilities.
:param window_size: Window size in base pairs.
:type window_size: int
:param context_len: The number of bases of context to use when
analyzing each window.
:type context_len: int
:param step: The number of base pairs to move for each new window.
:type step: int
'''
self.walked = _context_walk(self.template, window_size, context_len,
step)
self.core_starts, self.core_ends, self.scores = zip(*self.walked)
return self.walked | def windows(self, window_size=60, context_len=90, step=10) | Walk through the sequence of interest in windows of window_size,
evaluate free (unbound) pair probabilities.
:param window_size: Window size in base pairs.
:type window_size: int
:param context_len: The number of bases of context to use when
analyzing each window.
:type context_len: int
:param step: The number of base pairs to move for each new window.
:type step: int | 5.52363 | 2.147151 | 2.572539 |
'''Plot the results of the run method.'''
try:
from matplotlib import pylab
except ImportError:
raise ImportError('Optional dependency matplotlib not installed.')
if self.walked:
fig = pylab.figure()
ax1 = fig.add_subplot(111)
ax1.plot(self.core_starts, self.scores, 'bo-')
pylab.xlabel('Core sequence start position (base pairs).')
pylab.ylabel('Score - Probability of being unbound.')
pylab.show()
else:
raise Exception('Run calculate() first so there\'s data to plot!') | def plot(self) | Plot the results of the run method. | 5.875856 | 5.378951 | 1.09238 |
'''Design primers for PCR amplifying any arbitrary sequence.
:param dna: Input sequence.
:type dna: coral.DNA
:param tm: Ideal primer Tm in degrees C.
:type tm: float
:param min_len: Minimum primer length.
:type min_len: int
:param tm_undershoot: Allowed Tm undershoot.
:type tm_undershoot: float
:param tm_overshoot: Allowed Tm overshoot.
:type tm_overshoot: float
:param end_gc: Obey the 'end on G or C' rule.
:type end_gc: bool
:param tm_parameters: Melting temp calculator method to use.
:type tm_parameters: string
:param overhangs: 2-tuple of overhang sequences.
:type overhangs: tuple
:param structure: Evaluate each primer for structure, with warning for high
structure.
:type structure: bool
:returns: A list of primers (the output of primer).
:rtype: list
'''
if not overhangs:
overhangs = [None, None]
templates = [dna, dna.reverse_complement()]
primer_list = []
for template, overhang in zip(templates, overhangs):
primer_i = primer(template, tm=tm, min_len=min_len,
tm_undershoot=tm_undershoot,
tm_overshoot=tm_overshoot, end_gc=end_gc,
tm_parameters=tm_parameters,
overhang=overhang, structure=structure)
primer_list.append(primer_i)
return primer_list | def primers(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
end_gc=False, tm_parameters='cloning', overhangs=None,
structure=False) | Design primers for PCR amplifying any arbitrary sequence.
:param dna: Input sequence.
:type dna: coral.DNA
:param tm: Ideal primer Tm in degrees C.
:type tm: float
:param min_len: Minimum primer length.
:type min_len: int
:param tm_undershoot: Allowed Tm undershoot.
:type tm_undershoot: float
:param tm_overshoot: Allowed Tm overshoot.
:type tm_overshoot: float
:param end_gc: Obey the 'end on G or C' rule.
:type end_gc: bool
:param tm_parameters: Melting temp calculator method to use.
:type tm_parameters: string
:param overhangs: 2-tuple of overhang sequences.
:type overhangs: tuple
:param structure: Evaluate each primer for structure, with warning for high
structure.
:type structure: bool
:returns: A list of primers (the output of primer).
:rtype: list | 2.637113 | 1.30226 | 2.025028 |
'''Find the 5' or 3' region on the reference sequence that uniquely matches the reverse
complement of the associated (5' or 3') region of one sequence in a list of
query sequences.
:param reference: Reference sequence.
:type reference: coral.DNA
:param query_list: List of query sequences.
:type query_list: coral.DNA list
:param min_overlap: Minimum overlap for a match (in bp).
:type min_overlap: int
:param right: Check right side of sequence (3'). False results in 5' check.
:type right: bool
:returns: Tuple of the indices of any matches and the size of the match in
bp.
:rtype: tuple of ints
:raises: AssemblyError if more than one match is found.
'''
size = min_overlap
found = []
# Reverse complementing here provides massive speedup?
rev_query = [seq.reverse_complement() for seq in query_list]
while not found and not size > len(reference):
for i, seq in enumerate(rev_query):
if right:
# FIXME: these getitems are the slowest part of assembly
# Easiest speedup?
if reference.endswith(seq[:size]):
found.append(i)
else:
if reference.startswith(seq[-size:]):
found.append(i)
size += 1
if len(found) > 1:
raise AssemblyError('Ambiguous oligo binding')
if not found:
return None
else:
return found[0], size | def bind_unique(reference, query_list, min_overlap=12, right=True) | Find the 5' or 3' region on the reference sequence that uniquely matches the reverse
complement of the associated (5' or 3') region of one sequence in a list of
query sequences.
:param reference: Reference sequence.
:type reference: coral.DNA
:param query_list: List of query sequences.
:type query_list: coral.DNA list
:param min_overlap: Minimum overlap for a match (in bp).
:type min_overlap: int
:param right: Check right side of sequence (3'). False results in 5' check.
:type right: bool
:returns: Tuple of the indices of any matches and the size of the match in
bp.
:rtype: tuple of ints
:raises: AssemblyError if more than one match is found. | 4.892581 | 2.215205 | 2.208636 |
'''Report mismatches, indels, and coverage.'''
# For every result, keep a dictionary of mismatches, insertions, and
# deletions
report = []
for result in self.aligned_results:
report.append(self._analyze_single(self.aligned_reference, result))
return report | def nonmatches(self) | Report mismatches, indels, and coverage. | 10.503677 | 6.897498 | 1.522824 |
'''Make a summary plot of the alignment and highlight nonmatches.'''
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Constants to use throughout drawing
n = len(self.results)
nbases = len(self.aligned_reference)
barheight = 0.4
# Vary height of figure based on number of results
figheight = 3 + 3 * (n - 1)
fig = plt.figure(figsize=(9, figheight))
ax1 = fig.add_subplot(111)
# Plot bars to represent coverage area
# Reference sequence
ax1.add_patch(patches.Rectangle((0, 0), nbases, barheight,
facecolor='black'))
# Results
for i, report in enumerate(self.nonmatches()):
j = i + 1
start, stop = report['coverage']
patch = patches.Rectangle((start, j), stop - start, barheight,
facecolor='darkgray')
ax1.add_patch(patch)
# Draw a vertical line for each type of result
plt.vlines(report['mismatches'], j, j + barheight,
colors='b')
plt.vlines(report['insertions'], j, j + barheight,
colors='r')
# Terminal trailing deletions shouldn't be added
deletions = []
crange = range(*report['coverage'])
deletions = [idx for idx in report['deletions'] if idx in crange]
plt.vlines(deletions, j, j + barheight,
colors='g')
ax1.set_xlim((0, nbases))
ax1.set_ylim((-0.3, n + 1))
ax1.set_yticks([i + barheight / 2 for i in range(n + 1)])
ax1.set_yticklabels(['Reference'] + self.names)
# Add legend
mismatch_patch = patches.Patch(color='blue', label='Mismatch')
insertion_patch = patches.Patch(color='red', label='Insertion')
deletion_patch = patches.Patch(color='green', label='Deletion')
plt.legend(handles=[mismatch_patch, insertion_patch, deletion_patch],
loc=1, ncol=3, mode='expand', borderaxespad=0.)
plt.show() | def plot(self) | Make a summary plot of the alignment and highlight nonmatches. | 3.375052 | 3.12481 | 1.080082 |
'''Report mismatches and indels for a single (aligned) reference and
result.'''
# TODO: Recalculate coverage based on reference (e.g. sequencing result
# longer than template
reference_str = str(reference)
result_str = str(result)
report = {'mismatches': [], 'insertions': [], 'deletions': []}
for i, (ref, res) in enumerate(zip(reference_str, result_str)):
if ref != res:
# It's a mismatch or indel
if ref == '-':
report['insertions'].append(i)
elif res == '-':
report['deletions'].append(i)
else:
report['mismatches'].append(i)
start = len(result_str) - len(result_str.lstrip('-'))
stop = len(result_str.rstrip('-'))
report['coverage'] = [start, stop]
return report | def _analyze_single(self, reference, result) | Report mismatches and indels for a single (aligned) reference and
result. | 3.62034 | 2.835608 | 1.276742 |
'''Remove terminal Ns from sequencing results.'''
for i, result in enumerate(self.results):
largest = max(str(result).split('N'), key=len)
start = result.locate(largest)[0][0]
stop = start + len(largest)
if start != stop:
self.results[i] = self.results[i][start:stop] | def _remove_n(self) | Remove terminal Ns from sequencing results. | 5.11028 | 3.932151 | 1.299615 |
'''Generate a random DNA sequence.
:param n: Output sequence length.
:type n: int
:returns: Random DNA sequence of length n.
:rtype: coral.DNA
'''
return coral.DNA(''.join([random.choice('ATGC') for i in range(n)])) | def random_dna(n) | Generate a random DNA sequence.
:param n: Output sequence length.
:type n: int
:returns: Random DNA sequence of length n.
:rtype: coral.DNA | 3.314126 | 2.221397 | 1.491911 |
'''Generate randomized codons given a peptide sequence.
:param peptide: Peptide sequence for which to generate randomized
codons.
:type peptide: coral.Peptide
:param frequency_cutoff: Relative codon usage cutoff - codons that
are rarer will not be used. Frequency is
relative to average over all codons for a
given amino acid.
:type frequency_cutoff: float
:param weighted: Use codon table
:type weighted: bool
:param table: Codon frequency table to use. Table should be organized
by amino acid, then be a dict of codon: frequency.
Only relevant if weighted=True or frequency_cutoff > 0.
Tables available:
constants.molecular_bio.CODON_FREQ_BY_AA['sc'] (default)
:type table: dict
:returns: Randomized sequence of codons (DNA) that code for the input
peptide.
:rtype: coral.DNA
:raises: ValueError if frequency_cutoff is set so high that there are no
codons available for an amino acid in the input peptide.
'''
if table is None:
table = CODON_FREQ_BY_AA['sc']
# Process codon table using frequency_cutoff
new_table = _cutoff(table, frequency_cutoff)
# Select codons randomly or using weighted distribution
rna = ''
for amino_acid in str(peptide):
codons = new_table[amino_acid.upper()]
if not codons:
raise ValueError('No {} codons at freq cutoff'.format(amino_acid))
if weighted:
cumsum = []
running_sum = 0
for codon, frequency in codons.iteritems():
running_sum += frequency
cumsum.append(running_sum)
random_num = random.uniform(0, max(cumsum))
for codon, value in zip(codons, cumsum):
if value > random_num:
selection = codon
break
else:
selection = random.choice(codons.keys())
rna += selection
return coral.RNA(rna) | def random_codons(peptide, frequency_cutoff=0.0, weighted=False, table=None) | Generate randomized codons given a peptide sequence.
:param peptide: Peptide sequence for which to generate randomized
codons.
:type peptide: coral.Peptide
:param frequency_cutoff: Relative codon usage cutoff - codons that
are rarer will not be used. Frequency is
relative to average over all codons for a
given amino acid.
:type frequency_cutoff: float
:param weighted: Use codon table
:type weighted: bool
:param table: Codon frequency table to use. Table should be organized
by amino acid, then be a dict of codon: frequency.
Only relevant if weighted=True or frequency_cutoff > 0.
Tables available:
constants.molecular_bio.CODON_FREQ_BY_AA['sc'] (default)
:type table: dict
:returns: Randomized sequence of codons (DNA) that code for the input
peptide.
:rtype: coral.DNA
:raises: ValueError if frequency_cutoff is set so high that there are no
codons available for an amino acid in the input peptide. | 4.489327 | 1.787573 | 2.511409 |
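The weighted branch above draws a codon in proportion to its frequency by walking a running cumulative sum. The same idea as a standalone sketch; the glycine frequencies below are invented, not a real usage table:

```python
import random

# Cumulative-sum weighted draw, as in the weighted branch of random_codons.
codons = {'GGT': 0.47, 'GGC': 0.19, 'GGA': 0.22, 'GGG': 0.12}   # invented values

cumsum = []
running_sum = 0.0
for codon, frequency in codons.items():
    running_sum += frequency
    cumsum.append(running_sum)

r = random.uniform(0, cumsum[-1])
for codon, value in zip(codons, cumsum):
    if r <= value:          # first bucket whose cumulative total covers r
        print(codon)
        break
```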
'''Generate new codon frequency table given a mean cutoff.
:param table: codon frequency table of form {amino acid: codon: frequency}
:type table: dict
:param frequency_cutoff: value between 0 and 1.0 for mean frequency cutoff
:type frequency_cutoff: float
:returns: A codon frequency table with some codons removed.
:rtype: dict
'''
new_table = {}
# IDEA: cutoff should be relative to most-frequent codon, not average?
for amino_acid, codons in table.iteritems():
average_cutoff = frequency_cutoff * sum(codons.values()) / len(codons)
new_table[amino_acid] = {}
for codon, frequency in codons.iteritems():
if frequency > average_cutoff:
new_table[amino_acid][codon] = frequency
return new_table | def _cutoff(table, frequency_cutoff) | Generate new codon frequency table given a mean cutoff.
:param table: codon frequency table of form {amino acid: codon: frequency}
:type table: dict
:param frequency_cutoff: value between 0 and 1.0 for mean frequency cutoff
:type frequency_cutoff: float
:returns: A codon frequency table with some codons removed.
:rtype: dict | 3.192814 | 1.927658 | 1.656318 |
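A worked example of the mean cutoff above on a two-codon toy table (frequencies invented): the threshold is frequency_cutoff times the mean frequency for that amino acid.

```python
# Toy trace of the cutoff rule in _cutoff.
codons = {'TTT': 0.6, 'TTC': 0.4}      # invented Phe frequencies
frequency_cutoff = 1.1

average_cutoff = frequency_cutoff * sum(codons.values()) / len(codons)   # 0.55
kept = {c: f for c, f in codons.items() if f > average_cutoff}
print(kept)                             # {'TTT': 0.6}; TTC falls below the bar
```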
'''Acquire a genome from Entrez
'''
# TODO: Can strandedness by found in fetched genome attributes?
# TODO: skip read/write step?
# Using a dummy email for now - does this violate NCBI guidelines?
email = '[email protected]'
Entrez.email = email
print 'Downloading Genome...'
handle = Entrez.efetch(db='nucleotide', id=str(genome_id), rettype='gb',
retmode='text')
print 'Genome Downloaded...'
tmpfile = os.path.join(mkdtemp(), 'tmp.gb')
with open(tmpfile, 'w') as f:
f.write(handle.read())
genome = coral.seqio.read_dna(tmpfile)
return genome | def fetch_genome(genome_id) | Acquire a genome from Entrez | 6.813064 | 6.542125 | 1.041414 |
'''Calculate expected fraction of primer dimers.
:param primer1: Forward primer.
:type primer1: coral.DNA
:param primer2: Reverse primer.
:type primer2: coral.DNA
:param template: DNA template.
:type template: coral.DNA
:param concentrations: list of concentrations for primers and the
template. Defaults are those for PCR with 1kb
template.
:type concentrations: list
:returns: Fraction of dimers versus the total amount of primer added.
:rtype: float
'''
# It is not reasonable (yet) to use a long template for doing these
# computations directly, as NUPACK does an exhaustive calculation and
# would take too long without a cluster.
# Instead, this function compares primer-primer binding to
# primer-complement binding
# Simulate binding of template vs. primers
nupack = coral.analysis.NUPACK([primer1.primer(), primer2.primer(),
primer1.primer().reverse_complement(),
primer2.primer().reverse_complement()])
# Include reverse complement concentration
primer_concs = [concentrations[0]] * 2
template_concs = [concentrations[1]] * 2
concs = primer_concs + template_concs
nupack_concs = nupack.concentrations(2, conc=concs)
dimer_conc = nupack_concs['concentrations'][5]
#primer1_template = nupack_concs['concentrations'][6]
#primer2_template = nupack_concs['concentrations'][10]
return dimer_conc / concs[0] | def dimers(primer1, primer2, concentrations=[5e-7, 3e-11]) | Calculate expected fraction of primer dimers.
:param primer1: Forward primer.
:type primer1: coral.DNA
:param primer2: Reverse primer.
:type primer2: coral.DNA
:param template: DNA template.
:type template: coral.DNA
:param concentrations: list of concentrations for primers and the
template. Defaults are those for PCR with 1kb
template.
:type concentrations: list
:returns: Fraction of dimers versus the total amount of primer added.
:rtype: float | 5.043592 | 3.250504 | 1.551634 |
'''Read DNA from file. Uses BioPython and coerces to coral format.
:param path: Full path to input file.
:type path: str
:returns: DNA sequence.
:rtype: coral.DNA
'''
filename, ext = os.path.splitext(os.path.split(path)[-1])
genbank_exts = ['.gb', '.ape']
fasta_exts = ['.fasta', '.fa', '.fsa', '.seq']
abi_exts = ['.abi', '.ab1']
if any([ext == extension for extension in genbank_exts]):
file_format = 'genbank'
elif any([ext == extension for extension in fasta_exts]):
file_format = 'fasta'
elif any([ext == extension for extension in abi_exts]):
file_format = 'abi'
else:
raise ValueError('File format not recognized.')
seq = SeqIO.read(path, file_format)
dna = coral.DNA(str(seq.seq))
if seq.name == '.':
dna.name = filename
else:
dna.name = seq.name
# Features
for feature in seq.features:
try:
dna.features.append(_seqfeature_to_coral(feature))
except FeatureNameError:
pass
dna.features = sorted(dna.features, key=lambda feature: feature.start)
# Used to use data_file_division, but it's inconsistent (not always the
# molecule type)
dna.circular = False
with open(path) as f:
first_line = f.read().split()
for word in first_line:
if word == 'circular':
dna.circular = True
return dna | def read_dna(path) | Read DNA from file. Uses BioPython and coerces to coral format.
:param path: Full path to input file.
:type path: str
:returns: DNA sequence.
:rtype: coral.DNA | 3.326115 | 2.776469 | 1.197966 |
'''Read .seq and .abi/.ab1 results files from a dir.
:param directory: Path to directory containing sequencing files.
:type directory: str
:returns: A list of DNA sequences.
:rtype: coral.DNA list
'''
dirfiles = os.listdir(directory)
seq_exts = ['.seq', '.abi', '.ab1']
# Exclude files that aren't sequencing results
seq_paths = [x for x in dirfiles if os.path.splitext(x)[1] in seq_exts]
paths = [os.path.join(directory, x) for x in seq_paths]
sequences = [read_dna(x) for x in paths]
return sequences | def read_sequencing(directory) | Read .seq and .abi/.ab1 results files from a dir.
:param directory: Path to directory containing sequencing files.
:type directory: str
:returns: A list of DNA sequences.
:rtype: coral.DNA list | 3.799152 | 2.116369 | 1.795127 |
'''Write DNA to a file (genbank or fasta).
:param dna: DNA sequence to write to file
:type dna: coral.DNA
:param path: file path to write. Has to be genbank or fasta file.
:type path: str
'''
# Check if path filetype is valid, remember for later
ext = os.path.splitext(path)[1]
if ext == '.gb' or ext == '.ape':
filetype = 'genbank'
elif ext == '.fa' or ext == '.fasta':
filetype = 'fasta'
else:
raise ValueError('Only genbank or fasta files are supported.')
# Convert features to Biopython form
# Information lost on conversion:
# specificity of feature type
# strandedness
# topology
features = []
for feature in dna.features:
features.append(_coral_to_seqfeature(feature))
# Biopython doesn't like 'None' here
# FIXME: this is a legacy feature - remove?
bio_id = dna.id if hasattr(dna, 'id') else ''
# Maximum length of name is 16
seq = SeqRecord(Seq(str(dna), alphabet=ambiguous_dna), id=bio_id,
name=dna.name[0:16].replace(' ', '_'), features=features,
description=dna.name)
if dna.circular:
seq.annotations['data_file_division'] = 'circular'
else:
seq.annotations['data_file_division'] = 'linear'
if filetype == 'genbank':
SeqIO.write(seq, path, 'genbank')
elif filetype == 'fasta':
SeqIO.write(seq, path, 'fasta') | def write_dna(dna, path) | Write DNA to a file (genbank or fasta).
:param dna: DNA sequence to write to file
:type dna: coral.DNA
:param path: file path to write. Has to be genbank or fasta file.
:type path: str | 3.750541 | 3.299584 | 1.136671 |
'''Write a list of primers out to a csv file. The first three columns are
compatible with the current IDT order form (name, sequence, notes). By
default there are no notes, which is an optional parameter.
:param primer_list: A list of primers.
:type primer_list: coral.Primer list
:param path: A path to the csv you want to write.
:type path: str
:param names: A list of strings to name each oligo. Must be the same length
as the primer_list.
:type names: str list
:param notes: A list of strings to provide a note for each oligo. Must be
the same length as the primer_list.
:type notes: str list
'''
# Check for notes and names having the right length, apply them to primers
if names is not None:
if len(names) != len(primer_list):
names_msg = 'Mismatch in number of notes and primers.'
raise PrimerAnnotationError(names_msg)
for i, name in enumerate(names):
primer_list[i].name = name
if notes is not None:
if len(notes) != len(primer_list):
notes_msg = 'Mismatch in number of notes and primers.'
raise PrimerAnnotationError(notes_msg)
for i, note in enumerate(notes):
primer_list[i].note = note
# Write to csv
with open(path, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['name', 'sequence', 'notes'])
for primer in primer_list:
string_rep = str(primer.overhang).lower() + str(primer.anneal)
writer.writerow([primer.name, string_rep, primer.note]) | def write_primers(primer_list, path, names=None, notes=None) | Write a list of primers out to a csv file. The first three columns are
compatible with the current IDT order form (name, sequence, notes). By
default there are no notes, which is an optional parameter.
:param primer_list: A list of primers.
:type primer_list: coral.Primer list
:param path: A path to the csv you want to write.
:type path: str
:param names: A list of strings to name each oligo. Must be the same length
as the primer_list.
:type names: str list
:param notes: A list of strings to provide a note for each oligo. Must be
the same length as the primer_list.
:type notes: str list | 2.856694 | 1.650687 | 1.73061 |
'''Translate genbank feature types into usable ones (currently identical).
The feature table is derived from the official genbank spec (gbrel.txt)
available at http://www.insdc.org/documents/feature-table
:param feature_type: feature to convert
:type feature_type: str
:param bio_to_coral: from coral to Biopython (True) or the other direction
(False)
:param bio_to_coral: bool
:returns: coral version of genbank feature_type, or vice-versa.
:rtype: str
'''
err_msg = 'Unrecognized feature type: {}'.format(feature_type)
if bio_to_coral:
try:
name = coral.constants.genbank.TO_CORAL[feature_type]
except KeyError:
raise ValueError(err_msg)
else:
try:
name = coral.constants.genbank.TO_BIO[feature_type]
except KeyError:
raise ValueError(err_msg)
return name | def _process_feature_type(feature_type, bio_to_coral=True) | Translate genbank feature types into usable ones (currently identical).
The feature table is derived from the official genbank spec (gbrel.txt)
available at http://www.insdc.org/documents/feature-table
:param feature_type: feature to convert
:type feature_type: str
:param bio_to_coral: from coral to Biopython (True) or the other direction
(False)
:param bio_to_coral: bool
:returns: coral version of genbank feature_type, or vice-versa.
:rtype: str | 4.404928 | 1.618157 | 2.722188 |
'''Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature
'''
# Some genomic sequences don't have a label attribute
# TODO: handle genomic cases differently than others. Some features lack
# a label but should still be incorporated somehow.
qualifiers = feature.qualifiers
if 'label' in qualifiers:
feature_name = qualifiers['label'][0]
elif 'locus_tag' in qualifiers:
feature_name = qualifiers['locus_tag'][0]
else:
raise FeatureNameError('Unrecognized feature name')
# Features with gaps are special, require looking at subfeatures
# Assumption: subfeatures are never more than one level deep
if feature.location_operator == 'join':
# Feature has gaps. Have to figure out start/stop from subfeatures,
# calculate gap indices. A nested feature model may be required
# eventually.
# Reorder the sub_feature list by start location
# Assumption: none of the subfeatures overlap so the last entry in
# the reordered list also has the final stop point of the feature.
# FIXME: Getting a deprecation warning about using sub_features
# instead of feature.location being a CompoundFeatureLocation
reordered = sorted(feature.location.parts,
key=lambda location: location.start)
starts = [int(location.start) for location in reordered]
stops = [int(location.end) for location in reordered]
feature_start = starts.pop(0)
feature_stop = stops.pop(-1)
starts = [start - feature_start for start in starts]
stops = [stop - feature_start for stop in stops]
feature_gaps = list(zip(stops, starts))
else:
# Feature doesn't have gaps. Ignore subfeatures.
feature_start = int(feature.location.start)
feature_stop = int(feature.location.end)
feature_gaps = []
feature_type = _process_feature_type(feature.type)
if feature.location.strand == -1:
feature_strand = 1
else:
feature_strand = 0
if 'gene' in qualifiers:
gene = qualifiers['gene']
else:
gene = []
if 'locus_tag' in qualifiers:
locus_tag = qualifiers['locus_tag']
else:
locus_tag = []
coral_feature = coral.Feature(feature_name, feature_start,
feature_stop, feature_type,
gene=gene, locus_tag=locus_tag,
qualifiers=qualifiers,
strand=feature_strand,
gaps=feature_gaps)
return coral_feature | def _seqfeature_to_coral(feature) | Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature | 3.583013 | 3.567224 | 1.004426 |
'''Convert a coral.Feature to a Biopython SeqFeature.
:param feature: coral Feature.
:type feature: coral.Feature
'''
bio_strand = 1 if feature.strand == 1 else -1
ftype = _process_feature_type(feature.feature_type, bio_to_coral=False)
sublocations = []
if feature.gaps:
# There are gaps. Have to define location_operator and add subfeatures
location_operator = 'join'
# Feature location means nothing for 'join' sequences?
# TODO: verify
location = FeatureLocation(ExactPosition(0), ExactPosition(1),
strand=bio_strand)
# Reconstruct start/stop indices for each subfeature
stops, starts = zip(*feature.gaps)
starts = [feature.start] + [start + feature.start for start in starts]
stops = [stop + feature.start for stop in stops] + [feature.stop]
# Build subfeatures
for start, stop in zip(starts, stops):
sublocation = FeatureLocation(ExactPosition(start),
ExactPosition(stop),
strand=bio_strand)
sublocations.append(sublocation)
location = CompoundLocation(sublocations, operator='join')
else:
# No gaps, feature is simple
location_operator = ''
location = FeatureLocation(ExactPosition(feature.start),
ExactPosition(feature.stop),
strand=bio_strand)
qualifiers = feature.qualifiers
qualifiers['label'] = [feature.name]
seqfeature = SeqFeature(location, type=ftype,
qualifiers=qualifiers,
location_operator=location_operator)
return seqfeature | def _coral_to_seqfeature(feature) | Convert a coral.Feature to a Biopython SeqFeature.
:param feature: coral Feature.
:type feature: coral.Feature | 3.572819 | 3.526821 | 1.013042 |
'''Given the SubstitutionMatrix input, generate an equivalent matrix that
is indexed by the ASCII number of each residue (e.g. A -> 65).'''
ords = [ord(c) for c in alphabet]
ord_matrix = np.zeros((max(ords) + 1, max(ords) + 1), dtype=np.integer)
for i, row_ord in enumerate(ords):
for j, col_ord in enumerate(ords):
ord_matrix[row_ord, col_ord] = matrix[i, j]
return ord_matrix | def as_ord_matrix(matrix, alphabet) | Given the SubstitutionMatrix input, generate an equivalent matrix that
is indexed by the ASCII number of each residue (e.g. A -> 65). | 3.562066 | 2.025381 | 1.758714 |
'''Calculate the alignment score from two aligned sequences.
:param a: The first aligned sequence.
:type a: str
:param b: The second aligned sequence.
:type b: str
:param gap_open: The cost of opening a gap (negative number).
:type gap_open: int
:param gap_extend: The cost of extending an open gap (negative number).
:type gap_extend: int.
:param matrix: A score matrix dictionary name. Examples can be found in
the substitution_matrices module.
'''
al = a
bl = b
l = len(al)
score = 0
assert len(bl) == l, 'Alignment lengths must be the same'
mat = as_ord_matrix(matrix)
gap_started = 0
for i in range(l):
if al[i] == '-' or bl[i] == '-':
score += gap_extend if gap_started else gap_open
gap_started = 1
else:
score += mat[ord(al[i]), ord(bl[i])]
gap_started = 0
return score | def score_alignment(a, b, gap_open, gap_extend, matrix) | Calculate the alignment score from two aligned sequences.
:param a: The first aligned sequence.
:type a: str
:param b: The second aligned sequence.
:type b: str
:param gap_open: The cost of opening a gap (negative number).
:type gap_open: int
:param gap_extend: The cost of extending an open gap (negative number).
:type gap_extend: int.
:param matrix: A score matrix dictionary name. Examples can be found in
the substitution_matrices module. | 2.939344 | 1.947677 | 1.509154 |
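As a sanity check on the scoring loop above, a hand-worked example with invented costs (+1 match, -1 mismatch) standing in for a real substitution matrix:

```python
# Hand trace of the gap/match accounting in score_alignment.
a, b = 'ACG--T', 'ATGCAT'
gap_open, gap_extend = -5, -2

score, gap_started = 0, False
for x, y in zip(a, b):
    if x == '-' or y == '-':
        score += gap_extend if gap_started else gap_open
        gap_started = True
    else:
        score += 1 if x == y else -1    # stand-in for the matrix lookup
        gap_started = False

print(score)   # +1 -1 +1 -5 -2 +1 = -5
```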
os.chdir(directory)
process = subprocess.Popen(["make", "html"], cwd=directory)
process.communicate() | def build_docs(directory) | Builds sphinx docs from a given directory. | 3.040383 | 3.111716 | 0.977076 |
'''Given string and multiplier n, find the binary (2**m) decomposition.
:param string: input string
:type string: str
:param n: multiplier
:type n: int
:returns: generator that produces string * 2**m for each 2**m term in the
binary expansion of n
:rtype: generator of str
'''
binary = [int(x) for x in bin(n)[2:]]
new_string = string
counter = 1
while counter <= len(binary):
if binary[-counter]:
yield new_string
new_string += new_string
counter += 1 | def _decompose(string, n) | Given string and multiplier n, find the binary (2**m) decomposition.
:param string: input string
:type string: str
:param n: multiplier
:type n: int
:returns: generator that produces string * 2**m for each 2**m term in the
binary expansion of n
:rtype: generator of str | 4.374403 | 2.083943 | 2.099099 |
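A worked example of the decomposition above, with the generator repeated verbatim so the snippet runs on its own:

```python
def _decompose(string, n):
    # Copied from the row above: yield string * 2**m for each set bit of n.
    binary = [int(x) for x in bin(n)[2:]]
    new_string = string
    counter = 1
    while counter <= len(binary):
        if binary[-counter]:
            yield new_string
        new_string += new_string
        counter += 1

# n = 5 is binary 101, so the pieces are 'AT' * 1 and 'AT' * 4 (5 copies total).
print([len(piece) for piece in _decompose('AT', 5)])   # [2, 8]
```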
'''Reverse complement a sequence.
:param sequence: Sequence to reverse complement
:type sequence: str
:param material: dna, rna, or peptide.
:type material: str
'''
code = dict(COMPLEMENTS[material])
reverse_sequence = sequence[::-1]
return ''.join([code[str(base)] for base in reverse_sequence]) | def reverse_complement(sequence, material) | Reverse complement a sequence.
:param sequence: Sequence to reverse complement
:type sequence: str
:param material: dna, rna, or peptide.
:type material: str | 3.646435 | 2.953553 | 1.234593 |
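A minimal illustration of the complement-and-reverse step above, with an inline DNA map standing in for COMPLEMENTS['dna']:

```python
# Reverse the sequence, then complement base by base (same order as above).
code = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}   # stand-in complement table
sequence = 'ATGC'
print(''.join(code[base] for base in sequence[::-1]))   # GCAT
```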
'''Verify that a given string is valid DNA, RNA, or peptide characters.
:param seq: DNA, RNA, or peptide sequence.
:type seq: str
:param material: Input material - 'dna', 'rna', or 'pepide'.
:type sequence: str
:returns: Whether the `seq` is a valid string of `material`.
:rtype: bool
:raises: ValueError if `material` isn't \'dna\', \'rna\', or \'peptide\'.
ValueError if `seq` contains invalid characters for its
material type.
'''
errs = {'dna': 'DNA', 'rna': 'RNA', 'peptide': 'peptide'}
if material == 'dna' or material == 'rna' or material == 'peptide':
alphabet = ALPHABETS[material]
err_msg = errs[material]
else:
msg = 'Input material must be \'dna\', \'rna\', or \'peptide\'.'
raise ValueError(msg)
# This is a bottleneck when modifying sequence - hence the run_checks
# optional parameter in sequence objects..
# First attempt with cython was slower. Could also try pypy.
if re.search('[^' + alphabet + ']', seq):
raise ValueError('Encountered a non-%s character' % err_msg) | def check_alphabet(seq, material) | Verify that a given string is valid DNA, RNA, or peptide characters.
:param seq: DNA, RNA, or peptide sequence.
:type seq: str
:param material: Input material - 'dna', 'rna', or 'pepide'.
:type sequence: str
:returns: Whether the `seq` is a valid string of `material`.
:rtype: bool
:raises: ValueError if `material` isn't \'dna\', \'rna\', or \'peptide\'.
ValueError if `seq` contains invalid characters for its
material type. | 4.965397 | 2.842016 | 1.747139 |
'''Validate and process sequence inputs.
:param seq: input sequence
:type seq: str
:param material: DNA, RNA, or peptide
:type material: str
:returns: Uppercase version of `seq` with the alphabet checked by
check_alphabet().
:rtype: str
'''
check_alphabet(seq, material)
seq = seq.upper()
return seq | def process_seq(seq, material) | Validate and process sequence inputs.
:param seq: input sequence
:type seq: str
:param material: DNA, RNA, or peptide
:type material: str
:returns: Uppercase version of `seq` with the alphabet checked by
check_alphabet().
:rtype: str | 5.869313 | 1.927272 | 3.045399 |
'''Test whether a sequence is palindrome.
:param seq: Sequence to analyze (DNA or RNA).
:type seq: coral.DNA or coral.RNA
:returns: Whether a sequence is a palindrome.
:rtype: bool
'''
seq_len = len(seq)
if seq_len % 2 == 0:
# Sequence has even number of bases, can test non-overlapping seqs
wing = seq_len / 2
l_wing = seq[0: wing]
r_wing = seq[wing:]
if l_wing == r_wing.reverse_complement():
return True
else:
return False
else:
# Sequence has odd number of bases and cannot be a palindrome
return False | def palindrome(seq) | Test whether a sequence is palindrome.
:param seq: Sequence to analyze (DNA or RNA).
:type seq: coral.DNA or coral.RNA
:returns: Whether a sequence is a palindrome.
:rtype: bool | 3.416863 | 2.487017 | 1.37388 |
'''Create a copy of the current instance.
:returns: A safely editable copy of the current sequence.
'''
# Significant performance improvements by skipping alphabet check
return type(self)(self.seq, self.material, run_checks=False) | def copy(self) | Create a copy of the current instance.
:returns: A safely editable copy of the current sequence. | 18.710615 | 10.981757 | 1.703791 |
'''Find sequences matching a pattern.
:param pattern: Sequence for which to find matches.
:type pattern: str
:returns: Indices of pattern matches.
:rtype: list of ints
'''
if len(pattern) > len(self):
raise ValueError('Search pattern longer than searchable ' +
'sequence.')
seq = self.seq
pattern = str(pattern).upper()
re_pattern = '(?=' + pattern + ')'
matches = [index.start() % len(self) for index in
re.finditer(re_pattern, seq)]
return matches | def locate(self, pattern) | Find sequences matching a pattern.
:param pattern: Sequence for which to find matches.
:type pattern: str
:returns: Indices of pattern matches.
:rtype: list of ints | 4.261792 | 3.094833 | 1.377067 |
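locate wraps the pattern in a zero-width lookahead so that overlapping matches are not skipped; a minimal standalone illustration:

```python
import re

seq = 'ATATA'
print([m.start() for m in re.finditer('ATA', seq)])       # [0]    - misses the overlap
print([m.start() for m in re.finditer('(?=ATA)', seq)])   # [0, 2] - both occurrences
```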
'''Return a copy of the Feature.
:returns: A safely editable copy of the current feature.
:rtype: coral.Feature
'''
return type(self)(self.name, self.start, self.stop, self.feature_type,
gene=self.gene, locus_tag=self.locus_tag,
qualifiers=self.qualifiers, strand=self.strand) | def copy(self) | Return a copy of the Feature.
:returns: A safely editable copy of the current feature.
:rtype: coral.Feature | 4.888325 | 3.044854 | 1.605438 |
'''Split Nupack commands over processors.
:param inputs: List of sequences, same format as for coral.analysis.Nupack.
:type inpus: list
:param material: Input material: 'dna' or 'rna'.
:type material: str
:param cmd: Command: 'mfe', 'pairs', 'complexes', or 'concentrations'.
:type cmd: str
:param arguments: Arguments for the command.
:type arguments: str
:returns: A list of the same return value you would get from `cmd`.
:rtype: list
'''
nupack_pool = multiprocessing.Pool()
try:
args = [{'seq': seq,
'cmd': cmd,
'material': material,
'arguments': arguments} for seq in seqs]
nupack_iterator = nupack_pool.imap(run_nupack, args)
total = len(seqs)
msg = ' calculations complete.'
passed = 4
while report:
completed = nupack_iterator._index
if (completed == total):
break
else:
if passed >= 4:
print '({0}/{1}) '.format(completed, total) + msg
passed = 0
passed += 1
time.sleep(1)
multi_output = [x for x in nupack_iterator]
nupack_pool.close()
nupack_pool.join()
except KeyboardInterrupt:
nupack_pool.terminate()
nupack_pool.close()
raise KeyboardInterrupt
return multi_output | def nupack_multi(seqs, material, cmd, arguments, report=True) | Split Nupack commands over processors.
:param seqs: List of sequences, same format as for coral.analysis.Nupack.
:type seqs: list
:param material: Input material: 'dna' or 'rna'.
:type material: str
:param cmd: Command: 'mfe', 'pairs', 'complexes', or 'concentrations'.
:type cmd: str
:param arguments: Arguments for the command.
:type arguments: str
:returns: A list of the same return value you would get from `cmd`.
:rtype: list | 3.821985 | 2.187514 | 1.747182 |
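A generic sketch of the same pool/imap progress pattern with a trivial worker; since _index on the imap iterator is a private CPython detail, this version counts completed results directly:

import multiprocessing
import time

def work(x):
    # Trivial stand-in for a Nupack calculation
    time.sleep(0.1)
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.Pool()
    try:
        results = []
        for done, result in enumerate(pool.imap(work, range(20)), 1):
            results.append(result)
            if done % 5 == 0:
                print('(%d/20) calculations complete.' % done)
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        pool.terminate()
        raise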
'''Run picklable Nupack command.
:param kwargs: keyword arguments to pass to Nupack as well as 'cmd'.
:returns: Variable - whatever `cmd` returns.
'''
run = NUPACK(kwargs['seq'])
output = getattr(run, kwargs['cmd'])(**kwargs['arguments'])
return output | def run_nupack(kwargs) | Run picklable Nupack command.
:param kwargs: keyword arguments to pass to Nupack as well as 'cmd'.
:returns: Variable - whatever `cmd` returns. | 8.987692 | 3.070693 | 2.926926 |
'''Compute the suboptimal structures within a defined energy gap of the
MFE. Runs the \'subopt\' command.
:param strand: Strand on which to run subopt. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param gap: Energy gap within to restrict results, e.g. 0.1.
:type gap: float
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA.
:type magnesium: float
:returns: A list of dictionaries of the type returned by .mfe().
:rtype: list
'''
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
magnesium, multi=False)
# Set up the input file and run the command. Note: no STDOUT
lines = [str(strand), str(gap)]
self._run('subopt', cmd_args, lines)
# Read the output from file
structures = self._process_mfe(self._read_tempfile('subopt.subopt'))
return structures | def subopt(self, strand, gap, temp=37.0, pseudo=False, material=None,
dangles='some', sodium=1.0, magnesium=0.0) | Compute the suboptimal structures within a defined energy gap of the
MFE. Runs the \'subopt\' command.
:param strand: Strand on which to run subopt. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param gap: Energy gap within to restrict results, e.g. 0.1.
:type gap: float
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA.
:type magnesium: float
:returns: A list of dictionaries of the type returned by .mfe().
:rtype: list | 5.215931 | 1.690027 | 3.086299 |
'''Enumerates the total number of secondary structures over the
structural ensemble Ω(π). Runs the \'count\' command.
:param strand: Strand on which to run count. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:returns: The count of the number of structures in the structural
ensemble.
:rtype: int
'''
# Set up command flags
if pseudo:
cmd_args = ['-pseudo']
else:
cmd_args = []
# Set up the input file and run the command
stdout = self._run('count', cmd_args, [str(strand)]).split('\n')
# Return the count
return int(float(stdout[-2])) | def count(self, strand, pseudo=False) | Enumerates the total number of secondary structures over the
structural ensemble Ω(π). Runs the \'count\' command.
:param strand: Strand on which to run count. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:returns: The count of the number of structures in the structural
ensemble.
:rtype: int | 6.827022 | 2.173788 | 3.140611 |
'''Enumerates the total number of secondary structures over the
structural ensemble Ω(π) with an ordered permutation of strands. Runs
the \'count\' command.
:param strands: List of strands to use as inputs to count -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:returns: The count of the number of structures in the structural
ensemble for the specified complex.
:rtype: int
'''
# Set up command flags
cmd_args = ['-multi']
if pseudo:
cmd_args.append('-pseudo')
# Set up the input file and run the command
if permutation is None:
permutation = range(1, len(strands) + 1)
lines = self._multi_lines(strands, permutation)
stdout = self._run('count', cmd_args, lines).split('\n')
return int(float(stdout[-2])) | def count_multi(self, strands, permutation=None, pseudo=False) | Enumerates the total number of secondary structures over the
structural ensemble Ω(π) with an ordered permutation of strands. Runs
the \'count\' command.
:param strands: List of strands to use as inputs to count -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:returns: The count of the number of structures in the structural
ensemble for the specified complex.
:rtype: int | 5.471445 | 1.505398 | 3.634551 |
'''Calculate the free energy of a given sequence structure. Runs the
\'energy\' command.
:param strand: Strand on which to run energy. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param dotparens: The structure in dotparens notation.
:type dotparens: str
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA.
:type magnesium: float
:returns: The free energy of the sequence with the specified secondary
structure.
:rtype: float
'''
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
magnesium, multi=False)
# Set up the input file and run the command. Note: no STDOUT
lines = [str(strand), dotparens]
stdout = self._run('energy', cmd_args, lines).split('\n')
# Return the energy
return float(stdout[-2]) | def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None,
dangles='some', sodium=1.0, magnesium=0.0) | Calculate the free energy of a given sequence structure. Runs the
\'energy\' command.
:param strand: Strand on which to run energy. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param dotparens: The structure in dotparens notation.
:type dotparens: str
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA.
:type magnesium: float
:returns: The free energy of the sequence with the specified secondary
structure.
:rtype: float | 4.645195 | 1.60419 | 2.895664 |
'''Estimate the amount of time it will take to calculate all the
partition functions for each circular permutation - estimate the time
the actual \'complexes\' command will take to run.
:param strands: Strands on which to run energy. Strands must be either
coral.DNA or coral.RNA).
:type strands: list of coral.DNA or coral.RNA
:param max_size: Maximum complex size to consider (maximum number of
strand species in complex).
:type max_size: int
:returns: The estimated time to run complexes' partition functions, in
seconds.
:rtype: float
'''
cmd_args = ['-quiet', '-timeonly']
lines = self._multi_lines(strands, [max_size])
stdout = self._run('complexes', cmd_args, lines)
return float(re.search('calculation\: (.*) seconds', stdout).group(1)) | def complexes_timeonly(self, strands, max_size) | Estimate the amount of time it will take to calculate all the
partition functions for each circular permutation - estimate the time
the actual \'complexes\' command will take to run.
:param strands: Strands on which to run energy. Strands must be either
coral.DNA or coral.RNA).
:type strands: list of coral.DNA or coral.RNA
:param max_size: Maximum complex size to consider (maximum number of
strand species in complex).
:type max_size: int
:returns: The estimated time to run complexes' partition functions, in
seconds.
:rtype: float | 7.573513 | 2.125971 | 3.562378 |
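A standalone check of the stdout parse used above, run against a made-up NUPACK-style status line (the real output wording may differ):

import re

stdout = 'Total estimated time for calculation: 12.5 seconds\n'  # made-up example line
print(float(re.search('calculation: (.*) seconds', stdout).group(1)))  # 12.5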
'''Prepares lines to write to file for pfunc command input.
:param strands: Strand inputs (cr.DNA or cr.RNA).
:type strands: list
:param permutation: Permutation (e.g. [1, 2, 3, 4]) of the type used
by pfunc_multi.
:type permutation: list
'''
lines = []
# Write the total number of distinct strands
lines.append(str(len(strands)))
# Write the distinct strands
lines += [str(strand) for strand in strands]
# Write the permutation
lines.append(' '.join(str(p) for p in permutation))
return lines | def _multi_lines(self, strands, permutation) | Prepares lines to write to file for pfunc command input.
:param strands: Strand inputs (cr.DNA or cr.RNA).
:type strands: list
:param permutation: Permutation (e.g. [1, 2, 3, 4]) of the type used
by pfunc_multi.
:type permutation: list | 4.868887 | 1.907195 | 2.552905 |
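For example, two hypothetical strands with the identity permutation would yield input lines like the following (strand sequences invented for illustration):

strands = ['ATGCATGC', 'GGATCC']   # hypothetical inputs
# _multi_lines(strands, [1, 2]) would return:
# ['2', 'ATGCATGC', 'GGATCC', '1 2']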
'''Read in and return file that's in the tempdir.
:param filename: Name of the file to read.
:type filename: str
'''
with open(os.path.join(self._tempdir, filename)) as f:
return f.read() | def _read_tempfile(self, filename) | Read in and return file that's in the tempdir.
:param filename: Name of the file to read.
:type filename: str | 4.243796 | 2.439801 | 1.739402 |
'''Given a set of pair probability lines, construct a numpy array.
:param pairlist: a list of pair probability triples
:type pairlist: list
:returns: An upper triangular matrix of pair probabilities augmented
with one extra column that represents the unpaired
probabilities.
:rtype: numpy.array
'''
mat = np.zeros((dim, dim + 1))
for line in pairlist:
i = int(line[0]) - 1
j = int(line[1]) - 1
prob = float(line[2])
mat[i, j] = prob
return mat | def _pairs_to_np(self, pairlist, dim) | Given a set of pair probability lines, construct a numpy array.
:param pairlist: a list of pair probability triples
:type pairlist: list
:returns: An upper triangular matrix of pair probabilities augmented
with one extra column that represents the unpaired
probabilities.
:rtype: numpy.array | 4.205786 | 1.639922 | 2.564625 |
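A toy illustration of the resulting layout with numpy; an unpaired base is reported with a second index of dim + 1, which is what fills the extra final column:

import numpy as np

dim = 3
pairlist = [['1', '2', '0.9'],  # base 1 pairs base 2 with p = 0.9
            ['3', '4', '0.8']]  # base 3 unpaired (second index = dim + 1)
mat = np.zeros((dim, dim + 1))
for i, j, p in pairlist:
    mat[int(i) - 1, int(j) - 1] = float(p)
print(mat)
# [[0.  0.9 0.  0. ]
#  [0.  0.  0.  0. ]
#  [0.  0.  0.  0.8]]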
'''Adjust a feature's location when flipping DNA.
:param feature: The feature to flip.
:type feature: coral.Feature
:param parent_len: The length of the sequence to which the feature belongs.
:type parent_len: int
'''
copy = feature.copy()
# Put on the other strand
if copy.strand == 0:
copy.strand = 1
else:
copy.strand = 0
# Adjust locations - guarantee that start is always less than end
copy.start = parent_len - copy.start
copy.stop = parent_len - copy.stop
copy.start, copy.stop = copy.stop, copy.start
return copy | def _flip_feature(self, feature, parent_len) | Adjust a feature's location when flipping DNA.
:param feature: The feature to flip.
:type feature: coral.Feature
:param parent_len: The length of the sequence to which the feature belongs.
:type parent_len: int | 3.440274 | 2.559596 | 1.344069 |
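A quick coordinate check of the flip arithmetic: a feature spanning [2, 5) on a 10 bp parent ends up at [5, 8) on the flipped sequence.

parent_len = 10
start, stop = 2, 5
# Mirror both ends, then swap so start stays smaller than stop.
new_start, new_stop = parent_len - stop, parent_len - start
print('[%d, %d)' % (new_start, new_stop))  # [5, 8)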
'''Open in ApE if `ApE` is in your command line path.'''
# TODO: simplify - make ApE look in PATH only
cmd = 'ApE'
if ape_path is None:
# Check for ApE in PATH
ape_executables = []
for path in os.environ['PATH'].split(os.pathsep):
exepath = os.path.join(path, cmd)
ape_executables.append(os.access(exepath, os.X_OK))
if not any(ape_executables):
raise Exception('Ape not in PATH. Use ape_path kwarg.')
else:
cmd = ape_path
# Write the sequence to a temporary .ape file for ApE to open
tmp = tempfile.mkdtemp()
if self.name is not None and self.name:
filename = os.path.join(tmp, '{}.ape'.format(self.name))
else:
filename = os.path.join(tmp, 'tmp.ape')
coral.seqio.write_dna(self, filename)
process = subprocess.Popen([cmd, filename])
# Block until window is closed
try:
process.wait()
shutil.rmtree(tmp)
except KeyboardInterrupt:
shutil.rmtree(tmp) | def ape(self, ape_path=None) | Open in ApE if `ApE` is in your command line path. | 3.774205 | 3.172627 | 1.189615 |
'''Create a copy of the current instance.
:returns: A safely-editable copy of the current sequence.
:rtype: coral.DNA
'''
# Significant performance improvements by skipping alphabet check
features_copy = [feature.copy() for feature in self.features]
copy = type(self)(self.top.seq, circular=self.circular,
features=features_copy, name=self.name,
bottom=self.bottom.seq, run_checks=False)
return copy | def copy(self) | Create a copy of the current instance.
:returns: A safely-editable copy of the current sequence.
:rtype: coral.DNA | 9.064727 | 5.508536 | 1.645578 |
'''Circularize linear DNA.
:returns: A circularized version of the current sequence.
:rtype: coral.DNA
'''
if self.top[-1].seq == '-' and self.bottom[0].seq == '-':
raise ValueError('Cannot circularize - termini disconnected.')
if self.bottom[-1].seq == '-' and self.top[0].seq == '-':
raise ValueError('Cannot circularize - termini disconnected.')
copy = self.copy()
copy.circular = True
copy.top.circular = True
copy.bottom.circular = True
return copy | def circularize(self) | Circularize linear DNA.
:returns: A circularized version of the current sequence.
:rtype: coral.DNA | 4.232485 | 3.046294 | 1.389388 |
'''Display a visualization of the sequence in an IPython notebook.'''
try:
from IPython.display import HTML
import uuid
except ImportError:
raise IPythonDisplayImportError
sequence_json = self.json()
d3cdn = '//d3js.org/d3.v3.min.js'
div_id = 'sequence_{}'.format(uuid.uuid1())
cur_dir = os.path.abspath(os.path.dirname(__file__))
d3_plasmid_path = os.path.join(cur_dir, 'd3-plasmid.js')
with open(d3_plasmid_path) as f:
d3_plasmid_js = f.read()
html = '<div id={div_id}></div>'.format(div_id=div_id)
js_databind = '''
<script>
require([\'{d3_cdn}\'], function(lib) {{
window.data = {data};'''.format(div_id=div_id, d3_cdn=d3cdn,
data=sequence_json)
js_viz = '''
d3sequence(window.data, \'{div_id}\')
}});
</script>
'''.format(div_id=div_id)
return HTML(html + js_databind + d3_plasmid_js + js_viz) | def display(self) | Display a visualization of the sequence in an IPython notebook. | 3.175673 | 2.991995 | 1.06139 |
'''Removes feature from circular plasmid and linearizes. Automatically
reorients at the base just after the feature. This operation is
complementary to the .extract() method.
:param feature: The feature to remove.
:type feature: coral.Feature
'''
rotated = self.rotate_to_feature(feature)
excised = rotated[feature.stop - feature.start:]
return excised | def excise(self, feature) | Removes feature from circular plasmid and linearizes. Automatically
reorients at the base just after the feature. This operation is
complementary to the .extract() method.
:param feature: The feature to remove.
:type feature: coral.Feature | 14.336 | 2.964322 | 4.836183 |
'''Extract a feature from the sequence. This operation is complementary
to the .excise() method.
:param feature: Feature object.
:type feature: coral.sequence.Feature
:param remove_subfeatures: Remove all features in the extracted
sequence aside from the input feature.
:type remove_subfeatures: bool
:returns: A subsequence from start to stop of the feature.
'''
extracted = self[feature.start:feature.stop]
# Turn gaps into Ns or Xs
for gap in feature.gaps:
for i in range(*gap):
extracted[i] = self._any_char
if remove_subfeatures:
# Keep only the feature specified
extracted.features = [feature]
# Update feature locations so they are relative to the extracted
# subsequence
for feature in extracted.features:
feature.move(-feature.start)
return extracted | def extract(self, feature, remove_subfeatures=False) | Extract a feature from the sequence. This operation is complementary
to the .excise() method.
:param feature: Feature object.
:type feature: coral.sequence.Feature
:param remove_subfeatures: Remove all features in the extracted
sequence aside from the input feature.
:type remove_subfeatures: bool
:returns: A subsequence from start to stop of the feature. | 6.147841 | 2.952086 | 2.082542 |
'''Flip the DNA - swap the top and bottom strands.
:returns: Flipped DNA (bottom strand is now top strand, etc.).
:rtype: coral.DNA
'''
copy = self.copy()
copy.top, copy.bottom = copy.bottom, copy.top
copy.features = [_flip_feature(f, len(self)) for f in copy.features]
return copy | def flip(self) | Flip the DNA - swap the top and bottom strands.
:returns: Flipped DNA (bottom strand is now top strand, etc.).
:rtype: coral.DNA | 5.763852 | 2.830133 | 2.036601 |
'''Linearize circular DNA at an index.
:param index: index at which to linearize.
:type index: int
:returns: A linearized version of the current sequence.
:rtype: coral.DNA
:raises: ValueError if the input is linear DNA.
'''
if not self.circular:
raise ValueError('Cannot relinearize linear DNA.')
copy = self.copy()
# Snip at the index
if index:
return copy[index:] + copy[:index]
copy.circular = False
copy.top.circular = False
copy.bottom.circular = False
return copy | def linearize(self, index=0) | Linearize circular DNA at an index.
:param index: index at which to linearize.
:type index: int
:returns: A linearized version of the current sequence.
:rtype: coral.DNA
:raises: ValueError if the input is linear DNA. | 5.914723 | 2.810955 | 2.104169 |
'''Find sequences matching a pattern. For a circular sequence, the
search extends over the origin.
:param pattern: str or NucleicAcidSequence for which to find matches.
:type pattern: str or coral.DNA
:returns: A list of top and bottom strand indices of matches.
:rtype: list of lists of indices (ints)
:raises: ValueError if the pattern is longer than either the input
sequence (for linear DNA) or twice as long as the input
sequence (for circular DNA).
'''
top_matches = self.top.locate(pattern)
bottom_matches = self.bottom.locate(pattern)
return [top_matches, bottom_matches] | def locate(self, pattern) | Find sequences matching a pattern. For a circular sequence, the
search extends over the origin.
:param pattern: str or NucleicAcidSequence for which to find matches.
:type pattern: str or coral.DNA
:returns: A list of top and bottom strand indices of matches.
:rtype: list of lists of indices (ints)
:raises: ValueError if the pattern is longer than either the input
sequence (for linear DNA) or twice as long as the input
sequence (for circular DNA). | 6.700916 | 1.499176 | 4.469734 |
'''Rotate Sequence by n bases.
:param n: Number of bases to rotate.
:type n: int
:returns: The current sequence rotated by `n` bases.
:rtype: coral.DNA
:raises: ValueError if applied to a linear sequence.
'''
if not self.circular and n != 0:
raise ValueError('Cannot rotate linear DNA')
else:
copy = self.copy()
copy.top = self.top.rotate(n)
copy.bottom = self.bottom.rotate(-n)
copy.features = []
for feature in self.features:
feature_copy = feature.copy()
feature_copy.move(n)
# Adjust the start/stop if we move over the origin
feature_copy.start = feature_copy.start % len(self)
feature_copy.stop = feature_copy.stop % len(self)
copy.features.append(feature_copy)
return copy.circularize() | def rotate(self, n) | Rotate Sequence by n bases.
:param n: Number of bases to rotate.
:type n: int
:returns: The current sequence rotated by `n` bases.
:rtype: coral.DNA
:raises: ValueError if applied to a linear sequence. | 4.118193 | 2.456951 | 1.67614 |
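A coordinate sketch of the origin-wrapping step: a feature at [8, 10) on a 10 bp circular sequence, rotated by n = 4, wraps around to [2, 4).

plasmid_len = 10
start, stop, n = 8, 10, 4
# move(n) adds n to both ends; the modulo wraps coordinates over the origin.
print('[%d, %d)' % ((start + n) % plasmid_len, (stop + n) % plasmid_len))  # [2, 4)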
'''Reverse complement the DNA.
:returns: A reverse-complemented instance of the current sequence.
:rtype: coral.DNA
'''
copy = self.copy()
# Note: if sequence is double-stranded, swapping strand is basically
# (but not entirely) the same thing - gaps affect accuracy.
copy.top = self.top.reverse_complement()
copy.bottom = self.bottom.reverse_complement()
# Remove all features - the reverse complement isn't flip!
copy.features = []
return copy | def reverse_complement(self) | Reverse complement the DNA.
:returns: A reverse-complemented instance of the current sequence.
:rtype: coral.DNA | 10.033641 | 7.333435 | 1.368205 |
'''Select features from the features list based on feature name,
gene, or locus tag.
:param term: Search term.
:type term: str
:param by: Feature attribute to search by. Options are 'name',
'gene', and 'locus_tag'.
:type by: str
:param fuzzy: If True, search becomes case-insensitive and will also
find substrings - e.g. if fuzzy search is enabled, a search for
'gfp' would return a hit for a feature named 'GFP_seq'.
:type fuzzy: bool
:returns: A list of features matched by the search.
:rtype: list
'''
features = []
if fuzzy:
fuzzy_term = term.lower()
for feature in self.features:
if fuzzy_term in feature.__getattribute__(by).lower():
features.append(feature)
else:
for feature in self.features:
if feature.__getattribute__(by) == term:
features.append(feature)
return features | def select_features(self, term, by='name', fuzzy=False) | Select features from the features list based on feature name,
gene, or locus tag.
:param term: Search term.
:type term: str
:param by: Feature attribute to search by. Options are 'name',
'gene', and 'locus_tag'.
:type by: str
:param fuzzy: If True, search becomes case-insensitive and will also
find substrings - e.g. if fuzzy search is enabled, a search for
'gfp' would return a hit for a feature named 'GFP_seq'.
:type fuzzy: bool
:returns: A list of features matched by the search.
:rtype: list | 3.213151 | 1.382727 | 2.323778 |
'''Create a feature from the current object.
:param name: Name for the new feature. Must be specified if the DNA
instance has no .name attribute.
:type name: str
:param feature_type: The type of feature (genbank standard).
:type feature_type: str
'''
if name is None:
if not self.name:
raise ValueError('name attribute missing from DNA instance'
' and arguments')
name = self.name
return Feature(name, start=0, stop=len(self),
feature_type=feature_type) | def to_feature(self, name=None, feature_type='misc_feature') | Create a feature from the current object.
:param name: Name for the new feature. Must be specified if the DNA
instance has no .name attribute.
:type name: str
:param feature_type: The type of feature (genbank standard).
:type feature_type: str | 5.389203 | 2.544533 | 2.117953 |
'''Report whether the enzyme cuts outside its recognition site.
Cutting at the very end of the site returns True.
:returns: Whether the enzyme will cut outside its recognition site.
:rtype: bool
'''
for index in self.cut_site:
if index < 0 or index > len(self.recognition_site) + 1:
return True
return False | def cuts_outside(self) | Report whether the enzyme cuts outside its recognition site.
Cutting at the very end of the site returns True.
:returns: Whether the enzyme will cut outside its recognition site.
:rtype: bool | 6.347384 | 2.640666 | 2.403706 |
'''Generate a Primer copy.
:returns: A safely-editable copy of the current primer.
:rtype: coral.DNA
'''
return type(self)(self.anneal, self.tm, overhang=self.overhang,
name=self.name, note=self.note) | def copy(self) | Generate a Primer copy.
:returns: A safely-editable copy of the current primer.
:rtype: coral.DNA | 10.951224 | 4.426711 | 2.473896 |
'''Add up nearest-neighbor parameters for a given sequence.
:param seq: DNA sequence for which to sum nearest neighbors
:type seq: str
:param pars: parameter set to use
:type pars: dict
:returns: nearest-neighbor delta_H and delta_S sums.
:rtype: tuple of floats
'''
delta0 = 0
delta1 = 0
for i in range(len(seq) - 1):
curchar = seq[i:i + 2]
delta0 += pars['delta_h'][curchar]
delta1 += pars['delta_s'][curchar]
return delta0, delta1 | def _pair_deltas(seq, pars) | Add up nearest-neighbor parameters for a given sequence.
:param seq: DNA sequence for which to sum nearest neighbors
:type seq: str
:param pars: parameter set to use
:type pars: dict
:returns: nearest-neighbor delta_H and delta_S sums.
:rtype: tuple of floats | 3.556811 | 1.778643 | 1.999734 |
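A toy run of the dinucleotide walk with made-up parameter values (not real thermodynamic data):

pars = {'delta_h': {'AT': -7.2, 'TG': -8.5, 'GC': -9.8},
        'delta_s': {'AT': -20.4, 'TG': -22.7, 'GC': -24.4}}
seq = 'ATGC'  # dinucleotides visited: AT, TG, GC
dh = sum(pars['delta_h'][seq[i:i + 2]] for i in range(len(seq) - 1))
ds = sum(pars['delta_s'][seq[i:i + 2]] for i in range(len(seq) - 1))
print(round(dh, 1))  # -25.5
print(round(ds, 1))  # -67.5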
'''Sum corrections for Breslauer '84 method.
:param seq: sequence for which to calculate corrections.
:type seq: str
:param pars_error: dictionary of error corrections
:type pars_error: dict
:returns: Corrected delta_H and delta_S parameters
:rtype: list of floats
'''
deltas_corr = [0, 0]
contains_gc = 'G' in str(seq) or 'C' in str(seq)
only_at = str(seq).count('A') + str(seq).count('T') == len(seq)
symmetric = seq == seq.reverse_complement()
# Count how many termini are T (0, 1, or 2)
terminal_t = (str(seq)[0] == 'T') + (str(seq)[-1] == 'T')
for i, delta in enumerate(['delta_h', 'delta_s']):
if contains_gc:
deltas_corr[i] += pars_error[delta]['anyGC']
if only_at:
deltas_corr[i] += pars_error[delta]['onlyAT']
if symmetric:
deltas_corr[i] += pars_error[delta]['symmetry']
if terminal_t and delta == 'delta_h':
deltas_corr[i] += pars_error[delta]['terminalT'] * terminal_t
return deltas_corr | def breslauer_corrections(seq, pars_error) | Sum corrections for Breslauer '84 method.
:param seq: sequence for which to calculate corrections.
:type seq: str
:param pars_error: dictionary of error corrections
:type pars_error: dict
:returns: Corrected delta_H and delta_S parameters
:rtype: list of floats | 3.182886 | 2.300911 | 1.383315 |
'''Sum corrections for SantaLucia '98 method (unified parameters).
:param seq: sequence for which to calculate corrections.
:type seq: str
:param pars_error: dictionary of error corrections
:type pars_error: dict
:returns: Corrected delta_H and delta_S parameters
:rtype: list of floats
'''
deltas_corr = [0, 0]
first = str(seq)[0]
last = str(seq)[-1]
start_gc = first == 'G' or first == 'C'
start_at = first == 'A' or first == 'T'
end_gc = last == 'G' or last == 'C'
end_at = last == 'A' or last == 'T'
init_gc = start_gc + end_gc
init_at = start_at + end_at
symmetric = seq == seq.reverse_complement()
for i, delta in enumerate(['delta_h', 'delta_s']):
deltas_corr[i] += init_gc * pars_error[delta]['initGC']
deltas_corr[i] += init_at * pars_error[delta]['initAT']
if symmetric:
deltas_corr[i] += pars_error[delta]['symmetry']
return deltas_corr | def santalucia98_corrections(seq, pars_error) | Sum corrections for SantaLucia '98 method (unified parameters).
:param seq: sequence for which to calculate corrections.
:type seq: str
:param pars_error: dictionary of error corrections
:type pars_error: dict
:returns: Corrected delta_H and delta_S parameters
:rtype: list of floats | 2.92548 | 2.040009 | 1.434052 |
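The corrected delta_H/delta_S sums typically feed a two-state melting-temperature expression of the form Tm = delta_H / (delta_S + R * ln(C_T / x)); a sketch with purely illustrative numbers (not values from the parameter sets above):

import math

R = 1.9872          # gas constant, cal/(mol*K)
delta_h = -60000.0  # cal/mol, illustrative corrected sum
delta_s = -170.0    # cal/(mol*K), illustrative corrected sum
conc = 5e-7         # total strand concentration in M; x = 4 for non-self-complementary duplexes
tm = delta_h / (delta_s + R * math.log(conc / 4.0)) - 273.15
print(round(tm, 1))  # about 24.5 degrees C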