code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# Default to negative. parseTimeOffset defaults to +
if timeShift[0].isdigit():
    timeShift = '-' + timeShift
delta = parseTimeOffset(timeShift)
myContext = requestContext.copy()
myContext['startTime'] = requestContext['startTime'] + delta
myContext['endTime'] = requestContext['endTime'] + delta

if alignDST:
    reqStartDST = localDST(requestContext['startTime'])
    reqEndDST = localDST(requestContext['endTime'])
    myStartDST = localDST(myContext['startTime'])
    myEndDST = localDST(myContext['endTime'])

    dstOffset = timedelta(hours=0)
    # If the requestContext is entirely in DST, and we are entirely
    # NOT in DST
    if (
        (reqStartDST and reqEndDST) and
        (not myStartDST and not myEndDST)
    ):
        dstOffset = timedelta(hours=1)
    # Or if the requestContext is entirely NOT in DST, and we are
    # entirely in DST
    elif (
        (not reqStartDST and not reqEndDST) and
        (myStartDST and myEndDST)
    ):
        dstOffset = timedelta(hours=-1)
    # Otherwise, we don't do anything, because it would be visually
    # confusing
    myContext['startTime'] += dstOffset
    myContext['endTime'] += dstOffset

results = []
if not seriesList:
    return results

# if len(seriesList) > 1, they will all have the same pathExpression,
# which is all we care about.
series = seriesList[0]

for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
    shiftedSeries.name = 'timeShift(%s, %s)' % (shiftedSeries.name,
                                                timeShift)
    if resetEnd:
        shiftedSeries.end = series.end
    else:
        shiftedSeries.end = (
            shiftedSeries.end - shiftedSeries.start + series.start)
    shiftedSeries.start = series.start
    results.append(shiftedSeries)

return results
def timeShift(requestContext, seriesList, timeShift, resetEnd=True, alignDST=False)
Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats).

Draws the selected metrics shifted in time. If no sign is given, a
minus sign ( - ) is implied which will shift the metric back in time.
If a plus sign ( + ) is given, the metric will be shifted forward in
time.

Will reset the end date range automatically to the end of the base
stat unless resetEnd is False. An example case is when you timeshift
to last week while the graph date range is set to include a time in
the future: the timeshift will then be limited to pretend it ends at
the current time. If resetEnd is False, the full range including
future time is drawn instead.

Because time is shifted by a fixed number of seconds, comparing a time
period with DST to a time period without DST, and vice-versa, will
result in an apparent misalignment. For example, 8am might be overlaid
with 7am. To compensate for this, use the alignDST option.

Useful for comparing a metric against itself at past periods or
correcting data stored at an offset.

Example::

    &target=timeShift(Sales.widgets.largeBlue,"7d")
    &target=timeShift(Sales.widgets.largeBlue,"-7d")
    &target=timeShift(Sales.widgets.largeBlue,"+1h")
2.991103
3.417299
0.875283
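The sign-defaulting and shift arithmetic above are easy to see in isolation. A minimal sketch using only the standard library, with a hypothetical fixed ``timedelta`` standing in for what ``parseTimeOffset("-7d")`` would return::

    from datetime import datetime, timedelta

    timeShift = "7d"
    # Default to negative, mirroring the function above.
    if timeShift[0].isdigit():
        timeShift = "-" + timeShift
    delta = timedelta(days=-7)  # assumed result of parseTimeOffset("-7d")

    context = {"startTime": datetime(2015, 3, 14),
               "endTime": datetime(2015, 3, 15)}
    shifted = {k: v + delta for k, v in context.items()}
    print(shifted["startTime"])  # 2015-03-07 00:00:00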
results = []
start = epoch(parseATTime(startSliceAt))
end = epoch(parseATTime(endSliceAt))

for slicedSeries in seriesList:
    slicedSeries.name = 'timeSlice(%s, %s, %s)' % (slicedSeries.name,
                                                   int(start), int(end))
    curr = epoch(requestContext["startTime"])
    for i, v in enumerate(slicedSeries):
        if v is None or curr < start or curr > end:
            slicedSeries[i] = None
        curr += slicedSeries.step
    results.append(slicedSeries)
return results
def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt='now')
Takes one metric or a wildcard metric, followed by a quoted string
with the time to start the line and another quoted string with the
time to end the line. The start and end times are inclusive. See
``from / until`` in the render api for examples of time formats.

Useful for filtering out a part of a series of data from a wider range
of data.

Example::

    &target=timeSlice(network.core.port1,"00:00 20140101","11:59 20140630")
    &target=timeSlice(network.core.port1,"12:00 20140630","now")
3.418951
4.135887
0.826655
name = "constantLine(%s)" % str(value)
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
step = int((end - start) / 2.0)
series = TimeSeries(str(value), start, end, step, [value, value, value])
series.pathExpression = name
return [series]
def constantLine(requestContext, value)
Takes a float F.

Draws a horizontal line at value F across the graph.

Example::

    &target=constantLine(123.456)
4.307927
5.264822
0.818247
t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}

if func not in t_funcs:
    raise ValueError("Invalid function %s" % func)

results = []
for series in seriesList:
    value = t_funcs[func](series)
    if value is not None:
        name = 'aggregateLine(%s, %g)' % (series.name, value)
    else:
        name = 'aggregateLine(%s, None)' % (series.name)

    [series] = constantLine(requestContext, value)
    series.name = name
    series.pathExpression = series.name
    results.append(series)
return results
def aggregateLine(requestContext, seriesList, func='avg')
Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.

Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the line
drawn by this function and the data itself. To fix this, you should
use the consolidateBy() function with the same function argument you
are using for aggregateLine. This will ensure that the proper data
points are retained and the graph should line up correctly.

Example::

    &target=aggregateLine(server01.connections.total, 'avg')
    &target=aggregateLine(server*.connections.total, 'avg')
3.438628
3.836161
0.896372
ts = int(epoch(parseATTime(ts, requestContext['tzinfo'])))
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
if ts < start:
    raise ValueError("verticalLine(): timestamp %s exists "
                     "before start of range" % ts)
elif ts > end:
    raise ValueError("verticalLine(): timestamp %s exists "
                     "after end of range" % ts)
start = end = ts
step = 1.0
series = TimeSeries(label, start, end, step, [1.0, 1.0])
series.options['drawAsInfinite'] = True
if color:
    series.color = color
return [series]
def verticalLine(requestContext, ts, label=None, color=None)
Takes a timestamp string ts.

Draws a vertical line at the designated timestamp with optional
'label' and 'color'. Supported timestamp formats include both relative
(e.g. -3h) and absolute (e.g. 16:00_20110501) strings, such as those
used with ``from`` and ``until`` parameters. When set, the 'label'
will appear in the graph legend.

Note: Any timestamps defined outside the requested range will raise a
'ValueError' exception.

Example::

    &target=verticalLine("12:3420131108","event","blue")
    &target=verticalLine("16:00_20110501","event")
    &target=verticalLine("-5mins")
3.949965
4.463101
0.885027
[series] = constantLine(requestContext, value)
if label:
    series.name = label
if color:
    series.color = color
return [series]
def threshold(requestContext, value, label=None, color=None)
Takes a float F, followed by a label (in double quotes) and a color.
(See ``bgcolor`` in the render\_api_ for valid color names & formats.)

Draws a horizontal line at value F across the graph.

Example::

    &target=threshold(123.456, "omgwtfbbq", "red")
5.368659
12.465095
0.430695
def transform(v, d):
    if v is None:
        return d
    else:
        return v

if referenceSeries:
    defaults = [default if any(v is not None for v in x) else None
                for x in zip_longest(*referenceSeries)]
else:
    defaults = None

for series in seriesList:
    if referenceSeries:
        series.name = "transformNull(%s,%g,referenceSeries)" % (
            series.name, default)
    else:
        series.name = "transformNull(%s,%g)" % (series.name, default)
    series.pathExpression = series.name
    if defaults:
        values = [transform(v, d) for v, d in zip_longest(series,
                                                          defaults)]
    else:
        values = [transform(v, default) for v in series]
    series.extend(values)
    del series[:len(values)]
return seriesList
def transformNull(requestContext, seriesList, default=0, referenceSeries=None)
Takes a metric or wildcard seriesList and replaces null values with
the value specified by `default`. The value 0 is used if not
specified.

The optional referenceSeries, if specified, is a metric or wildcard
series list that governs in which time intervals nulls should be
replaced. If specified, nulls are replaced only in intervals where a
non-null is found for the same interval in any of referenceSeries.

This method complements the drawNullAsZero function in graphical mode,
but also works in text-only mode.

Example::

    &target=transformNull(webapp.pages.*.views,-1)

This would take any page that didn't have values and supply negative 1
as a default. Any other numeric value may be used as well.
3.007131
3.03424
0.991066
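The core replacement logic is independent of Graphite's series objects. A minimal sketch on plain lists, assuming nothing beyond the standard library::

    from itertools import zip_longest

    series = [1, None, 3, None]
    reference = [[None, 7, None, 9], [None, None, 2, None]]
    default = 0

    # Replace nulls only where any reference series has a value.
    defaults = [default if any(v is not None for v in col) else None
                for col in zip_longest(*reference)]
    values = [v if v is not None else d
              for v, d in zip_longest(series, defaults)]
    print(values)  # [1, 0, 3, 0]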
def transform(v):
    if v is None:
        return 0
    else:
        return 1

for series in seriesList:
    series.name = "isNonNull(%s)" % (series.name)
    series.pathExpression = series.name
    values = [transform(v) for v in series]
    series.extend(values)
    del series[:len(values)]
return seriesList
def isNonNull(requestContext, seriesList)
Takes a metric or wildcard seriesList and counts up how many non-null
values are specified. This is useful for understanding which metrics
have data at a given point in time (i.e., to count which servers are
alive).

Example::

    &target=isNonNull(webapp.pages.*.views)

Returns a seriesList where 1 is specified for non-null values, and 0
is specified for null values.
4.281002
3.626529
1.180468
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
values = range(start, end, step)
series = TimeSeries(name, start, end, step, values)
series.pathExpression = 'identity("%s")' % name
return [series]
def identity(requestContext, name, step=60)
Identity function: Returns datapoints where the value equals the
timestamp of the datapoint. Useful when you have another series where
the value is a timestamp, and you want to compare it to the time of
the datapoint, to render an age.

Example::

    &target=identity("The.time.series")

This would create a series named "The.time.series" that contains
points where x(t) == t.

Accepts an optional second argument as 'step' parameter (default step
is 60 sec).
4.676119
5.14426
0.908997
if not seriesLists or not any(seriesLists):
    series = constantLine(requestContext, 0).pop()
    series.pathExpression = "countSeries()"
else:
    seriesList, start, end, step = normalize(seriesLists)
    name = "countSeries(%s)" % formatPathExpressions(seriesList)
    values = (int(len(row)) for row in zip_longest(*seriesList))
    series = TimeSeries(name, start, end, step, values)
    series.pathExpression = name
return [series]
def countSeries(requestContext, *seriesLists)
Draws a horizontal line representing the number of nodes found in the
seriesList.

Example::

    &target=countSeries(carbon.agents.*.*)
5.925752
7.911834
0.748973
seriesGroup = []
for s in seriesLists:
    seriesGroup.extend(s)
return seriesGroup
def group(requestContext, *seriesLists)
Takes an arbitrary number of seriesLists and adds them to a single seriesList. This is used to pass multiple seriesLists to a function which only takes one.
4.227954
4.717182
0.896288
metaSeries = {}
keys = []
for series in seriesList:
    key = series.name.split(".")[mapNode]
    if key not in metaSeries:
        metaSeries[key] = [series]
        keys.append(key)
    else:
        metaSeries[key].append(series)
return [metaSeries[k] for k in keys]
def mapSeries(requestContext, seriesList, mapNode)
Short form: ``map()``.

Takes a seriesList and maps it to a list of sub-seriesList. Each
sub-seriesList has the given mapNode in common.

Example (note: This function is not very useful alone. It should be
used with :py:func:`reduceSeries`)::

    mapSeries(servers.*.cpu.*,1) =>
        [
            servers.server1.cpu.*,
            servers.server2.cpu.*,
            ...
            servers.serverN.cpu.*
        ]
2.927682
2.98804
0.9798
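The grouping itself is plain dictionary bookkeeping over dotted names. A minimal sketch with bare strings standing in for series objects::

    names = ["servers.server1.cpu.load5", "servers.server1.cpu.load15",
             "servers.server2.cpu.load5"]
    mapNode = 1

    metaSeries, keys = {}, []
    for name in names:
        key = name.split(".")[mapNode]
        if key not in metaSeries:
            metaSeries[key] = [name]
            keys.append(key)
        else:
            metaSeries[key].append(name)
    print([metaSeries[k] for k in keys])
    # [['servers.server1.cpu.load5', 'servers.server1.cpu.load15'],
    #  ['servers.server2.cpu.load5']]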
metaSeries = {}
keys = []
for seriesList in seriesLists:
    for series in seriesList:
        nodes = series.name.split('.')
        node = nodes[reduceNode]
        reduceSeriesName = '.'.join(
            nodes[0:reduceNode]) + '.reduce.' + reduceFunction
        if node in reduceMatchers:
            if reduceSeriesName not in metaSeries:
                metaSeries[reduceSeriesName] = [None] * len(reduceMatchers)
                keys.append(reduceSeriesName)
            i = reduceMatchers.index(node)
            metaSeries[reduceSeriesName][i] = series
for key in keys:
    metaSeries[key] = app.functions[reduceFunction](
        requestContext, *[[s] for s in metaSeries[key]])[0]
    metaSeries[key].name = key
return [metaSeries[key] for key in keys]
def reduceSeries(requestContext, seriesLists, reduceFunction, reduceNode, *reduceMatchers)
Short form: ``reduce()``.

Takes a list of seriesLists and reduces it to a list of series by
means of the reduceFunction.

Reduction is performed by matching the reduceNode in each series
against the list of reduceMatchers. Each series is then passed to the
reduceFunction as arguments in the order given by reduceMatchers. The
reduceFunction should yield a single series.

The resulting list of series is aliased so that they can easily be
nested in other functions.

**Example**: Map/Reduce asPercent(bytes_used,total_bytes) for each
server.

Assume that metrics in the form below exist::

    servers.server1.disk.bytes_used
    servers.server1.disk.total_bytes
    servers.server2.disk.bytes_used
    servers.server2.disk.total_bytes
    servers.server3.disk.bytes_used
    servers.server3.disk.total_bytes
    ...
    servers.serverN.disk.bytes_used
    servers.serverN.disk.total_bytes

To get the percentage of disk used for each server::

    reduceSeries(mapSeries(servers.*.disk.*,1),
                 "asPercent",3,"bytes_used","total_bytes") =>

        alias(asPercent(servers.server1.disk.bytes_used,
                        servers.server1.disk.total_bytes),
              "servers.server1.disk.reduce.asPercent"),
        alias(asPercent(servers.server2.disk.bytes_used,
                        servers.server2.disk.total_bytes),
              "servers.server2.disk.reduce.asPercent"),
        ...
        alias(asPercent(servers.serverN.disk.bytes_used,
                        servers.serverN.disk.total_bytes),
              "servers.serverN.disk.reduce.asPercent")

In other words, we will get back the following metrics::

    servers.server1.disk.reduce.asPercent,
    servers.server2.disk.reduce.asPercent,
    ...
    servers.serverN.disk.reduce.asPercent

.. seealso:: :py:func:`mapSeries`
2.933548
3.289704
0.891736
from .app import evaluateTarget
prefixes = set()
for series in seriesList:
    prefix = '.'.join(series.name.split('.')[:nodeNum + 1])
    prefixes.add(prefix)
results = []
for prefix in sorted(prefixes):
    target = templateFunction.replace('%', prefix)
    for resultSeries in evaluateTarget(requestContext, target):
        if newName:
            resultSeries.name = newName.replace('%', prefix)
        resultSeries.pathExpression = prefix
        resultSeries.start = series.start
        resultSeries.end = series.end
        results.append(resultSeries)
return results
def applyByNode(requestContext, seriesList, nodeNum, templateFunction, newName=None)
Takes a seriesList and applies some complicated function (described by
a string), replacing templates with unique prefixes of keys from the
seriesList (the key is all nodes up to the index given as `nodeNum`).

If the `newName` parameter is provided, the name of the resulting
series will be given by that parameter, with any "%" characters
replaced by the unique prefix.

Example::

    &target=applyByNode(servers.*.disk.bytes_free,1,
                        "divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))")

Would find all series which match `servers.*.disk.bytes_free`, then
trim them down to unique series up to the node given by nodeNum, then
fill them into the template function provided (replacing % by the
prefixes).
3.651137
3.877627
0.941591
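The prefix extraction and ``%`` templating can be shown without the render pipeline. A minimal sketch, assuming plain strings::

    names = ["servers.s1.disk.bytes_free", "servers.s2.disk.bytes_free"]
    nodeNum = 1
    template = "divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))"

    prefixes = {".".join(n.split(".")[:nodeNum + 1]) for n in names}
    for prefix in sorted(prefixes):
        print(template.replace("%", prefix))
    # divideSeries(servers.s1.disk.bytes_free,sumSeries(servers.s1.disk.bytes_*))
    # divideSeries(servers.s2.disk.bytes_free,sumSeries(servers.s2.disk.bytes_*))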
return groupByNodes(requestContext, seriesList, callback, nodeNum)
def groupByNode(requestContext, seriesList, nodeNum, callback)
Takes a seriesList and maps a callback to subgroups within as defined
by a common node.

Example::

    &target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries")

Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the second node (0 indexed)
resulting in a list of targets like::

    sumSeries(ganglia.by-function.server1.*.cpu.load5),
    sumSeries(ganglia.by-function.server2.*.cpu.load5),...
6.025861
17.898621
0.336666
from .app import app
metaSeries = {}
keys = []
if isinstance(nodes, int):
    nodes = [nodes]
for series in seriesList:
    key = '.'.join(series.name.split(".")[n] for n in nodes)
    if key not in metaSeries:
        metaSeries[key] = [series]
        keys.append(key)
    else:
        metaSeries[key].append(series)
for key in metaSeries:
    metaSeries[key] = app.functions[callback](requestContext,
                                              metaSeries[key])[0]
    metaSeries[key].name = key
return [metaSeries[key] for key in keys]
def groupByNodes(requestContext, seriesList, callback, *nodes)
Takes a seriesList and maps a callback to subgroups within as defined
by multiple nodes.

Example::

    &target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4)

Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the nodes' list (0 indexed)
resulting in a list of targets like::

    sumSeries(ganglia.server1.*.cpu.load5),
    sumSeries(ganglia.server1.*.cpu.load10),
    sumSeries(ganglia.server1.*.cpu.load15),
    sumSeries(ganglia.server2.*.cpu.load5),
    sumSeries(ganglia.server2.*.cpu.load10),
    sumSeries(ganglia.server2.*.cpu.load15),
    ...
2.989369
3.594429
0.831667
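Key construction for multi-node grouping is a single join over the selected name nodes. A minimal sketch::

    name = "ganglia.server1.sys.cpu.load5"
    nodes = [1, 4]
    key = ".".join(name.split(".")[n] for n in nodes)
    print(key)  # server1.load5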
regex = re.compile(pattern)
return [s for s in seriesList if not regex.search(s.name)]
def exclude(requestContext, seriesList, pattern)
Takes a metric or a wildcard seriesList, followed by a regular
expression in double quotes. Excludes metrics that match the regular
expression.

Example::

    &target=exclude(servers*.instance*.threads.busy,"server02")
2.809778
4.90451
0.572897
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)

# Adjust the start time to fit an entire day for intervals >= 1 day
requestContext = requestContext.copy()
tzinfo = requestContext['tzinfo']
s = requestContext['startTime']
if interval >= DAY:
    requestContext['startTime'] = datetime(s.year, s.month, s.day,
                                           tzinfo=tzinfo)
elif interval >= HOUR:
    requestContext['startTime'] = datetime(s.year, s.month, s.day,
                                           s.hour, tzinfo=tzinfo)
elif interval >= MINUTE:
    requestContext['startTime'] = datetime(s.year, s.month, s.day,
                                           s.hour, s.minute,
                                           tzinfo=tzinfo)

paths = []
for series in seriesList:
    paths.extend(pathsFromTarget(requestContext, series.pathExpression))
data_store = fetchData(requestContext, paths)

for series in seriesList:
    # XXX: breaks with summarize(metric.{a,b})
    #      each series.pathExpression == metric.{a,b}
    newSeries = evaluateTarget(requestContext,
                               series.pathExpression,
                               data_store)[0]
    series[0:len(series)] = newSeries
    series.start = newSeries.start
    series.end = newSeries.end
    series.step = newSeries.step

for series in seriesList:
    buckets = {}  # {timestamp: [values]}

    timestamps = range(int(series.start), int(series.end),
                       int(series.step))
    datapoints = zip_longest(timestamps, series)

    # Populate buckets
    for timestamp, value in datapoints:
        # ISSUE: Sometimes there is a missing timestamp in datapoints
        # when running a smartSummary
        if not timestamp:
            continue
        bucketInterval = int((timestamp - series.start) / interval)

        if bucketInterval not in buckets:
            buckets[bucketInterval] = []

        if value is not None:
            buckets[bucketInterval].append(value)

    newValues = []
    for timestamp in range(series.start, series.end, interval):
        bucketInterval = int((timestamp - series.start) / interval)
        bucket = buckets.get(bucketInterval, [])

        if bucket:
            if func == 'avg':
                newValues.append(float(sum(bucket)) / float(len(bucket)))
            elif func == 'last':
                newValues.append(bucket[len(bucket) - 1])
            elif func == 'max':
                newValues.append(max(bucket))
            elif func == 'min':
                newValues.append(min(bucket))
            else:
                newValues.append(sum(bucket))
        else:
            newValues.append(None)

    newName = "smartSummarize(%s, \"%s\", \"%s\")" % (series.name,
                                                      intervalString,
                                                      func)
    alignedEnd = series.start + (bucketInterval * interval) + interval
    newSeries = TimeSeries(newName, series.start, alignedEnd, interval,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)

return results
def smartSummarize(requestContext, seriesList, intervalString, func='sum')
Smarter experimental version of summarize.
2.83432
2.81114
1.008246
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)

for series in seriesList:
    buckets = {}

    timestamps = range(int(series.start), int(series.end) + 1,
                       int(series.step))
    datapoints = zip_longest(timestamps, series)

    for timestamp, value in datapoints:
        if timestamp is None:
            continue
        if alignToFrom:
            bucketInterval = int((timestamp - series.start) / interval)
        else:
            bucketInterval = timestamp - (timestamp % interval)

        if bucketInterval not in buckets:
            buckets[bucketInterval] = []

        if value is not None:
            buckets[bucketInterval].append(value)

    if alignToFrom:
        newStart = series.start
        newEnd = series.end
    else:
        newStart = series.start - (series.start % interval)
        newEnd = series.end - (series.end % interval) + interval

    newValues = []
    for timestamp in range(newStart, newEnd, interval):
        if alignToFrom:
            newEnd = timestamp
            bucketInterval = int((timestamp - series.start) / interval)
        else:
            bucketInterval = timestamp - (timestamp % interval)

        bucket = buckets.get(bucketInterval, [])

        if bucket:
            if func == 'avg':
                newValues.append(float(sum(bucket)) / float(len(bucket)))
            elif func == 'last':
                newValues.append(bucket[len(bucket) - 1])
            elif func == 'max':
                newValues.append(max(bucket))
            elif func == 'min':
                newValues.append(min(bucket))
            else:
                newValues.append(sum(bucket))
        else:
            newValues.append(None)

    if alignToFrom:
        newEnd += interval

    newName = "summarize(%s, \"%s\", \"%s\"%s)" % (
        series.name, intervalString, func,
        alignToFrom and ", true" or "")
    newSeries = TimeSeries(newName, newStart, newEnd, interval,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)

return results
def summarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False)
Summarize the data into interval buckets of a certain size.

By default, the contents of each interval bucket are summed together.
This is useful for counters where each increment represents a discrete
event and retrieving a "per X" value requires summing all the events
in that interval.

Specifying 'avg' instead will return the mean for each bucket, which
can be more useful when the value is a gauge that represents a certain
value in time. 'max', 'min' or 'last' can also be specified.

By default, buckets are calculated by rounding to the nearest
interval. This works well for intervals smaller than a day. For
example, 22:32 will end up in the bucket 22:00-23:00 when the
interval=1hour.

Passing alignToFrom=true will instead create buckets starting at the
from time. In this case, the bucket for 22:32 depends on the from
time. If from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30.

Example::

    # total errors per hour
    &target=summarize(counter.errors, "1hour")

    # new users per week
    &target=summarize(nonNegativeDerivative(gauge.num_users), "1week")

    # average queue size per hour
    &target=summarize(queue.size, "1hour", "avg")

    # maximum queue size during each hour
    &target=summarize(queue.size, "1hour", "max")

    # 2010 Q1-4
    &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101
2.036231
2.117462
0.961638
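The default (interval-aligned) bucketing rule is easiest to see on raw numbers. A minimal sketch, assuming one-hour buckets over plain ``timestamp -> value`` points::

    interval = 3600
    points = {7260: 1, 7320: 2, 10980: 4}  # timestamp -> value

    buckets = {}
    for timestamp, value in points.items():
        # Round down to the nearest interval boundary.
        bucket = timestamp - (timestamp % interval)
        buckets.setdefault(bucket, []).append(value)

    print({k: sum(v) for k, v in sorted(buckets.items())})
    # {7200: 3, 10800: 4}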
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)

if alignToInterval:
    requestContext = requestContext.copy()
    tzinfo = requestContext['tzinfo']
    s = requestContext['startTime']
    if interval >= DAY:
        requestContext['startTime'] = datetime(s.year, s.month, s.day,
                                               tzinfo=tzinfo)
    elif interval >= HOUR:
        requestContext['startTime'] = datetime(s.year, s.month, s.day,
                                               s.hour, tzinfo=tzinfo)
    elif interval >= MINUTE:
        requestContext['startTime'] = datetime(s.year, s.month, s.day,
                                               s.hour, s.minute,
                                               tzinfo=tzinfo)

    # Gather all paths first, then the data
    paths = []
    for series in seriesList:
        paths.extend(pathsFromTarget(requestContext,
                                     series.pathExpression))
    data_store = fetchData(requestContext, paths)

    for series in seriesList:
        newSeries = evaluateTarget(requestContext,
                                   series.pathExpression,
                                   data_store)[0]
        intervalCount = int((series.end - series.start) / interval)
        series[0:len(series)] = newSeries
        series.start = newSeries.start
        series.end = newSeries.start + (
            intervalCount * interval) + interval
        series.step = newSeries.step

for series in seriesList:
    step = int(series.step)
    bucket_count = int(math.ceil(
        float(series.end - series.start) / interval))
    buckets = [[] for _ in range(bucket_count)]
    newStart = int(series.end - bucket_count * interval)

    for i, value in enumerate(series):
        if value is None:
            continue

        start_time = int(series.start + i * step)
        start_bucket, start_mod = divmod(start_time - newStart,
                                         interval)
        end_time = start_time + step
        end_bucket, end_mod = divmod(end_time - newStart, interval)

        if end_bucket >= bucket_count:
            end_bucket = bucket_count - 1
            end_mod = interval

        if start_bucket == end_bucket:
            # All of the hits go to a single bucket.
            if start_bucket >= 0:
                buckets[start_bucket].append(value * (end_mod -
                                                      start_mod))
        else:
            # Spread the hits among 2 or more buckets.
            if start_bucket >= 0:
                buckets[start_bucket].append(
                    value * (interval - start_mod))
            hits_per_bucket = value * interval
            for j in range(start_bucket + 1, end_bucket):
                buckets[j].append(hits_per_bucket)
            if end_mod > 0:
                buckets[end_bucket].append(value * end_mod)

    newValues = []
    for bucket in buckets:
        if bucket:
            newValues.append(sum(bucket))
        else:
            newValues.append(None)

    newName = 'hitcount(%s, "%s"%s)' % (series.name, intervalString,
                                        alignToInterval and ", true"
                                        or "")
    newSeries = TimeSeries(newName, newStart, series.end, interval,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)

return results
def hitcount(requestContext, seriesList, intervalString, alignToInterval=False)
Estimate hit counts from a list of time series. This function assumes the values in each time series represent hits per second. It calculates hits per some larger interval such as per day or per hour. This function is like summarize(), except that it compensates automatically for different time scales (so that a similar graph results from using either fine-grained or coarse-grained records) and handles rarely-occurring events gracefully.
2.505323
2.490142
1.006097
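The proportional spreading of a single datapoint across bucket boundaries is the heart of hitcount(). A minimal sketch of the ``divmod`` logic for one value, assuming a 60 s step and 45 s buckets::

    interval, step, newStart = 45, 60, 0
    bucket_count = 2
    start_time, value = 30, 2.0  # 2 hits/second over [30, 90)

    start_bucket, start_mod = divmod(start_time - newStart, interval)
    end_bucket, end_mod = divmod(start_time + step - newStart, interval)
    if end_bucket >= bucket_count:          # clamp, as in hitcount()
        end_bucket, end_mod = bucket_count - 1, interval

    buckets = [0.0] * bucket_count
    if start_bucket == end_bucket:
        buckets[start_bucket] += value * (end_mod - start_mod)
    else:
        buckets[start_bucket] += value * (interval - start_mod)
        for j in range(start_bucket + 1, end_bucket):
            buckets[j] += value * interval
        if end_mod > 0:
            buckets[end_bucket] += value * end_mod

    print(buckets)  # [30.0, 90.0] -- total 120 hits = 2 hits/s * 60 s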
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []

while when < requestContext["endTime"]:
    values.append(epoch(when))
    when += delta

series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def timeFunction(requestContext, name, step=60)
Short Alias: time()

Just returns the timestamp for each X value.

Example::

    &target=time("The.time.series")

This would create a series named "The.time.series" that contains in Y
the same value (in seconds) as X.

A second argument can be provided as a step parameter (default is 60
secs).
3.733529
4.059045
0.919805
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []

while when < requestContext["endTime"]:
    values.append(math.sin(epoch(when)) * amplitude)
    when += delta

series = TimeSeries(
    name,
    int(epoch(requestContext["startTime"])),
    int(epoch(requestContext["endTime"])),
    step, values)
series.pathExpression = 'sin({0})'.format(name)
return [series]
def sinFunction(requestContext, name, amplitude=1, step=60)
Short Alias: sin()

Just returns the sine of the current time. The optional amplitude
parameter changes the amplitude of the wave.

Example::

    &target=sin("The.time.series", 2)

This would create a series named "The.time.series" that contains
sin(x)*2.

A third argument can be provided as a step parameter (default is 60
secs).
3.698415
4.779009
0.773887
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
current = 0
while when < requestContext["endTime"]:
    values.append(current)
    current += random.random() - 0.5
    when += delta
return [TimeSeries(
    name,
    int(epoch(requestContext["startTime"])),
    int(epoch(requestContext["endTime"])),
    step, values)]
def randomWalkFunction(requestContext, name, step=60)
Short Alias: randomWalk()

Returns a random walk starting at 0. This is great for testing when
there is no real data in whisper.

Example::

    &target=randomWalk("The.time.series")

This would create a series named "The.time.series" that contains
points where x(t) == x(t-1)+random()-0.5, and x(0) == 0.

Accepts an optional second argument as step parameter (default step is
60 sec).
3.836529
4.345394
0.882896
custom = [
    check_partial(reaction_id_check,
                  frozenset(r.id for r in model.reactions))
]
super(Medium, self).validate(model=model, checks=checks + custom)
def validate(self, model, checks=[])
Use a defined schema to validate the medium table format.
10.36693
8.461685
1.225161
model.medium = {row.exchange: row.uptake for row in self.data.itertuples(index=False)}
def apply(self, model)
Set the defined medium on the given model.
16.693483
11.052573
1.510371
meta["timestamp"] = datetime.utcnow().isoformat(" ")
meta["platform"] = platform.system()
meta["release"] = platform.release()
meta["python"] = platform.python_version()
meta["packages"] = get_pkg_info("memote")
def add_environment_information(meta)
Record environment information.
3.752616
3.692294
1.016337
element_dist = defaultdict()
# Collecting elements for each metabolite.
for met in rxn.metabolites:
    if met.compartment not in element_dist:
        # Multiplication by the metabolite stoichiometry.
        element_dist[met.compartment] = \
            {k: v * rxn.metabolites[met]
             for (k, v) in iteritems(met.elements)}
    else:
        x = {k: v * rxn.metabolites[met]
             for (k, v) in iteritems(met.elements)}
        y = element_dist[met.compartment]
        element_dist[met.compartment] = \
            {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)}
delta_dict = defaultdict()
# Simplification of the resulting dictionary of dictionaries.
for elements in itervalues(element_dist):
    delta_dict.update(elements)
# Only non-zero values get included in the returned delta-dict.
delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0}
return delta_dict
def find_transported_elements(rxn)
Return a dictionary showing the amount of transported elements of a
rxn.

Collects the elements for each metabolite participating in a reaction,
multiplies the amount by the metabolite's stoichiometry in the
reaction and bins the result according to the compartment that
metabolite is in. This produces a dictionary of dictionaries such as
this ``{'p': {'C': -1, 'H': -4}, 'c': {'C': 1, 'H': 4}}`` which shows
the transported entities. This dictionary is then simplified to only
include the non-zero elements of one single compartment i.e. showing
the precise elements that are transported.

Parameters
----------
rxn : cobra.Reaction
    Any cobra.Reaction containing metabolites.
2.74448
2.602656
1.054492
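The per-compartment merge is a dictionary union with summed values. A minimal sketch of that one step, assuming plain dicts of stoichiometry-scaled element counts::

    # Elements already scaled by stoichiometry for two metabolites in
    # the same compartment.
    x = {"C": -6, "H": -12, "O": -6}
    y = {"C": 1, "O": 2}

    merged = {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)}
    print(merged)  # {'C': -5, 'H': -12, 'O': -4} (key order may vary)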
transport_reactions = []
transport_rxn_candidates = set(model.reactions) - set(model.boundary) \
    - set(find_biomass_reaction(model))
transport_rxn_candidates = set(
    [rxn for rxn in transport_rxn_candidates
     if len(rxn.compartments) >= 2]
)
# Add all labeled transport reactions
sbo_matches = set([rxn for rxn in transport_rxn_candidates
                   if rxn.annotation is not None and
                   'sbo' in rxn.annotation and
                   rxn.annotation['sbo'] in TRANSPORT_RXN_SBO_TERMS])
if len(sbo_matches) > 0:
    transport_reactions += list(sbo_matches)
# Find unlabeled transport reactions via formula or annotation checks
for rxn in transport_rxn_candidates:
    # Check if metabolites have formula field
    rxn_mets = set([met.formula for met in rxn.metabolites])
    if (None not in rxn_mets) and (len(rxn_mets) != 0):
        if is_transport_reaction_formulae(rxn):
            transport_reactions.append(rxn)
    elif is_transport_reaction_annotations(rxn):
        transport_reactions.append(rxn)
return set(transport_reactions)
def find_transport_reactions(model)
Return a list of all transport reactions.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
A transport reaction is defined as follows:

1. It contains metabolites from at least 2 compartments and
2. at least 1 metabolite undergoes no chemical reaction, i.e., the
   formula and/or annotation stays the same on both sides of the
   equation.

A notable exception is transport via PTS, which also contains the
following restriction:

3. The transported metabolite(s) are transported into a compartment
   through the exchange of a phosphate group.

An example of transport via PTS would be
pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c)

Reactions similar to transport via PTS (referred to as "modified
transport reactions") follow a similar pattern:
A(x) + B-R(y) -> A-R(y) + B(y)

Such modified transport reactions can be detected, but only when a
formula field exists for all metabolites in a particular reaction. If
this is not the case, transport reactions are identified through
annotations, which cannot detect modified transport reactions.
2.926057
2.774671
1.05456
# Collecting criteria to classify transporters by.
rxn_reactants = set([met.formula for met in rxn.reactants])
rxn_products = set([met.formula for met in rxn.products])
# Looking for formulas that stay the same on both sides of the
# reaction.
transported_mets = \
    [formula for formula in rxn_reactants if formula in rxn_products]
# Collect information on the elemental differences between
# compartments in the reaction.
delta_dicts = find_transported_elements(rxn)
non_zero_array = [v for (k, v) in iteritems(delta_dicts) if v != 0]
# Excluding reactions such as oxidoreductases where no net
# transport of Hydrogen is occurring, but rather just an exchange of
# electrons or charges effecting a change in protonation.
if set(transported_mets) != set('H') and list(
        delta_dicts.keys()
) == ['H']:
    pass
# All other reactions for which the amount of transported elements is
# not zero, which are not part of the model's exchange nor
# biomass reactions, are defined as transport reactions.
# This includes reactions where the transported metabolite reacts with
# a carrier molecule.
elif sum(non_zero_array):
    return True
def is_transport_reaction_formulae(rxn)
Return boolean if a reaction is a transport reaction (from formulae).

Parameters
----------
rxn : cobra.Reaction
    The metabolic reaction under investigation.
7.419016
7.275484
1.019728
reactants = set([(k, tuple(v)) for met in rxn.reactants
                 for k, v in iteritems(met.annotation)
                 if met.id != "H" and k is not None and
                 k != 'sbo' and v is not None])
products = set([(k, tuple(v)) for met in rxn.products
                for k, v in iteritems(met.annotation)
                if met.id != "H" and k is not None and
                k != 'sbo' and v is not None])
# Find intersection between reactant annotations and
# product annotations to find common metabolites between them,
# satisfying the requirements for a transport reaction. Reactions such
# as those involving oxidoreductases (where no net transport of
# Hydrogen is occurring, but rather just an exchange of electrons or
# charges effecting a change in protonation) are excluded.
transported_mets = reactants & products
if len(transported_mets) > 0:
    return True
def is_transport_reaction_annotations(rxn)
Return boolean if a reaction is a transport reaction (from
annotations).

Parameters
----------
rxn : cobra.Reaction
    The metabolic reaction under investigation.
5.240935
5.491704
0.954337
first = set(find_met_in_model(model, pair[0]))
second = set(find_met_in_model(model, pair[1]))
hits = list()
for rxn in model.reactions:
    # FIXME: Use `set.issubset`, much more idiomatic.
    if len(first & set(rxn.reactants)) > 0 and len(
            second & set(rxn.products)) > 0:
        hits.append(rxn)
    elif len(first & set(rxn.products)) > 0 and len(
            second & set(rxn.reactants)) > 0:
        hits.append(rxn)
return frozenset(hits)
def find_converting_reactions(model, pair)
Find all reactions which convert a given metabolite pair.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
pair : tuple or list
    A pair of metabolite identifiers without compartment suffix.

Returns
-------
frozenset
    The set of reactions that have one of the pair on their left-hand
    side and the other on the right-hand side.
2.615389
2.513756
1.040431
sbo_matches = set([rxn for rxn in model.reactions if
                   rxn.annotation is not None and
                   'sbo' in rxn.annotation and
                   rxn.annotation['sbo'] == 'SBO:0000629'])
if len(sbo_matches) > 0:
    return list(sbo_matches)

buzzwords = ['biomass', 'growth', 'bof']
buzzword_matches = set([rxn for rxn in model.reactions if any(
    string in rxn.id.lower() for string in buzzwords)])

biomass_met = []
for met in model.metabolites:
    if 'biomass' in met.id.lower() or (
            met.name is not None and 'biomass' in met.name.lower()):
        biomass_met.append(met)
# Only use the metabolite-based matches when exactly one biomass
# metabolite was found. (The original compared the list itself to 1,
# which can never be true, and accessed `.reactions` on the list
# instead of on the metabolite.)
if len(biomass_met) == 1:
    biomass_met_matches = set(
        biomass_met[0].reactions
    ) - set(model.boundary)
else:
    biomass_met_matches = set()

return list(buzzword_matches | biomass_met_matches)
def find_biomass_reaction(model)
Return a list of the biomass reaction(s) of the model.

This function identifies possible biomass reactions in two stages:

1. Return reactions that include the SBO annotation "SBO:0000629" for
   biomass.

If no reactions can be identified this way, fall back to heuristics:

2. Look for the ``buzzwords`` "biomass", "growth" and "bof" in
   reaction IDs.
3. Look for metabolite IDs or names that contain the ``buzzword``
   "biomass" and obtain the set of reactions they are involved in.
4. Remove boundary reactions from this set.
5. Return the union of reactions that match the buzzwords and of the
   reactions that the matching metabolites are involved in.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
list
    Identified biomass reactions.
2.803485
2.305174
1.216171
try:
    extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
    extracellular = None
return find_boundary_types(model, 'demand', extracellular)
def find_demand_reactions(model)
Return a list of demand reactions.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
[1] defines demand reactions as:

-- 'unbalanced network reactions that allow the accumulation of a
   compound'
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are known to be produced
   by the organism [..] (i) for which no information is available
   about their fractional distribution to the biomass or (ii) which
   may only be produced in some environmental conditions'
-- reactions with a formula such as: 'met_c -> '

Demand reactions differ from exchange reactions in that the
metabolites are not removed from the extracellular environment, but
from any of the organism's compartments.

References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
   generating a high-quality genome-scale metabolic reconstruction.
   Nature protocols. Nature Publishing Group.
   http://doi.org/10.1038/nprot.2009.203
6.774273
7.257932
0.933361
try:
    extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
    extracellular = None
return find_boundary_types(model, 'sink', extracellular)
def find_sink_reactions(model)
Return a list of sink reactions.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
[1] defines sink reactions as:

-- 'similar to demand reactions' but reversible, thus able to supply
   the model with metabolites
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are produced by
   nonmetabolic cellular processes but that need to be metabolized'
-- reactions with a formula such as: 'met_c <-> '

Sink reactions differ from exchange reactions in that the metabolites
are not removed from the extracellular environment, but from any of
the organism's compartments.

References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
   generating a high-quality genome-scale metabolic reconstruction.
   Nature protocols. Nature Publishing Group.
   http://doi.org/10.1038/nprot.2009.203
6.535685
7.103508
0.920064
try:
    extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
    extracellular = None
return find_boundary_types(model, 'exchange', extracellular)
def find_exchange_rxns(model)
Return a list of exchange reactions.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
[1] defines exchange reactions as:

-- reactions that 'define the extracellular environment'
-- 'unbalanced, extra-organism reactions that represent the supply to
   or removal of metabolites from the extra-organism "space"'
-- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or
   'met_e <=> '

Exchange reactions differ from demand reactions in that the
metabolites are removed from or added to the extracellular environment
only. With this the uptake or secretion of a metabolite is modeled,
respectively.

References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
   generating a high-quality genome-scale metabolic reconstruction.
   Nature protocols. Nature Publishing Group.
   http://doi.org/10.1038/nprot.2009.203
6.776855
7.70239
0.879838
boundary = set(model.boundary)
transporters = find_transport_reactions(model)
if biomass is None:
    biomass = set(find_biomass_reaction(model))
return boundary | transporters | biomass
def find_interchange_biomass_reactions(model, biomass=None)
Return the set of all transport, boundary, and biomass reactions.

These reactions are either pseudo-reactions, or incorporated to allow
metabolites to pass between compartments. Some tests focus on purely
metabolic reactions and hence exclude this set.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
biomass : list or cobra.Reaction, optional
    A list of cobrapy biomass reactions.
4.054776
4.866321
0.833232
model.objective = model.reactions.get_by_id(rxn_id)
model.objective_direction = direction
if single_value:
    try:
        return model.slim_optimize()
    except Infeasible:
        return np.nan
else:
    try:
        solution = model.optimize()
        return solution
    except Infeasible:
        return np.nan
def run_fba(model, rxn_id, direction="max", single_value=True)
Return the solution of an FBA to a set objective function.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
rxn_id : string
    A string containing the reaction ID of the desired FBA objective.
direction : string
    A string containing either "max" or "min" to specify the direction
    of the desired FBA objective function.
single_value : boolean
    Indicates whether the results for all reactions are gathered from
    the solver, or only the result for the objective value.

Returns
-------
cobra.Solution or float
    The full cobra solution object for the corresponding FBA problem
    when ``single_value`` is False; otherwise only the objective value
    (or ``numpy.nan`` if the problem is infeasible).
2.202753
2.585458
0.851978
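In cobrapy, the same call pattern works outside this helper. A minimal usage sketch; ``load_model`` requires a recent cobrapy, and the model name "textbook" and reaction ID "Biomass_Ecoli_core" refer to the bundled E. coli test model (assumptions, not part of this module)::

    from cobra.io import load_model

    model = load_model("textbook")  # small bundled E. coli model
    model.objective = model.reactions.get_by_id("Biomass_Ecoli_core")
    model.objective_direction = "max"
    # Fast path: objective value only, no full solution object.
    print(model.slim_optimize())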
for rxn in model.reactions:
    if rxn.reversibility:
        rxn.bounds = -1, 1
    else:
        rxn.bounds = 0, 1
for boundary in model.boundary:
    boundary.bounds = (0, 0)
def close_boundaries_sensibly(model)
Return a cobra model with all boundaries closed and changed
constraints.

In the returned model previously fixed reactions are no longer
constrained as such. Instead reactions are constrained according to
their reversibility. This is to prevent the FBA from becoming
infeasible when trying to solve a model with closed exchanges and one
fixed reaction.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
cobra.Model
    A cobra model with all boundary reactions closed and the
    constraints of each reaction set according to their reversibility.
3.070671
3.11691
0.985165
return [met for met in model.metabolites if met.compartment == compartment_id]
def metabolites_per_compartment(model, compartment_id)
Identify all metabolites that belong to a given compartment.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
compartment_id : string
    Model specific compartment identifier.

Returns
-------
list
    List of metabolites belonging to a given compartment.
3.455535
4.82352
0.716393
# Sort compartments by decreasing size and extract the largest two.
candidate, second = sorted(
    ((c, len(metabolites_per_compartment(model, c)))
     for c in model.compartments),
    reverse=True, key=itemgetter(1))[:2]
# Compare the size of the compartments.
if candidate[1] == second[1]:
    raise RuntimeError("There is a tie for the largest compartment. "
                       "Compartment {} and {} have equal amounts of "
                       "metabolites.".format(candidate[0], second[0]))
else:
    return candidate[0]
def largest_compartment_id_met(model)
Return the ID of the compartment with the most metabolites.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
string
    Compartment ID of the compartment with the most metabolites.
4.060963
3.97228
1.022325
if compartment_id not in COMPARTMENT_SHORTLIST.keys():
    raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make sure "
                   "you typed the ID correctly, if yes, update the "
                   "shortlist manually.".format(compartment_id))

if len(model.compartments) == 0:
    raise KeyError(
        "It was not possible to identify the "
        "compartment {}, since the "
        "model has no compartments at "
        "all.".format(COMPARTMENT_SHORTLIST[compartment_id][0])
    )

if compartment_id in model.compartments.keys():
    return compartment_id

for name in COMPARTMENT_SHORTLIST[compartment_id]:
    for c_id, c_name in model.compartments.items():
        if c_name.lower() == name:
            return c_id

if compartment_id == 'c':
    return largest_compartment_id_met(model)
def find_compartment_id_in_model(model, compartment_id)
Identify a model compartment by looking up names in
COMPARTMENT_SHORTLIST.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
compartment_id : string
    Memote internal compartment identifier used to access compartment
    name shortlist to look up potential compartment names.

Returns
-------
string
    Compartment identifier in the model corresponding to
    compartment_id.
3.539366
3.182014
1.112304
def compare_annotation(annotation):
    query_values = set(utils.flatten(annotation.values()))
    ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id]))
    return query_values & ref_values

# Make sure that the MNX ID we're looking up exists in the metabolite
# shortlist.
if mnx_id not in METANETX_SHORTLIST.columns:
    raise ValueError(
        "{} is not in the MetaNetX Shortlist! Make sure "
        "you typed the ID correctly, if yes, update the "
        "shortlist by updating and re-running the script "
        "generate_mnx_shortlists.py.".format(mnx_id)
    )
candidates = []
# The MNX ID used in the model may or may not be tagged with a
# compartment tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is tested
# with the following regex.
# If the MNX ID itself cannot be found as an ID, we try all other
# identifiers that are provided by our shortlist of MetaNetX' mapping
# table.
regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(mnx_id))
if model.metabolites.query(regex):
    candidates = model.metabolites.query(regex)
elif model.metabolites.query(compare_annotation, attribute='annotation'):
    candidates = model.metabolites.query(
        compare_annotation, attribute='annotation'
    )
else:
    for value in METANETX_SHORTLIST[mnx_id]:
        if value:
            for ident in value:
                regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(ident))
                if model.metabolites.query(regex, attribute='id'):
                    candidates.extend(
                        model.metabolites.query(regex, attribute='id'))

# Return a list of all possible candidates if no specific compartment
# ID is provided.
# Otherwise, just return the candidate in one specific compartment.
# Raise an exception if there are more than one possible candidates
# for a given compartment.
if compartment_id is None:
    return candidates
else:
    candidates_in_compartment = [
        cand for cand in candidates
        if cand.compartment == compartment_id
    ]
    if len(candidates_in_compartment) == 0:
        raise RuntimeError(
            "It was not possible to identify any metabolite in "
            "compartment {} corresponding to the following MetaNetX "
            "identifier: {}. Make sure that a cross-reference to this "
            "ID in the MetaNetX Database exists for your identifier "
            "namespace.".format(compartment_id, mnx_id))
    elif len(candidates_in_compartment) > 1:
        raise RuntimeError(
            "It was not possible to uniquely identify a single "
            "metabolite in compartment {} that corresponds to the "
            "following MetaNetX identifier: {}. Instead these "
            "candidates were found: {}. Check that metabolite "
            "compartment tags are correct. Consider switching to a "
            "namespace scheme where identifiers are truly "
            "unique.".format(compartment_id, mnx_id,
                             utils.get_ids(candidates_in_compartment)))
    else:
        return candidates_in_compartment
def find_met_in_model(model, mnx_id, compartment_id=None)
Return specific metabolites by looking up IDs in METANETX_SHORTLIST.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
mnx_id : string
    Memote internal MetaNetX metabolite identifier used to map between
    cross-references in the METANETX_SHORTLIST.
compartment_id : string, optional
    ID of the specific compartment where the metabolites should be
    found. Defaults to returning matching metabolites from all
    compartments.

Returns
-------
list
    cobra.Metabolite(s) matching the mnx_id.
3.85789
3.598658
1.072036
lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions],
                          dtype=float)
upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions],
                          dtype=float)
lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0])
upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0])
if np.isnan(lower_bound):
    LOGGER.warning("Could not identify a median lower bound.")
    lower_bound = -1000.0
if np.isnan(upper_bound):
    LOGGER.warning("Could not identify a median upper bound.")
    upper_bound = 1000.0
return lower_bound, upper_bound
def find_bounds(model)
Return the median upper and lower bound of the metabolic model.

Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000)
but this may not be the case for merged or autogenerated models. In
these cases, this function is used to iterate over all the bounds of
all the reactions and find the median bound values in the model, which
are then used as the 'most common' bounds.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
1.929907
1.841197
1.048181
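The median-of-nonzero-bounds trick reduces to plain numpy. A minimal sketch::

    import numpy as np

    lower_bounds = np.asarray([-1000.0, 0.0, -1000.0, -5.0], dtype=float)
    # Drop the zero bounds before taking the median.
    nonzero = lower_bounds[lower_bounds != 0.0]
    print(np.nanmedian(nonzero))  # -1000.0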
return self._template.safe_substitute( report_type=self._report_type, results=self.render_json() )
def render_html(self)
Render an HTML report.
7.594271
6.229515
1.219079
# LOGGER.info("Begin scoring")
cases = self.get_configured_tests() | set(self.result.cases)
scores = DataFrame({"score": 0.0, "max": 1.0},
                   index=sorted(cases))
self.result.setdefault("score", dict())
self.result["score"]["sections"] = list()
# Calculate the scores for each test individually.
for test, result in iteritems(self.result.cases):
    # LOGGER.info("Calculate score for test: '%s'.", test)
    # Test metric may be a dictionary for a parametrized test.
    metric = result["metric"]
    if hasattr(metric, "items"):
        result["score"] = test_score = dict()
        total = 0.0
        for key, value in iteritems(metric):
            value = 1.0 - value
            total += value
            test_score[key] = value
        # For some reason there are parametrized tests without cases.
        if len(metric) == 0:
            metric = 0.0
        else:
            metric = total / len(metric)
    else:
        metric = 1.0 - metric
    scores.at[test, "score"] = metric
    scores.loc[test, :] *= self.config["weights"].get(test, 1.0)
score = 0.0
maximum = 0.0
# Calculate the scores for each section considering the individual
# test case scores.
for section_id, card in iteritems(
        self.config['cards']['scored']['sections']):
    # LOGGER.info("Calculate score for section: '%s'.", section_id)
    cases = card.get("cases", None)
    if cases is None:
        continue
    card_score = scores.loc[cases, "score"].sum()
    card_total = scores.loc[cases, "max"].sum()
    # Format results nicely to work immediately with Vega Bar Chart.
    section_score = {"section": section_id,
                     "score": card_score / card_total}
    self.result["score"]["sections"].append(section_score)
    # Calculate the final score for the entire model.
    weight = card.get("weight", 1.0)
    score += card_score * weight
    maximum += card_total * weight
self.result["score"]["total_score"] = score / maximum
def compute_score(self)
Calculate the overall test score using the configuration.
3.686669
3.599569
1.024197
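The score inversion and averaging above reduce to a few lines. A minimal sketch on a toy result set, assuming failure-ratio metrics and equal weights::

    metrics = {"test_a": 0.25, "test_b": 0.0, "test_c": 1.0}

    # A metric is a failure ratio, so the score is its complement.
    scores = {test: 1.0 - metric for test, metric in metrics.items()}
    total = sum(scores.values()) / len(scores)  # all weights 1.0
    print(scores)  # {'test_a': 0.75, 'test_b': 1.0, 'test_c': 0.0}
    print(total)   # 0.5833...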
return [elem for elem in getattr(model, components) if elem.annotation is None or 'sbo' not in elem.annotation]
def find_components_without_sbo_terms(model, components)
Find model components that are not annotated with any SBO terms.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
components : {"metabolites", "reactions", "genes"}
    A string denoting `cobra.Model` components.

Returns
-------
list
    The components without any SBO term annotation.
7.009865
7.66
0.915126
# check for multiple allowable SBO terms
if isinstance(term, list):
    return [elem for elem in items
            if elem.annotation is None or
            'sbo' not in elem.annotation or
            not any(i in elem.annotation['sbo'] for i in term)]
else:
    return [elem for elem in items
            if elem.annotation is None or
            'sbo' not in elem.annotation or
            term not in elem.annotation['sbo']]
def check_component_for_specific_sbo_term(items, term)
Identify model components that lack a specific SBO term(s).

Parameters
----------
items : list
    A list of model components i.e. reactions to be checked for a
    specific SBO term.
term : str or list of str
    A string denoting a valid SBO term matching the regex
    '^SBO:\d{7}$' or a list containing such string elements.

Returns
-------
list
    The components without any or that specific SBO term annotation.
3.182783
2.929486
1.086465
return min((c for c in compounds_identifiers if c.startswith("C")), key=lambda c: int(c[1:]))
def get_smallest_compound_id(compounds_identifiers)
Return the smallest KEGG compound identifier from a list.

KEGG identifiers may map to compounds, drugs or glycans prefixed
respectively with "C", "D", and "G" followed by at least 5 digits. We
choose the lowest KEGG identifier with the assumption that several
identifiers are due to chirality and that the lower one represents the
more common form.

Parameters
----------
compounds_identifiers : list
    A list of mixed KEGG identifiers.

Returns
-------
str
    The KEGG compound identifier with the smallest number.

Raises
------
ValueError
    When compound_identifiers contains no KEGG compound identifiers.
4.145382
5.346778
0.775305
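Usage is straightforward; drug ("D") and glycan ("G") identifiers are filtered out before taking the numeric minimum. A minimal sketch::

    ids = ["C00031", "D00009", "C00221", "G10495"]
    smallest = min((c for c in ids if c.startswith("C")),
                   key=lambda c: int(c[1:]))
    print(smallest)  # C00031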
logger.debug("Looking for KEGG compound identifier for %s.",
             metabolite.id)
kegg_annotation = metabolite.annotation.get("kegg.compound")
if kegg_annotation is None:
    # TODO (Moritz Beber): Currently name matching is very slow and
    # inaccurate. We disable it until there is a better solution.
    # if metabolite.name:
    #     # The compound matcher uses regular expression and chokes
    #     # with a low level error on `[` in the name, for example.
    #     df = compound_matcher.match(metabolite.name)
    #     try:
    #         return df.loc[df["score"] > threshold, "CID"].iat[0]
    #     except (IndexError, AttributeError):
    #         logger.warning(
    #             "Could not match the name %r to any kegg.compound "
    #             "annotation for metabolite %s.",
    #             metabolite.name, metabolite.id
    #         )
    #         return
    # else:
    logger.warning("No kegg.compound annotation for metabolite %s.",
                   metabolite.id)
    return
if isinstance(kegg_annotation, string_types) and \
        kegg_annotation.startswith("C"):
    return kegg_annotation
elif isinstance(kegg_annotation, Iterable):
    try:
        return get_smallest_compound_id(kegg_annotation)
    except ValueError:
        return
logger.warning(
    "No matching kegg.compound annotation for metabolite %s.",
    metabolite.id
)
return
def map_metabolite2kegg(metabolite)
Return a KEGG compound identifier for the metabolite if it exists.

First see if there is an unambiguous mapping to a single KEGG compound
ID provided with the model. If not, check if there is any KEGG
compound ID in a list of mappings. KEGG IDs may map to compounds,
drugs and glycans. KEGG compound IDs are sorted so we keep the lowest
that is there. If none of this works try mapping to KEGG via the
CompoundMatcher by the name of the metabolite. If the metabolite
cannot be mapped at all we simply map it back to its own ID.

Parameters
----------
metabolite : cobra.Metabolite
    The metabolite to be mapped to its KEGG compound identifier.

Returns
-------
None
    If the metabolite could not be mapped.
str
    The smallest KEGG compound identifier that was found.
3.800577
3.609282
1.053001
# Transport reactions where the same metabolite occurs in different
# compartments should have been filtered out but just to be sure, we
# add coefficients in the mapping.
stoichiometry = defaultdict(float)
for met, coef in iteritems(reaction.metabolites):
    kegg_id = metabolite_mapping.setdefault(met,
                                            map_metabolite2kegg(met))
    if kegg_id is None:
        continue
    stoichiometry[kegg_id] += coef
return dict(stoichiometry)
def translate_reaction(reaction, metabolite_mapping)
Return a mapping from KEGG compound identifiers to coefficients.

Parameters
----------
reaction : cobra.Reaction
    The reaction whose metabolites are to be translated.
metabolite_mapping : dict
    An existing mapping from cobra.Metabolite to KEGG compound
    identifier that may already contain the metabolites in question or
    will have to be extended.

Returns
-------
dict
    The stoichiometry of the reaction given as a mapping from
    metabolite KEGG identifier to coefficient.
4.898209
4.51952
1.08379
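A hedged usage sketch (assumes cobra is installed and that translate_reaction and map_metabolite2kegg are importable; C00031 and C00002 are the KEGG compound IDs for D-glucose and ATP):

import cobra

glc = cobra.Metabolite("glc__D_c", compartment="c")
glc.annotation["kegg.compound"] = "C00031"
atp = cobra.Metabolite("atp_c", compartment="c")
atp.annotation["kegg.compound"] = "C00002"
rxn = cobra.Reaction("TOY")
rxn.add_metabolites({glc: -1.0, atp: -1.0})

mapping = {}
print(translate_reaction(rxn, mapping))
# -> {'C00031': -1.0, 'C00002': -1.0}; `mapping` now caches both
# metabolites for subsequent reactions.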
incomplete_mapping = []
problematic_calculation = []
reversibility_indexes = []
unbalanced = []
metabolite_mapping = {}
for rxn in reactions:
    stoich = translate_reaction(rxn, metabolite_mapping)
    if len(stoich) < len(rxn.metabolites):
        incomplete_mapping.append(rxn)
        continue
    try:
        # Remove protons from stoichiometry.
        if "C00080" in stoich:
            del stoich["C00080"]
        eq_rxn = Reaction(stoich, rxn.id)
    except KeyError:
        incomplete_mapping.append(rxn)
        continue
    if eq_rxn.check_full_reaction_balancing():
        try:
            ln_rev_index = eq_rxn.reversibility_index()
        # TODO (Moritz Beber): Which exceptions can we expect here?
        except Exception:
            problematic_calculation.append(rxn)
            continue
        reversibility_indexes.append((rxn, ln_rev_index))
    else:
        unbalanced.append(rxn)
reversibility_indexes.sort(key=lambda p: abs(p[1]), reverse=True)
return (
    reversibility_indexes, incomplete_mapping, problematic_calculation,
    unbalanced
)
def find_thermodynamic_reversibility_index(reactions)
u""" Return the reversibility index of the given reactions. To determine the reversibility index, we calculate the reversibility index ln_gamma (see [1]_ section 3.5) of each reaction using the eQuilibrator API [2]_. Parameters ---------- reactions: list of cobra.Reaction A list of reactions for which to calculate the reversibility index. Returns ------- tuple list of cobra.Reaction, index pairs A list of pairs of reactions and their reversibility indexes. list of cobra.Reaction A list of reactions which contain at least one metabolite that could not be mapped to KEGG on the basis of its annotation. list of cobra.Reaction A list of reactions for which it is not possible to calculate the standard change in Gibbs free energy potential. Reasons of failure include that participating metabolites cannot be broken down with the group contribution method. list of cobra.Reaction A list of reactions that are not chemically or redox balanced. References ---------- .. [1] Elad Noor, Arren Bar-Even, Avi Flamholz, Yaniv Lubling, Dan Davidi, Ron Milo; An integrated open framework for thermodynamics of reactions that combines accuracy and coverage, Bioinformatics, Volume 28, Issue 15, 1 August 2012, Pages 2037–2044, https://doi.org/10.1093/bioinformatics/bts317 .. [2] https://pypi.org/project/equilibrator-api/
3.817662
3.554621
1.074
problem = model.problem
# The transpose of the stoichiometric matrix N.T in the paper.
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
LOGGER.info("model '%s' has %d internal reactions", model.id,
            len(internal_rxns))
LOGGER.info("model '%s' has %d internal metabolites", model.id,
            len(metabolites))
stoich_trans.add([problem.Variable(m.id, lb=1) for m in metabolites])
stoich_trans.update()
con_helpers.add_reaction_constraints(
    stoich_trans, internal_rxns, problem.Constraint)
# The objective is to minimize the metabolite mass vector.
stoich_trans.objective = problem.Objective(
    Zero, direction="min", sloppy=True)
stoich_trans.objective.set_linear_coefficients(
    {var: 1. for var in stoich_trans.variables})
status = stoich_trans.optimize()
if status == OPTIMAL:
    return True
elif status == INFEASIBLE:
    return False
else:
    raise RuntimeError(
        "Could not determine stoichiometric consistency."
        " Solver status is '{}'"
        " (only optimal or infeasible expected).".format(status))
def check_stoichiometric_consistency(model)
Verify the consistency of the model's stoichiometry.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
See [1]_ section 3.1 for a complete description of the algorithm.

References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
       "Detection of Stoichiometric Inconsistencies in Biomolecular
       Models." Bioinformatics 24, no. 19 (2008): 2245.
3.661746
3.737931
0.979618
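The behaviour is easy to probe on a toy model; a hedged sketch (assumes cobra and that check_stoichiometric_consistency is importable). The reaction 1 A -> 2 B together with B -> A admits no strictly positive mass vector, so the model is inconsistent:

import cobra

model = cobra.Model("toy")
a = cobra.Metabolite("A_c", compartment="c")
b = cobra.Metabolite("B_c", compartment="c")
r1 = cobra.Reaction("R1")
r1.add_metabolites({a: -1.0, b: 2.0})  # forces m_A == 2 * m_B
r2 = cobra.Reaction("R2")
r2.add_metabolites({b: -1.0, a: 1.0})  # forces m_A == m_B
model.add_reactions([r1, r2])
print(check_stoichiometric_consistency(model))  # expected: False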
problem = model.problem
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
# The binary variables k[i] in the paper.
k_vars = list()
for met in metabolites:
    # The element m[i] of the mass vector.
    m_var = problem.Variable(met.id)
    k_var = problem.Variable("k_{}".format(met.id), type="binary")
    k_vars.append(k_var)
    stoich_trans.add([m_var, k_var])
    # This constraint is equivalent to 0 <= k[i] <= m[i].
    stoich_trans.add(problem.Constraint(
        k_var - m_var, ub=0, name="switch_{}".format(met.id)))
stoich_trans.update()
con_helpers.add_reaction_constraints(
    stoich_trans, internal_rxns, problem.Constraint)
# The objective is to maximize the binary indicators k[i], subject to the
# above inequality constraints.
stoich_trans.objective = problem.Objective(
    Zero, sloppy=True, direction="max")
stoich_trans.objective.set_linear_coefficients(
    {var: 1. for var in k_vars})
status = stoich_trans.optimize()
if status == OPTIMAL:
    # TODO: See if that could be a Boolean test `bool(var.primal)`.
    return set([model.metabolites.get_by_id(var.name[2:])
                for var in k_vars if var.primal < 0.8])
else:
    raise RuntimeError(
        "Could not compute list of unconserved metabolites."
        " Solver status is '{}' (only optimal expected).".format(status))
def find_unconserved_metabolites(model)
Detect unconserved metabolites.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
See [1]_ section 3.2 for a complete description of the algorithm.

References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
       "Detection of Stoichiometric Inconsistencies in Biomolecular
       Models." Bioinformatics 24, no. 19 (2008): 2245.
4.188702
4.152647
1.008682
if check_stoichiometric_consistency(model):
    return set()
Model, Constraint, Variable, Objective = con_helpers.get_interface(model)
unconserved_mets = find_unconserved_metabolites(model)
LOGGER.info("model has %d unconserved metabolites", len(unconserved_mets))
internal_rxns = con_helpers.get_internals(model)
internal_mets = set(
    met for rxn in internal_rxns for met in rxn.metabolites)
get_id = attrgetter("id")
reactions = sorted(internal_rxns, key=get_id)
metabolites = sorted(internal_mets, key=get_id)
stoich, met_index, rxn_index = con_helpers.stoichiometry_matrix(
    metabolites, reactions)
left_ns = con_helpers.nullspace(stoich.T)
# deal with numerical instabilities
left_ns[np.abs(left_ns) < atol] = 0.0
LOGGER.info("nullspace has dimension %d", left_ns.shape[1])
inc_minimal = set()
(problem, indicators) = con_helpers.create_milp_problem(
    left_ns, metabolites, Model, Variable, Constraint, Objective)
LOGGER.debug(str(problem))
cuts = list()
for met in unconserved_mets:
    row = met_index[met]
    if (left_ns[row] == 0.0).all():
        LOGGER.debug("%s: singleton minimal unconservable set", met.id)
        # singleton set!
        inc_minimal.add((met,))
        continue
    # expect a positive mass for the unconserved metabolite
    problem.variables[met.id].lb = 1e-3
    status = problem.optimize()
    while status == "optimal":
        LOGGER.debug("%s: status %s", met.id, status)
        LOGGER.debug("sum of all primal values: %f",
                     sum(problem.primal_values.values()))
        LOGGER.debug("sum of binary indicators: %f",
                     sum(var.primal for var in indicators))
        solution = [model.metabolites.get_by_id(var.name[2:])
                    for var in indicators if var.primal > 0.2]
        LOGGER.debug("%s: set size %d", met.id, len(solution))
        inc_minimal.add(tuple(solution))
        if len(solution) == 1:
            break
        cuts.append(con_helpers.add_cut(
            problem, indicators, len(solution) - 1, Constraint))
        status = problem.optimize()
    LOGGER.debug("%s: last status %s", met.id, status)
    # reset
    problem.variables[met.id].lb = 0.0
    problem.remove(cuts)
    cuts.clear()
return inc_minimal
def find_inconsistent_min_stoichiometry(model, atol=1e-13)
Detect inconsistent minimal net stoichiometries.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
atol : float, optional
    Values below the absolute tolerance are treated as zero. Expected to
    be very small but larger than zero.

Notes
-----
See [1]_ section 3.3 for a complete description of the algorithm.

References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
       "Detection of Stoichiometric Inconsistencies in Biomolecular
       Models." Bioinformatics 24, no. 19 (2008): 2245.
3.546847
3.637415
0.975101
helpers.close_boundaries_sensibly(model)
fva_result = flux_variability_analysis(model, loopless=False)
return fva_result.index[
    (fva_result["minimum"] <= (-1 + TOLERANCE_THRESHOLD)) |
    (fva_result["maximum"] >= (1 - TOLERANCE_THRESHOLD))
].tolist()
def find_stoichiometrically_balanced_cycles(model)
u""" Find metabolic reactions in stoichiometrically balanced cycles (SBCs). Identify forward and reverse cycles by closing all exchanges and using FVA. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- "SBCs are artifacts of metabolic reconstructions due to insufficient constraints (e.g., thermodynamic constraints and regulatory constraints) [1]_." They are defined by internal reactions that carry flux in spite of closed exchange reactions. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203
8.152309
7.666709
1.063339
exchange = frozenset(model.exchanges)
return [
    met for met in model.metabolites
    if (len(met.reactions) > 0) and all(
        (not rxn.reversibility) and (rxn not in exchange) and
        (rxn.metabolites[met] < 0)
        for rxn in met.reactions
    )
]
def find_orphans(model)
Return metabolites that are only consumed in reactions.

Metabolites that are involved in an exchange reaction are never
considered to be orphaned.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
3.962307
3.668952
1.079956
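A hedged sketch of find_orphans on a toy model (assumes cobra and that the helper is importable). The metabolite x_c only ever appears as a reactant of an irreversible internal reaction, so it qualifies:

import cobra

model = cobra.Model("toy")
x = cobra.Metabolite("x_c", compartment="c")
y = cobra.Metabolite("y_c", compartment="c")
rxn = cobra.Reaction("R1", lower_bound=0.0, upper_bound=1000.0)
rxn.add_metabolites({x: -1.0, y: 1.0})
model.add_reactions([rxn])
print([met.id for met in find_orphans(model)])  # expected: ['x_c']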
mets_not_produced = list()
helpers.open_exchanges(model)
for met in model.metabolites:
    with model:
        exch = model.add_boundary(
            met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
        solution = helpers.run_fba(model, exch.id)
        if np.isnan(solution) or solution < TOLERANCE_THRESHOLD:
            mets_not_produced.append(met)
return mets_not_produced
def find_metabolites_not_produced_with_open_bounds(model)
Return metabolites that cannot be produced with open exchange reactions.

A perfect model should be able to produce each and every metabolite when
all medium components are available.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
list
    Those metabolites that could not be produced.
5.155942
5.530455
0.932282
mets_not_consumed = list()
helpers.open_exchanges(model)
for met in model.metabolites:
    with model:
        exch = model.add_boundary(
            met, type="irrex", reaction_id="IRREX", lb=-1000, ub=0)
        solution = helpers.run_fba(model, exch.id, direction="min")
        if np.isnan(solution) or abs(solution) < TOLERANCE_THRESHOLD:
            mets_not_consumed.append(met)
return mets_not_consumed
def find_metabolites_not_consumed_with_open_bounds(model)
Return metabolites that cannot be consumed with open boundary reactions.

When all metabolites can be secreted, it should be possible for each and
every metabolite to be consumed in some form.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
list
    Those metabolites that could not be consumed.
5.431912
6.120234
0.887533
try:
    fva_result = flux_variability_analysis(model, fraction_of_optimum=1.0)
except Infeasible as err:
    LOGGER.error("Failed to find reactions with unbounded flux "
                 "because '{}'. This may be a bug.".format(err))
    raise Infeasible("It was not possible to run flux variability "
                     "analysis on the model. Make sure that the model "
                     "can be solved! Check if the constraints are not "
                     "too strict.")
# Per reaction (row) the flux is below threshold (close to zero).
conditionally_blocked = fva_result.loc[
    fva_result.abs().max(axis=1) < TOLERANCE_THRESHOLD
].index.tolist()
small, large = helpers.find_bounds(model)
# Find those reactions whose flux is close to or outside of the median
# upper or lower bound, i.e., appears unconstrained.
unlimited_flux = fva_result.loc[
    np.isclose(fva_result["maximum"], large, atol=TOLERANCE_THRESHOLD) |
    (fva_result["maximum"] > large) |
    np.isclose(fva_result["minimum"], small, atol=TOLERANCE_THRESHOLD) |
    (fva_result["minimum"] < small)
].index.tolist()
try:
    fraction = len(unlimited_flux) / \
        (len(model.reactions) - len(conditionally_blocked))
except ZeroDivisionError:
    LOGGER.error("Division by zero! Failed to calculate the "
                 "fraction of unbounded reactions. Does this model "
                 "have any reactions at all?")
    raise ZeroDivisionError("It was not possible to calculate the "
                            "fraction of unbounded reactions to "
                            "un-blocked reactions. This may be because "
                            "the model doesn't have any reactions at "
                            "all or that none of the reactions can "
                            "carry a flux larger than zero!")
return unlimited_flux, fraction, conditionally_blocked
def find_reactions_with_unbounded_flux_default_condition(model)
Return reactions whose flux is unbounded in the default condition.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
tuple
    list
        A list of reactions that in the default modeling conditions can
        carry flux as high or low as the system's maximal and minimal
        bounds.
    float
        The fraction of unbounded reactions out of all non-blocked
        reactions.
    list
        A list of reactions that in the default modeling conditions
        cannot carry any flux at all.
3.999624
3.837397
1.042275
if dtype_conversion is None:
    dtype_conversion = {}
name, ext = filename.split(".", 1)
ext = ext.lower()
# Completely empty columns are interpreted as float by default.
dtype_conversion["comment"] = str
if "csv" in ext:
    df = pd.read_csv(filename, dtype=dtype_conversion, encoding="utf-8")
elif "tsv" in ext:
    df = pd.read_table(filename, dtype=dtype_conversion, encoding="utf-8")
elif "xls" in ext or "xlsx" in ext:
    df = pd.read_excel(filename, dtype=dtype_conversion, encoding="utf-8")
# TODO: Add a function to parse ODS data into a pandas data frame.
else:
    raise ValueError("Unknown file format '{}'.".format(ext))
return df
def read_tabular(filename, dtype_conversion=None)
Read a tabular data file which can be CSV, TSV, XLS or XLSX.

Parameters
----------
filename : str or pathlib.Path
    The full file path. May be a compressed file.
dtype_conversion : dict
    Column names as keys and corresponding type for loading the data.
    Please take a look at the `pandas documentation
    <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
    for detailed explanations.

Returns
-------
pandas.DataFrame
    The data table.
2.654434
2.756351
0.963025
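A hedged usage sketch (the file name and column names are made up for the demo; pandas is used here only to create the input file):

import pandas as pd

pd.DataFrame({
    "gene": ["b0001"], "essential": [True], "comment": [None],
}).to_csv("demo_essentiality.csv", index=False)
df = read_tabular("demo_essentiality.csv",
                  dtype_conversion={"essential": bool})
print(df.dtypes)  # "comment" is forced to str by read_tabular itself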
model_obj, sbml_ver, notifications = api.validate_model(model)
if model_obj is None:
    LOGGER.critical(
        "The model could not be loaded due to the following SBML errors.")
    utils.stdout_notifications(notifications)
    api.validation_report(model, notifications, filename)
    sys.exit(1)
if not any(a.startswith("--tb") for a in pytest_args):
    pytest_args = ["--tb", "no"] + pytest_args
# Add further directories to search for tests.
pytest_args.extend(custom_tests)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
    config.merge(ReportConfiguration.load(custom))
model_obj.solver = solver
_, results = api.test_model(model_obj, sbml_version=sbml_ver, results=True,
                            pytest_args=pytest_args, skip=skip,
                            exclusive=exclusive, experimental=experimental)
with open(filename, "w", encoding="utf-8") as file_handle:
    LOGGER.info("Writing snapshot report to '%s'.", filename)
    file_handle.write(api.snapshot_report(results, config))
def snapshot(model, filename, pytest_args, exclusive, skip, solver, experimental, custom_tests, custom_config)
Take a snapshot of a model's state and generate a report.

MODEL: Path to model file. Can also be supplied via the environment
variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
4.477048
4.601375
0.972981
callbacks.git_installed()
LOGGER.info("Initialising history report generation.")
if location is None:
    raise click.BadParameter("No 'location' given or configured.")
try:
    repo = git.Repo()
except git.InvalidGitRepositoryError:
    LOGGER.critical(
        "The history report requires a git repository in order to check "
        "the model's commit history.")
    sys.exit(1)
LOGGER.info("Obtaining history of results from "
            "the deployment branch {}.".format(deployment))
repo.git.checkout(deployment)
try:
    manager = managers.SQLResultManager(repository=repo, location=location)
except (AttributeError, ArgumentError):
    manager = managers.RepoResultManager(
        repository=repo, location=location)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
    config.merge(ReportConfiguration.load(custom))
LOGGER.info("Tracing the commit history.")
history = managers.HistoryManager(repository=repo, manager=manager)
history.load_history(model, skip={deployment})
LOGGER.info("Composing the history report.")
report = api.history_report(history, config=config)
with open(filename, "w", encoding="utf-8") as file_handle:
    file_handle.write(report)
def history(location, model, filename, deployment, custom_config)
Generate a report over a model's git commit history.
5.293326
4.956921
1.067866
if not any(a.startswith("--tb") for a in pytest_args):
    pytest_args = ["--tb", "no"] + pytest_args
# Add further directories to search for tests.
pytest_args.extend(custom_tests)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
    config.merge(ReportConfiguration.load(custom))
# Build the diff report specific data structure.
diff_results = dict()
model_and_model_ver_tuple = list()
for model_path in models:
    try:
        model_filename = os.path.basename(model_path)
        diff_results.setdefault(model_filename, dict())
        model, model_ver, notifications = api.validate_model(model_path)
        if model is None:
            head, tail = os.path.split(filename)
            report_path = os.path.join(
                head, '{}_structural_report.html'.format(model_filename))
            api.validation_report(model_path, notifications, report_path)
            LOGGER.critical(
                "The model {} could not be loaded due to SBML errors "
                "reported in {}.".format(model_filename, report_path))
            continue
        model.solver = solver
        model_and_model_ver_tuple.append((model, model_ver))
    except (IOError, SBMLError):
        LOGGER.debug("Could not load model '%s'.", model_path,
                     exc_info=True)
        LOGGER.warning("An error occurred while loading the model '%s'. "
                       "Skipping.", model_filename)
# Abort the diff report unless at least two models can be loaded
# successfully.
if len(model_and_model_ver_tuple) < 2:
    LOGGER.critical(
        "Out of the %d provided models only %d could be loaded. Please, "
        "check if the models that could not be loaded are valid SBML. "
        "Aborting.", len(models), len(model_and_model_ver_tuple))
    sys.exit(1)
# Run pytest in individual processes to avoid interference.
partial_test_diff = partial(_test_diff, pytest_args=pytest_args,
                            skip=skip, exclusive=exclusive,
                            experimental=experimental)
pool = Pool(min(len(models), cpu_count()))
results = pool.map(partial_test_diff, model_and_model_ver_tuple)
for model_path, result in zip(models, results):
    model_filename = os.path.basename(model_path)
    diff_results[model_filename] = result
with open(filename, "w", encoding="utf-8") as file_handle:
    LOGGER.info("Writing diff report to '%s'.", filename)
    file_handle.write(api.diff_report(diff_results, config))
def diff(models, filename, pytest_args, exclusive, skip, solver, experimental, custom_tests, custom_config)
Take a snapshot of all the supplied models and generate a diff report.

MODELS: List of paths to two or more model files.
3.393305
3.390393
1.000859
self._history = dict()
self._history["commits"] = commits = dict()
self._history["branches"] = branches = dict()
for branch in self._repo.refs:
    LOGGER.debug(branch.name)
    if branch.name in skip:
        continue
    branches[branch.name] = branch_history = list()
    latest = branch.commit
    history = [latest] + list(latest.iter_parents())
    for commit in history:
        # Find model in committed files.
        if not is_modified(model, commit):
            LOGGER.info(
                "The model was not modified in commit '{}'. "
                "Skipping.".format(commit))
            continue
        branch_history.append(commit.hexsha)
        if commit.hexsha not in commits:
            commits[commit.hexsha] = sub = dict()
            sub["timestamp"] = commit.authored_datetime.isoformat(" ")
            sub["author"] = commit.author.name
            sub["email"] = commit.author.email
LOGGER.debug("%s", json.dumps(self._history, indent=2))
def build_branch_structure(self, model, skip)
Inspect and record the repo's branches and their history.
3.445885
3.271324
1.053361
if self._history is None:
    self.build_branch_structure(model, skip)
self._results = dict()
all_commits = list(self._history["commits"])
for commit in all_commits:
    try:
        self._results[commit] = self.manager.load(commit)
    except (IOError, NoResultFound) as err:
        LOGGER.error("Could not load result '%s'.", commit)
        LOGGER.debug("%s", str(err))
def load_history(self, model, skip={"gh-pages"})
Load the entire results history into memory.

This could become a bad idea in the distant future once histories grow
very large.
4.463073
4.307599
1.036093
assert self._results is not None, \
    "Please call the method `load_history` first."
return self._results.get(commit, default)
def get_result(self, commit, default=MemoteResult())
Return an individual result from the history if it exists.
6.656714
4.921711
1.35252
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
    model.metabolites, model.reactions
)
abs_matrix = np.abs(s_matrix)
return abs_matrix.max(), abs_matrix[abs_matrix > 0].min()
def absolute_extreme_coefficient_ratio(model)
Return the maximum and minimum absolute, non-zero coefficients.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
5.590013
4.855915
1.151176
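The ratio itself is plain numpy; a self-contained sketch on a toy stoichiometric matrix (the function above additionally builds S from the model via con_helpers.stoichiometry_matrix):

import numpy as np

s_matrix = np.array([
    [-1.0, 0.0, 1000.0],
    [1.0, -0.001, 0.0],
])
abs_matrix = np.abs(s_matrix)
extremes = (abs_matrix.max(), abs_matrix[abs_matrix > 0].min())
print(extremes)  # (1000.0, 0.001) -> a ratio of 1e6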
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
    model.metabolites, model.reactions
)
ln_matrix = con_helpers.nullspace(s_matrix.T)
return ln_matrix.shape[1]
def number_independent_conservation_relations(model)
Return the number of conserved metabolite pools.

This number is given by the left null space of the stoichiometric matrix.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
7.125763
6.191908
1.150819
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
    model.metabolites, model.reactions
)
return con_helpers.rank(s_matrix)
def matrix_rank(model)
Return the rank of the model's stoichiometric matrix.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
7.433455
6.801293
1.092947
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
    model.metabolites, model.reactions
)
return s_matrix.shape[1] - matrix_rank(model)
def degrees_of_freedom(model)
Return the degrees of freedom, i.e., the number of "free variables".

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
This specifically refers to the dimensionality of the (right) null space
of the stoichiometric matrix, as dim(Null(S)) corresponds directly to the
number of free variables in the system [1]_. The formula used calculates
this using the rank-nullity theorem [2]_.

References
----------
.. [1] Fukuda, K. & Terlaky, T. Criss-cross methods: A fresh view on
       pivot algorithms. Mathematical Programming 79, 369-395 (1997).
.. [2] Alama, J. The Rank+Nullity Theorem. Formalized Mathematics 15,
       (2007).
8.44662
8.787848
0.961171
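Rank-nullity on a toy matrix, as a self-contained numpy sketch: the degrees of freedom equal the number of reactions (columns) minus rank(S). The third reaction below is the sum of the first two, so one flux mode remains free:

import numpy as np

# 3 metabolites x 3 reactions: A -> B, B -> C, and A -> C.
s_matrix = np.array([
    [-1.0, 0.0, -1.0],  # A
    [1.0, -1.0, 0.0],   # B
    [0.0, 1.0, 1.0],    # C
])
dof = s_matrix.shape[1] - np.linalg.matrix_rank(s_matrix)
print(dof)  # 3 - 2 = 1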
self.load_medium(model)
self.load_essentiality(model)
self.load_growth(model)
# self.load_experiment(config.config.get("growth"), model)
return self
def load(self, model)
Load all information from an experimental configuration file.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.
8.41637
7.461856
1.127919
validator = Draft4Validator(self.SCHEMA)
if not validator.is_valid(self.config):
    for err in validator.iter_errors(self.config):
        LOGGER.error(str(err.message))
    validator.validate(self.config)
def validate(self)
Validate the configuration file.
3.421712
3.161396
1.082342
media = self.config.get("medium")
if media is None:
    return
definitions = media.get("definitions")
if definitions is None or len(definitions) == 0:
    return
path = self.get_path(media, join("data", "experimental", "media"))
for medium_id, medium in iteritems(definitions):
    if medium is None:
        medium = dict()
    filename = medium.get("filename")
    if filename is None:
        filename = join(path, "{}.csv".format(medium_id))
    elif not isabs(filename):
        filename = join(path, filename)
    tmp = Medium(identifier=medium_id, obj=medium, filename=filename)
    tmp.load()
    tmp.validate(model)
    self.media[medium_id] = tmp
def load_medium(self, model)
Load and validate all media.
3.547508
3.290679
1.078048
data = self.config.get("essentiality") if data is None: return experiments = data.get("experiments") if experiments is None or len(experiments) == 0: return path = self.get_path(data, join("data", "experimental", "essentiality")) for exp_id, exp in iteritems(experiments): if exp is None: exp = dict() filename = exp.get("filename") if filename is None: filename = join(path, "{}.csv".format(exp_id)) elif not isabs(filename): filename = join(path, filename) experiment = EssentialityExperiment( identifier=exp_id, obj=exp, filename=filename) if experiment.medium is not None: assert experiment.medium in self.media, \ "Experiment '{}' has an undefined medium '{}'.".format( exp_id, experiment.medium) experiment.medium = self.media[experiment.medium] experiment.load() experiment.validate(model) self.essentiality[exp_id] = experiment
def load_essentiality(self, model)
Load and validate all data files.
3.024916
2.888064
1.047385
data = self.config.get("growth") if data is None: return experiments = data.get("experiments") if experiments is None or len(experiments) == 0: return path = self.get_path(data, join("data", "experimental", "growth")) for exp_id, exp in iteritems(experiments): if exp is None: exp = dict() filename = exp.get("filename") if filename is None: filename = join(path, "{}.csv".format(exp_id)) elif not isabs(filename): filename = join(path, filename) growth = GrowthExperiment( identifier=exp_id, obj=exp, filename=filename) if growth.medium is not None: assert growth.medium in self.media, \ "Growth-experiment '{}' has an undefined medium '{}'." \ "".format(exp_id, growth.medium) growth.medium = self.media[growth.medium] growth.load() growth.validate(model) self.growth[exp_id] = growth
def load_growth(self, model)
Load and validate all data files.
3.142109
3.000684
1.047131
path = obj.get("path") if path is None: path = join(self._base, default) if not isabs(path): path = join(self._base, path) return path
def get_path(self, obj, default)
Return a relative or absolute path to experimental data.
3.064375
2.78426
1.100607
return [elem for elem in getattr(model, components) if elem.annotation is None or len(elem.annotation) == 0]
def find_components_without_annotation(model, components)
Find model components with empty annotation attributes. Parameters ---------- model : cobra.Model A cobrapy metabolic model. components : {"metabolites", "reactions", "genes"} A string denoting `cobra.Model` components. Returns ------- list The components without any annotation.
5.739979
6.508082
0.881977
def is_faulty(annotation, key, pattern):
    # Ignore missing annotation for this database.
    if key not in annotation:
        return False
    test = annotation[key]
    if isinstance(test, native_str):
        return pattern.match(test) is None
    else:
        return any(pattern.match(elem) is None for elem in test)

pattern = {
    "metabolites": METABOLITE_ANNOTATIONS,
    "reactions": REACTION_ANNOTATIONS,
    "genes": GENE_PRODUCT_ANNOTATIONS
}[component][db]
return [elem for elem in elements
        if is_faulty(elem.annotation, db, pattern)]
def generate_component_annotation_miriam_match(elements, component, db)
Tabulate which element annotations match the given MIRIAM database
pattern.

If the relevant MIRIAM identifier is not in an element's annotation it
is ignored.

Parameters
----------
elements : list
    Elements of a model, either metabolites or reactions.
component : {"metabolites", "reactions"}
    A string denoting a type of ``cobra.Model`` component.
db : str
    One of the MIRIAM database identifiers.

Returns
-------
list
    The components whose annotation does not match the pattern for the
    MIRIAM database.
4.413906
3.690943
1.195875
patterns = {
    "metabolites": METABOLITE_ANNOTATIONS,
    "reactions": REACTION_ANNOTATIONS,
    "genes": GENE_PRODUCT_ANNOTATIONS
}[components]
databases = list(patterns)
data = list()
index = list()
for elem in getattr(model, components):
    index.append(elem.id)
    data.append(tuple(patterns[db].match(elem.id) is not None
                      for db in databases))
df = pd.DataFrame(data, index=index, columns=databases)
if components != "genes":
    # Clean up the data frame. Unfortunately the Biocyc patterns match
    # broadly. Hence, whenever a metabolite or reaction ID matches any
    # other DB pattern AND the Biocyc pattern, we have to assume that the
    # Biocyc match is a false positive.
    # First determine all rows in which 'biocyc' and other entries are
    # True simultaneously and use this Boolean series to create another
    # column temporarily.
    df['duplicate'] = df[df['biocyc']].sum(axis=1) >= 2
    # Replace all NaN values with False.
    df['duplicate'].fillna(False, inplace=True)
    # Use the additional column to index the original data frame in order
    # to identify false-positive Biocyc hits and set them to False.
    df.loc[df['duplicate'], 'biocyc'] = False
    # Delete the temporary column.
    del df['duplicate']
return df
def generate_component_id_namespace_overview(model, components)
Tabulate which MIRIAM databases the component's identifier matches.

Parameters
----------
model : cobra.Model
    A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
    A string denoting `cobra.Model` components.

Returns
-------
pandas.DataFrame
    The index of the table is given by the component identifiers. Each
    column corresponds to one MIRIAM database and a Boolean entry
    determines whether the annotation matches.
6.782138
5.684547
1.193083
true_positive = predicted_essential & expected_essential
tp = len(true_positive)
true_negative = predicted_nonessential & expected_nonessential
tn = len(true_negative)
false_positive = predicted_essential - expected_essential
fp = len(false_positive)
false_negative = predicted_nonessential - expected_nonessential
fn = len(false_negative)
# sensitivity or true positive rate
try:
    tpr = tp / (tp + fn)
except ZeroDivisionError:
    tpr = None
# specificity or true negative rate
try:
    tnr = tn / (tn + fp)
except ZeroDivisionError:
    tnr = None
# precision or positive predictive value
try:
    ppv = tp / (tp + fp)
except ZeroDivisionError:
    ppv = None
# false discovery rate (undefined whenever the precision is undefined)
fdr = None if ppv is None else 1 - ppv
# accuracy
try:
    acc = (tp + tn) / (tp + tn + fp + fn)
except ZeroDivisionError:
    acc = None
# Compute the Matthews correlation coefficient.
try:
    mcc = (tp * tn - fp * fn) / \
        sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
except ZeroDivisionError:
    mcc = None
return {
    "TP": list(true_positive),
    "TN": list(true_negative),
    "FP": list(false_positive),
    "FN": list(false_negative),
    "TPR": tpr,
    "TNR": tnr,
    "PPV": ppv,
    "FDR": fdr,
    "ACC": acc,
    "MCC": mcc
}
def confusion_matrix(predicted_essential, expected_essential, predicted_nonessential, expected_nonessential)
Compute a representation of the confusion matrix.

Parameters
----------
predicted_essential : set
expected_essential : set
predicted_nonessential : set
expected_nonessential : set

Returns
-------
dict
    Confusion matrix as different keys of a dictionary. The abbreviated
    keys correspond to the ones used in [1]_.

References
----------
.. [1] `Wikipedia entry for the Confusion matrix
       <https://en.wikipedia.org/wiki/Confusion_matrix>`_
1.48849
1.573707
0.94585
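Since the function is pure set arithmetic, a fully self-contained usage sketch with toy gene identifiers is possible:

result = confusion_matrix(
    predicted_essential={"g1", "g2"},
    expected_essential={"g1", "g3"},
    predicted_nonessential={"g3", "g4"},
    expected_nonessential={"g2", "g4"},
)
print(result["TP"], result["ACC"], result["MCC"])
# ['g1'] 0.5 0.0 -- one entry in each cell of the matrix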
notifications = {"warnings": [], "errors": []} model, sbml_ver = val.load_cobra_model(path, notifications) return model, sbml_ver, notifications
def validate_model(path)
Validate a model structurally and optionally store results as JSON.

Parameters
----------
path : str
    Path to model file.

Returns
-------
tuple
    cobra.Model
        The metabolic model under investigation.
    tuple
        A tuple reporting on the SBML level, version, and FBC package
        version used (if any) in the SBML document.
    dict
        A simple dictionary containing a list of errors and warnings.
10.175939
8.52196
1.194084
if config is None:
    config = ReportConfiguration.load()
report = SnapshotReport(result=result, configuration=config)
if html:
    return report.render_html()
else:
    return report.render_json()
def snapshot_report(result, config=None, html=True)
Generate a snapshot report from a result set and configuration.

Parameters
----------
result : memote.MemoteResult
    Nested dictionary structure as returned from the test suite.
config : dict, optional
    The final test report configuration (default None).
html : bool, optional
    Whether to render the report as full HTML or JSON (default True).
3.122978
3.389541
0.921357
if config is None:
    config = ReportConfiguration.load()
report = HistoryReport(history=history, configuration=config)
if html:
    return report.render_html()
else:
    return report.render_json()
def history_report(history, config=None, html=True)
Generate a history report from a result history and configuration.

Parameters
----------
history : memote.HistoryManager
    The manager grants access to previous results.
config : dict, optional
    The final test report configuration (default None).
html : bool, optional
    Whether to render the report as full HTML or JSON (default True).
3.041296
3.45781
0.879544
if config is None:
    config = ReportConfiguration.load()
report = DiffReport(diff_results=diff_results, configuration=config)
if html:
    return report.render_html()
else:
    return report.render_json()
def diff_report(diff_results, config=None, html=True)
Generate a diff report from a result set and configuration.

Parameters
----------
diff_results : iterable of memote.MemoteResult
    Nested dictionary structure as returned from the test suite.
config : dict, optional
    The final test report configuration (default None).
html : bool, optional
    Whether to render the report as full HTML or JSON (default True).
3.030143
3.392895
0.893085
env = Environment(
    loader=PackageLoader('memote.suite', 'templates'),
    autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template('validation_template.html')
model = os.path.basename(path)
with open(filename, "w") as file_h:
    file_h.write(template.render(model=model, notifications=notifications))
def validation_report(path, notifications, filename)
Generate a validation report from a notification object.

Parameters
----------
path : str
    Path to model file.
notifications : dict
    A simple dictionary structure containing a list of errors and
    warnings.
filename : str
    Path to the HTML report output file.
2.637308
3.06495
0.860473
if filename is None:
    LOGGER.debug("Loading default configuration.")
    with open_text(templates, "test_config.yml",
                   encoding="utf-8") as file_handle:
        # safe_load avoids constructing arbitrary Python objects.
        content = yaml.safe_load(file_handle)
else:
    LOGGER.debug("Loading custom configuration '%s'.", filename)
    try:
        with open(filename, encoding="utf-8") as file_handle:
            content = yaml.safe_load(file_handle)
    except IOError as err:
        LOGGER.error(
            "Failed to load the custom configuration '%s'. Skipping.",
            filename)
        LOGGER.debug(str(err))
        content = dict()
return cls(content)
def load(cls, filename=None)
Load a test report configuration.
2.884873
2.749042
1.049411
logger.debug("%r", gpr) conform = logical_and.sub("and", gpr) conform = logical_or.sub("or", conform) conform = escape_chars.sub("_", conform) expression = ast.parse(conform) walker = GPRVisitor() walker.visit(expression) return len(walker.left ^ walker.right)
def find_top_level_complex(gpr)
Find unique elements of both branches of the top level logical AND.

Parameters
----------
gpr : str
    The gene-protein-reaction association as a string.

Returns
-------
int
    The size of the symmetric difference between the set of elements to
    the left of the top level logical AND and the right set.
6.616974
6.100939
1.084583
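A hedged sketch (assumes find_top_level_complex is importable). The GPR below has a top-level AND whose left branch is {gene1} and whose right branch is {gene2, gene3}, giving a symmetric difference of size 3:

gpr = "gene1 and (gene2 or gene3)"
print(find_top_level_complex(gpr))  # expected: 3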
if self._is_top and isinstance(node.op, ast.And):
    self._is_top = False
    self._current = self.left
    self.visit(node.values[0])
    self._current = self.right
    for successor in node.values[1:]:
        self.visit(successor)
else:
    self.generic_visit(node)
def visit_BoolOp(self, node)
Set up recording of elements with this hook.
2.715425
2.594322
1.04668
lower_bound, upper_bound = helpers.find_bounds(model)
return [rxn for rxn in model.reactions
        if 0 > rxn.lower_bound > lower_bound or
        0 < rxn.upper_bound < upper_bound]
def find_nonzero_constrained_reactions(model)
Return list of reactions with non-zero, non-maximal bounds.
3.478942
3.100841
1.121935
return [rxn for rxn in model.reactions if rxn.lower_bound == 0 and rxn.upper_bound == 0]
def find_zero_constrained_reactions(model)
Return list of reactions that are constrained to zero flux.
2.604181
2.447085
1.064197
lower_bound, upper_bound = helpers.find_bounds(model)
return [rxn for rxn in model.reactions
        if rxn.lower_bound <= lower_bound and
        rxn.upper_bound >= upper_bound]
def find_unconstrained_reactions(model)
Return list of reactions that are not constrained at all.
2.934483
2.707544
1.083817
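The classification logic of the three helpers above, inlined as a self-contained sketch with explicit system-wide bounds (memote derives these via helpers.find_bounds; the values here are assumptions):

LOWER, UPPER = -1000.0, 1000.0  # assumed system-wide default bounds

def classify(lb, ub):
    if lb == 0 and ub == 0:
        return "zero-constrained"
    if lb <= LOWER and ub >= UPPER:
        return "unconstrained"
    if 0 > lb > LOWER or 0 < ub < UPPER:
        return "non-zero-constrained"
    return "other"

print(classify(-1000.0, 1000.0))  # unconstrained
print(classify(0.0, 0.0))         # zero-constrained
print(classify(0.0, 5.0))         # non-zero-constrained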
atp_adp_conv_rxns = helpers.find_converting_reactions(
    model, ("MNXM3", "MNXM7")
)
id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c')
reactants = {
    helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0],
    helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0]
}
products = {
    helpers.find_met_in_model(model, "MNXM7", id_of_main_compartment)[0],
    helpers.find_met_in_model(model, "MNXM1", id_of_main_compartment)[0],
    helpers.find_met_in_model(model, "MNXM9", id_of_main_compartment)[0]
}
candidates = [rxn for rxn in atp_adp_conv_rxns
              if rxn.reversibility is False and
              set(rxn.reactants) == reactants and
              set(rxn.products) == products]
buzzwords = ['maintenance', 'atpm', 'requirement', 'ngam', 'non-growth',
             'associated']
refined_candidates = [rxn for rxn in candidates if any(
    string in filter_none(rxn.name, '').lower() for string in buzzwords
)]
if refined_candidates:
    return refined_candidates
else:
    return candidates
def find_ngam(model)
u""" Return all potential non growth-associated maintenance reactions. From the list of all reactions that convert ATP to ADP select the reactions that match a defined reaction string and whose metabolites are situated within the main model compartment. The main model compartment is the cytosol, and if that cannot be identified, the compartment with the most metabolites. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list Reactions that qualify as non-growth associated maintenance reactions. Notes ----- [1]_ define the non-growth associated maintenance (NGAM) as the energy required to maintain all constant processes such as turgor pressure and other housekeeping activities. In metabolic models this is expressed by requiring a simple ATP hydrolysis reaction to always have a fixed minimal amount of flux. This value can be measured as described by [1]_ . References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203
3.105068
2.751233
1.12861
if len(model.reactions) == 0 or len(model.genes) == 0:
    raise ValueError("The model contains no reactions or genes.")
return float(len(model.reactions)) / float(len(model.genes))
def calculate_metabolic_coverage(model)
u""" Return the ratio of reactions and genes included in the model. Determine whether the amount of reactions and genes in model not equal to zero, then return the ratio. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- float The ratio of reactions to genes also called metabolic coverage. Raises ------ ValueError If the model does not contain either reactions or genes. Notes ----- According to [1]_ this is a good quality indicator expressing the degree of metabolic coverage i.e. modeling detail of a given reconstruction. The authors explain that models with a 'high level of modeling detail have ratios >1, and [models] with low level of detail have ratios <1'. They explain that 'this difference arises because [models] with basic or intermediate levels of detail often include many reactions in which several gene products and their enzymatic transformations are ‘lumped’'. References ---------- .. [1] Monk, J., Nogales, J., & Palsson, B. O. (2014). Optimizing genome-scale network reconstructions. Nature Biotechnology, 32(5), 447–452. http://doi.org/10.1038/nbt.2870
3.4046
3.081853
1.104725
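A hedged sketch of the failure mode and the ratio (assumes cobra and that calculate_metabolic_coverage is importable):

import cobra

empty = cobra.Model("empty")
try:
    calculate_metabolic_coverage(empty)
except ValueError as err:
    print(err)  # the guard above rejects models without reactions/genes
# For a populated model the result is simply
# len(model.reactions) / len(model.genes), e.g. 2000/1500 ~= 1.33.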
complexes = []
for rxn in model.reactions:
    if not rxn.gene_reaction_rule:
        continue
    size = find_top_level_complex(rxn.gene_reaction_rule)
    if size >= 2:
        complexes.append(rxn)
return complexes
def find_protein_complexes(model)
Find reactions that are catalyzed by at least a heterodimer.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
list
    Reactions whose gene-protein-reaction association contains at least
    one logical AND combining different gene products (heterodimer).
2.921267
3.3044
0.884053
lower_bound, upper_bound = helpers.find_bounds(model)
if rxn.reversibility:
    return rxn.lower_bound > lower_bound or rxn.upper_bound < upper_bound
else:
    return rxn.lower_bound > 0 or rxn.upper_bound < upper_bound
def is_constrained_reaction(model, rxn)
Return whether a reaction has fixed constraints.
2.661955
2.490576
1.068811
o2_in_model = helpers.find_met_in_model(model, "MNXM4")
return set([rxn for met in model.metabolites for rxn in met.reactions
            if met.formula == "O2" or met in o2_in_model])
def find_oxygen_reactions(model)
Return list of oxygen-producing/-consuming reactions.
6.283626
5.590312
1.124021
unique = set()
for met in model.metabolites:
    is_missing = True
    for comp in model.compartments:
        if met.id.endswith("_{}".format(comp)):
            unique.add(met.id[:-(len(comp) + 1)])
            is_missing = False
            break
    if is_missing:
        unique.add(met.id)
return unique
def find_unique_metabolites(model)
Return set of metabolite IDs without duplicates from compartments.
2.337926
2.1436
1.090654
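A hedged sketch (assumes cobra); compartment suffixes are stripped, so the same species in 'c' and 'e' is counted once:

import cobra

model = cobra.Model("toy")
model.add_metabolites([
    cobra.Metabolite("glc__D_c", compartment="c"),
    cobra.Metabolite("glc__D_e", compartment="e"),
    cobra.Metabolite("pyruvate", compartment="c"),
])
print(sorted(find_unique_metabolites(model)))
# expected: ['glc__D', 'pyruvate']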
unique_identifiers = ["inchikey", "inchi"]
duplicates = []
for met_1, met_2 in combinations(model.metabolites, 2):
    if met_1.compartment == met_2.compartment:
        for key in unique_identifiers:
            if key in met_1.annotation and key in met_2.annotation:
                if met_1.annotation[key] == met_2.annotation[key]:
                    duplicates.append((met_1.id, met_2.id))
                    break
return duplicates
def find_duplicate_metabolites_in_compartments(model)
Return a list of metabolites with duplicates in the same compartment.

This function identifies duplicate metabolites in each compartment by
determining if any two metabolites have identical InChI-key annotations.
For instance, this function would find compounds with IDs ATP1 and ATP2
in the cytosolic compartment, with both having the same InChI
annotations.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
list
    A list of tuples of duplicate metabolites.
2.075691
2.228032
0.931625
duplicates = {}
rxn_db_identifiers = ["metanetx.reaction", "kegg.reaction", "brenda",
                      "rhea", "biocyc", "bigg.reaction"]
# Build a list that associates a reaction with a set of its annotations.
ann_rxns = []
for rxn in model.reactions:
    ann = []
    for key in rxn_db_identifiers:
        if key in rxn.annotation:
            if isinstance(rxn.annotation[key], list):
                ann.extend([(key, elem) for elem in rxn.annotation[key]])
            else:
                ann.append((key, rxn.annotation[key]))
    ann_rxns.append((rxn, frozenset(ann)))
# Compute the intersection between annotations and record the matching
# reaction identifiers.
for (rxn_a, ann_a), (rxn_b, ann_b) in combinations(ann_rxns, 2):
    mutual_pair = tuple(ann_a & ann_b)
    if len(mutual_pair) > 0:
        duplicates.setdefault(mutual_pair, set()).update(
            [rxn_a.id, rxn_b.id])
# Transform the object for JSON compatibility.
num_duplicated = set()
duplicated = {}
for key in duplicates:
    # Object keys must be strings in JSON.
    new_key = ",".join(sorted("{}:{}".format(ns, term)
                              for ns, term in key))
    duplicated[new_key] = rxns = list(duplicates[key])
    num_duplicated.update(rxns)
return duplicated, len(num_duplicated)
def find_reactions_with_partially_identical_annotations(model)
Return duplicate reactions based on identical annotation.

Identify duplicate reactions globally by checking if any two metabolic
reactions have the same entries in their annotation attributes. This can
be useful to identify one 'type' of reaction that occurs in several
compartments, to curate merged models, or to clean up bulk model
modifications. The heuristic only looks at annotations with the keys
"metanetx.reaction", "kegg.reaction", "brenda", "rhea", "biocyc", and
"bigg.reaction".

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Returns
-------
dict
    A mapping from sets of annotations to groups of reactions with those
    annotations.
int
    The total number of unique reactions that are duplicated.
3.300846
2.906186
1.1358
# TODO (Moritz Beber): Consider SMILES?
unique_identifiers = ["inchikey", "inchi"]
met2mol = {}
molecules = {c: [] for c in compartments}
for met in metabolites:
    ann = []
    for key in unique_identifiers:
        mol = met.annotation.get(key)
        if mol is not None:
            ann.append(mol)
    # Ignore metabolites without the required information.
    if len(ann) == 0:
        continue
    ann = set(ann)
    # Compare with other structures in the same compartment.
    mols = molecules[met.compartment]
    for i, mol_group in enumerate(mols):
        if len(ann & mol_group) > 0:
            mol_group.update(ann)
            # We map to the index of the group because it is hashable
            # and cheaper to compare later.
            met2mol[met] = "{}-{}".format(met.compartment, i)
            break
    if met not in met2mol:
        # The length of the list corresponds to the 0-index after
        # appending.
        met2mol[met] = "{}-{}".format(met.compartment, len(mols))
        mols.append(ann)
return met2mol
def map_metabolites_to_structures(metabolites, compartments)
Map metabolites from the identifier namespace to structural space.

Metabolites that lack structural annotation (InChI or InChIKey) are
ignored.

Parameters
----------
metabolites : iterable
    The cobra.Metabolites to map.
compartments : iterable
    The different compartments to consider. Structures are treated
    separately for each compartment.

Returns
-------
dict
    A mapping from a cobra.Metabolite to its compartment specific
    structure index.
4.584908
4.440022
1.032632
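Because the function only touches `annotation` and `compartment`, a self-contained sketch can use a tiny stand-in for cobra.Metabolite (the InChIKey strings are placeholders, not real keys):

class StubMetabolite(object):
    def __init__(self, compartment, **annotation):
        self.compartment = compartment
        self.annotation = annotation

atp_a = StubMetabolite("c", inchikey="KEY-ATP")
atp_b = StubMetabolite("c", inchikey="KEY-ATP")
h2o = StubMetabolite("c", inchikey="KEY-WATER")
met2mol = map_metabolites_to_structures([atp_a, atp_b, h2o], ["c"])
print(met2mol[atp_a] == met2mol[atp_b])  # True: one structure group
print(met2mol[atp_a] == met2mol[h2o])    # False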