| Column | Type |
| --- | --- |
| repository_name | string (length 5-67) |
| func_path_in_repository | string (length 4-234) |
| func_name | string (length 0-314) |
| whole_func_string | string (length 52-3.87M) |
| language | string (6 classes) |
| func_code_string | string (length 52-3.87M) |
| func_code_tokens | sequence (length 15-672k) |
| func_documentation_string | string (length 1-47.2k) |
| func_documentation_tokens | sequence (length 1-3.92k) |
| split_name | string (1 class) |
| func_code_url | string (length 85-339) |

Each row below gives the row metadata (repository, file path, function name, language, split, source URL) followed by the function source.
brutasse/graphite-api | graphite_api/functions.py | _getPercentile | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2340-L2368

```python
def _getPercentile(points, n, interpolate=False):
    """
    Percentile is calculated using the method outlined in the NIST Engineering
    Statistics Handbook:
    http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm
    """
    sortedPoints = sorted(not_none(points))
    if len(sortedPoints) == 0:
        return None
    fractionalRank = (n/100.0) * (len(sortedPoints) + 1)
    rank = int(fractionalRank)
    rankFraction = fractionalRank - rank
    if not interpolate:
        rank += int(math.ceil(rankFraction))
    if rank == 0:
        percentile = sortedPoints[0]
    elif rank - 1 == len(sortedPoints):
        percentile = sortedPoints[-1]
    else:
        percentile = sortedPoints[rank - 1]  # Adjust for 0-index
    if interpolate:
        if rank != len(sortedPoints):  # if a next value exists
            nextValue = sortedPoints[rank]
            percentile = percentile + rankFraction * (nextValue - percentile)
    return percentile
```
"""
Percentile is calculated using the method outlined in the NIST Engineering
Statistics Handbook:
http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm
"""
sortedPoints = sorted(not_none(points))
if len(sortedPoints) == 0:
return None
fractionalRank = (n/100.0) * (len(sortedPoints) + 1)
rank = int(fractionalRank)
rankFraction = fractionalRank - rank
if not interpolate:
rank += int(math.ceil(rankFraction))
if rank == 0:
percentile = sortedPoints[0]
elif rank - 1 == len(sortedPoints):
percentile = sortedPoints[-1]
else:
percentile = sortedPoints[rank - 1] # Adjust for 0-index
if interpolate:
if rank != len(sortedPoints): # if a next value exists
nextValue = sortedPoints[rank]
percentile = percentile + rankFraction * (nextValue - percentile)
return percentile | [
"def",
"_getPercentile",
"(",
"points",
",",
"n",
",",
"interpolate",
"=",
"False",
")",
":",
"sortedPoints",
"=",
"sorted",
"(",
"not_none",
"(",
"points",
")",
")",
"if",
"len",
"(",
"sortedPoints",
")",
"==",
"0",
":",
"return",
"None",
"fractionalRank",
"=",
"(",
"n",
"/",
"100.0",
")",
"*",
"(",
"len",
"(",
"sortedPoints",
")",
"+",
"1",
")",
"rank",
"=",
"int",
"(",
"fractionalRank",
")",
"rankFraction",
"=",
"fractionalRank",
"-",
"rank",
"if",
"not",
"interpolate",
":",
"rank",
"+=",
"int",
"(",
"math",
".",
"ceil",
"(",
"rankFraction",
")",
")",
"if",
"rank",
"==",
"0",
":",
"percentile",
"=",
"sortedPoints",
"[",
"0",
"]",
"elif",
"rank",
"-",
"1",
"==",
"len",
"(",
"sortedPoints",
")",
":",
"percentile",
"=",
"sortedPoints",
"[",
"-",
"1",
"]",
"else",
":",
"percentile",
"=",
"sortedPoints",
"[",
"rank",
"-",
"1",
"]",
"# Adjust for 0-index",
"if",
"interpolate",
":",
"if",
"rank",
"!=",
"len",
"(",
"sortedPoints",
")",
":",
"# if a next value exists",
"nextValue",
"=",
"sortedPoints",
"[",
"rank",
"]",
"percentile",
"=",
"percentile",
"+",
"rankFraction",
"*",
"(",
"nextValue",
"-",
"percentile",
")",
"return",
"percentile"
] | Percentile is calculated using the method outlined in the NIST Engineering
Statistics Handbook:
http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm | [
"Percentile",
"is",
"calculated",
"using",
"the",
"method",
"outlined",
"in",
"the",
"NIST",
"Engineering",
"Statistics",
"Handbook",
":",
"http",
":",
"//",
"www",
".",
"itl",
".",
"nist",
".",
"gov",
"/",
"div898",
"/",
"handbook",
"/",
"prc",
"/",
"section2",
"/",
"prc252",
".",
"htm"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2340-L2368 |
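A quick way to sanity-check the NIST ranking logic above is to run it on a short list. The snippet below assumes `_getPercentile` as defined above is in scope and supplies a minimal `not_none` stand-in for the helper it calls (not graphite-api's actual implementation):

```python
import math

def not_none(points):
    # Minimal stand-in for graphite-api's helper: drop None entries.
    return [p for p in points if p is not None]

# With _getPercentile as defined above in scope:
points = [None, 4, 1, 3, None, 2, 5]  # sorted non-None values: [1, 2, 3, 4, 5]
print(_getPercentile(points, 50))                    # 3   (fractionalRank = 0.5 * 6 = 3.0)
print(_getPercentile(points, 70, interpolate=True))  # 4.2 (rank 4.2 -> 4 + 0.2 * (5 - 4))
```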
brutasse/graphite-api | graphite_api/functions.py | nPercentile | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2371-L2392

```python
def nPercentile(requestContext, seriesList, n):
    """Returns n-percent of each series in the seriesList."""
    assert n, 'The requested percent is required to be greater than 0'
    results = []
    for s in seriesList:
        # Create a sorted copy of the TimeSeries excluding None values in the
        # values list.
        s_copy = TimeSeries(s.name, s.start, s.end, s.step,
                            sorted(not_none(s)))
        if not s_copy:
            continue  # Skip this series because it is empty.
        perc_val = _getPercentile(s_copy, n)
        if perc_val is not None:
            name = 'nPercentile(%s, %g)' % (s_copy.name, n)
            point_count = int((s.end - s.start)/s.step)
            perc_series = TimeSeries(name, s_copy.start, s_copy.end,
                                     s_copy.step, [perc_val] * point_count)
            perc_series.pathExpression = name
            results.append(perc_series)
    return results
```
"""Returns n-percent of each series in the seriesList."""
assert n, 'The requested percent is required to be greater than 0'
results = []
for s in seriesList:
# Create a sorted copy of the TimeSeries excluding None values in the
# values list.
s_copy = TimeSeries(s.name, s.start, s.end, s.step,
sorted(not_none(s)))
if not s_copy:
continue # Skip this series because it is empty.
perc_val = _getPercentile(s_copy, n)
if perc_val is not None:
name = 'nPercentile(%s, %g)' % (s_copy.name, n)
point_count = int((s.end - s.start)/s.step)
perc_series = TimeSeries(name, s_copy.start, s_copy.end,
s_copy.step, [perc_val] * point_count)
perc_series.pathExpression = name
results.append(perc_series)
return results | [
"def",
"nPercentile",
"(",
"requestContext",
",",
"seriesList",
",",
"n",
")",
":",
"assert",
"n",
",",
"'The requested percent is required to be greater than 0'",
"results",
"=",
"[",
"]",
"for",
"s",
"in",
"seriesList",
":",
"# Create a sorted copy of the TimeSeries excluding None values in the",
"# values list.",
"s_copy",
"=",
"TimeSeries",
"(",
"s",
".",
"name",
",",
"s",
".",
"start",
",",
"s",
".",
"end",
",",
"s",
".",
"step",
",",
"sorted",
"(",
"not_none",
"(",
"s",
")",
")",
")",
"if",
"not",
"s_copy",
":",
"continue",
"# Skip this series because it is empty.",
"perc_val",
"=",
"_getPercentile",
"(",
"s_copy",
",",
"n",
")",
"if",
"perc_val",
"is",
"not",
"None",
":",
"name",
"=",
"'nPercentile(%s, %g)'",
"%",
"(",
"s_copy",
".",
"name",
",",
"n",
")",
"point_count",
"=",
"int",
"(",
"(",
"s",
".",
"end",
"-",
"s",
".",
"start",
")",
"/",
"s",
".",
"step",
")",
"perc_series",
"=",
"TimeSeries",
"(",
"name",
",",
"s_copy",
".",
"start",
",",
"s_copy",
".",
"end",
",",
"s_copy",
".",
"step",
",",
"[",
"perc_val",
"]",
"*",
"point_count",
")",
"perc_series",
".",
"pathExpression",
"=",
"name",
"results",
".",
"append",
"(",
"perc_series",
")",
"return",
"results"
] | Returns n-percent of each series in the seriesList. | [
"Returns",
"n",
"-",
"percent",
"of",
"each",
"series",
"in",
"the",
"seriesList",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2371-L2392 |
brutasse/graphite-api | graphite_api/functions.py | averageOutsidePercentile | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2395-L2408

```python
def averageOutsidePercentile(requestContext, seriesList, n):
    """
    Removes functions lying inside an average percentile interval
    """
    averages = [safeAvg(s) for s in seriesList]
    if n < 50:
        n = 100 - n
    lowPercentile = _getPercentile(averages, 100 - n)
    highPercentile = _getPercentile(averages, n)
    return [s for s in seriesList
            if not lowPercentile < safeAvg(s) < highPercentile]
```
"""
Removes functions lying inside an average percentile interval
"""
averages = [safeAvg(s) for s in seriesList]
if n < 50:
n = 100 - n
lowPercentile = _getPercentile(averages, 100 - n)
highPercentile = _getPercentile(averages, n)
return [s for s in seriesList
if not lowPercentile < safeAvg(s) < highPercentile] | [
"def",
"averageOutsidePercentile",
"(",
"requestContext",
",",
"seriesList",
",",
"n",
")",
":",
"averages",
"=",
"[",
"safeAvg",
"(",
"s",
")",
"for",
"s",
"in",
"seriesList",
"]",
"if",
"n",
"<",
"50",
":",
"n",
"=",
"100",
"-",
"n",
"lowPercentile",
"=",
"_getPercentile",
"(",
"averages",
",",
"100",
"-",
"n",
")",
"highPercentile",
"=",
"_getPercentile",
"(",
"averages",
",",
"n",
")",
"return",
"[",
"s",
"for",
"s",
"in",
"seriesList",
"if",
"not",
"lowPercentile",
"<",
"safeAvg",
"(",
"s",
")",
"<",
"highPercentile",
"]"
] | Removes functions lying inside an average percentile interval | [
"Removes",
"functions",
"lying",
"inside",
"an",
"average",
"percentile",
"interval"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2395-L2408 |
brutasse/graphite-api | graphite_api/functions.py | removeBetweenPercentile | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2411-L2426

```python
def removeBetweenPercentile(requestContext, seriesList, n):
    """
    Removes lines who do not have an value lying in the x-percentile of all
    the values at a moment
    """
    if n < 50:
        n = 100 - n
    transposed = list(zip_longest(*seriesList))
    lowPercentiles = [_getPercentile(col, 100-n) for col in transposed]
    highPercentiles = [_getPercentile(col, n) for col in transposed]
    return [l for l in seriesList
            if sum([not lowPercentiles[index] < val < highPercentiles[index]
                    for index, val in enumerate(l)]) > 0]
```
"""
Removes lines who do not have an value lying in the x-percentile of all
the values at a moment
"""
if n < 50:
n = 100 - n
transposed = list(zip_longest(*seriesList))
lowPercentiles = [_getPercentile(col, 100-n) for col in transposed]
highPercentiles = [_getPercentile(col, n) for col in transposed]
return [l for l in seriesList
if sum([not lowPercentiles[index] < val < highPercentiles[index]
for index, val in enumerate(l)]) > 0] | [
"def",
"removeBetweenPercentile",
"(",
"requestContext",
",",
"seriesList",
",",
"n",
")",
":",
"if",
"n",
"<",
"50",
":",
"n",
"=",
"100",
"-",
"n",
"transposed",
"=",
"list",
"(",
"zip_longest",
"(",
"*",
"seriesList",
")",
")",
"lowPercentiles",
"=",
"[",
"_getPercentile",
"(",
"col",
",",
"100",
"-",
"n",
")",
"for",
"col",
"in",
"transposed",
"]",
"highPercentiles",
"=",
"[",
"_getPercentile",
"(",
"col",
",",
"n",
")",
"for",
"col",
"in",
"transposed",
"]",
"return",
"[",
"l",
"for",
"l",
"in",
"seriesList",
"if",
"sum",
"(",
"[",
"not",
"lowPercentiles",
"[",
"index",
"]",
"<",
"val",
"<",
"highPercentiles",
"[",
"index",
"]",
"for",
"index",
",",
"val",
"in",
"enumerate",
"(",
"l",
")",
"]",
")",
">",
"0",
"]"
] | Removes lines who do not have an value lying in the x-percentile of all
the values at a moment | [
"Removes",
"lines",
"who",
"do",
"not",
"have",
"an",
"value",
"lying",
"in",
"the",
"x",
"-",
"percentile",
"of",
"all",
"the",
"values",
"at",
"a",
"moment"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2411-L2426 |
brutasse/graphite-api | graphite_api/functions.py | removeAboveValue | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2450-L2464

```python
def removeAboveValue(requestContext, seriesList, n):
    """
    Removes data above the given threshold from the series or list of series
    provided. Values above this threshold are assigned a value of None.
    """
    for s in seriesList:
        s.name = 'removeAboveValue(%s, %g)' % (s.name, n)
        s.pathExpression = s.name
        for (index, val) in enumerate(s):
            if val is None:
                continue
            if val > n:
                s[index] = None
    return seriesList
```
"""
Removes data above the given threshold from the series or list of series
provided. Values above this threshold are assigned a value of None.
"""
for s in seriesList:
s.name = 'removeAboveValue(%s, %g)' % (s.name, n)
s.pathExpression = s.name
for (index, val) in enumerate(s):
if val is None:
continue
if val > n:
s[index] = None
return seriesList | [
"def",
"removeAboveValue",
"(",
"requestContext",
",",
"seriesList",
",",
"n",
")",
":",
"for",
"s",
"in",
"seriesList",
":",
"s",
".",
"name",
"=",
"'removeAboveValue(%s, %g)'",
"%",
"(",
"s",
".",
"name",
",",
"n",
")",
"s",
".",
"pathExpression",
"=",
"s",
".",
"name",
"for",
"(",
"index",
",",
"val",
")",
"in",
"enumerate",
"(",
"s",
")",
":",
"if",
"val",
"is",
"None",
":",
"continue",
"if",
"val",
">",
"n",
":",
"s",
"[",
"index",
"]",
"=",
"None",
"return",
"seriesList"
] | Removes data above the given threshold from the series or list of series
provided. Values above this threshold are assigned a value of None. | [
"Removes",
"data",
"above",
"the",
"given",
"threshold",
"from",
"the",
"series",
"or",
"list",
"of",
"series",
"provided",
".",
"Values",
"above",
"this",
"threshold",
"are",
"assigned",
"a",
"value",
"of",
"None",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2450-L2464 |
brutasse/graphite-api | graphite_api/functions.py | removeBelowPercentile | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2467-L2485

```python
def removeBelowPercentile(requestContext, seriesList, n):
    """
    Removes data below the nth percentile from the series or list of series
    provided. Values below this percentile are assigned a value of None.
    """
    for s in seriesList:
        s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n)
        s.pathExpression = s.name
        try:
            percentile = nPercentile(requestContext, [s], n)[0][0]
        except IndexError:
            continue
        for (index, val) in enumerate(s):
            if val is None:
                continue
            if val < percentile:
                s[index] = None
    return seriesList
```
"""
Removes data below the nth percentile from the series or list of series
provided. Values below this percentile are assigned a value of None.
"""
for s in seriesList:
s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n)
s.pathExpression = s.name
try:
percentile = nPercentile(requestContext, [s], n)[0][0]
except IndexError:
continue
for (index, val) in enumerate(s):
if val is None:
continue
if val < percentile:
s[index] = None
return seriesList | [
"def",
"removeBelowPercentile",
"(",
"requestContext",
",",
"seriesList",
",",
"n",
")",
":",
"for",
"s",
"in",
"seriesList",
":",
"s",
".",
"name",
"=",
"'removeBelowPercentile(%s, %g)'",
"%",
"(",
"s",
".",
"name",
",",
"n",
")",
"s",
".",
"pathExpression",
"=",
"s",
".",
"name",
"try",
":",
"percentile",
"=",
"nPercentile",
"(",
"requestContext",
",",
"[",
"s",
"]",
",",
"n",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"continue",
"for",
"(",
"index",
",",
"val",
")",
"in",
"enumerate",
"(",
"s",
")",
":",
"if",
"val",
"is",
"None",
":",
"continue",
"if",
"val",
"<",
"percentile",
":",
"s",
"[",
"index",
"]",
"=",
"None",
"return",
"seriesList"
] | Removes data below the nth percentile from the series or list of series
provided. Values below this percentile are assigned a value of None. | [
"Removes",
"data",
"below",
"the",
"nth",
"percentile",
"from",
"the",
"series",
"or",
"list",
"of",
"series",
"provided",
".",
"Values",
"below",
"this",
"percentile",
"are",
"assigned",
"a",
"value",
"of",
"None",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2467-L2485 |
brutasse/graphite-api | graphite_api/functions.py | sortByName | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2526-L2541

```python
def sortByName(requestContext, seriesList, natural=False):
    """
    Takes one metric or a wildcard seriesList.
    Sorts the list of metrics by the metric name using either alphabetical
    order or natural sorting. Natural sorting allows names containing numbers
    to be sorted more naturally, e.g:
    - Alphabetical sorting: server1, server11, server12, server2
    - Natural sorting: server1, server2, server11, server12
    """
    if natural:
        return list(sorted(seriesList, key=lambda x: paddedName(x.name)))
    else:
        return list(sorted(seriesList, key=lambda x: x.name))
```
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the metric name using either alphabetical
order or natural sorting. Natural sorting allows names containing numbers
to be sorted more naturally, e.g:
- Alphabetical sorting: server1, server11, server12, server2
- Natural sorting: server1, server2, server11, server12
"""
if natural:
return list(sorted(seriesList, key=lambda x: paddedName(x.name)))
else:
return list(sorted(seriesList, key=lambda x: x.name)) | [
"def",
"sortByName",
"(",
"requestContext",
",",
"seriesList",
",",
"natural",
"=",
"False",
")",
":",
"if",
"natural",
":",
"return",
"list",
"(",
"sorted",
"(",
"seriesList",
",",
"key",
"=",
"lambda",
"x",
":",
"paddedName",
"(",
"x",
".",
"name",
")",
")",
")",
"else",
":",
"return",
"list",
"(",
"sorted",
"(",
"seriesList",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"name",
")",
")"
] | Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the metric name using either alphabetical
order or natural sorting. Natural sorting allows names containing numbers
to be sorted more naturally, e.g:
- Alphabetical sorting: server1, server11, server12, server2
- Natural sorting: server1, server2, server11, server12 | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2526-L2541 |
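`paddedName` itself is not included in this dump. A common way to implement the natural ordering described in the docstring is to zero-pad every digit run before comparing; the sketch below is a hypothetical stand-in, not graphite-api's actual helper:

```python
import re

def padded_name(name, width=10):
    # Hypothetical stand-in for paddedName: zero-pad digit runs so that
    # lexicographic order matches numeric order.
    return re.sub(r'\d+', lambda m: m.group(0).zfill(width), name)

names = ['server1', 'server11', 'server12', 'server2']
print(sorted(names))                   # ['server1', 'server11', 'server12', 'server2']
print(sorted(names, key=padded_name))  # ['server1', 'server2', 'server11', 'server12']
```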
brutasse/graphite-api | graphite_api/functions.py | sortByTotal | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2544-L2551

```python
def sortByTotal(requestContext, seriesList):
    """
    Takes one metric or a wildcard seriesList.
    Sorts the list of metrics by the sum of values across the time period
    specified.
    """
    return list(sorted(seriesList, key=safeSum, reverse=True))
```
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified.
"""
return list(sorted(seriesList, key=safeSum, reverse=True)) | [
"def",
"sortByTotal",
"(",
"requestContext",
",",
"seriesList",
")",
":",
"return",
"list",
"(",
"sorted",
"(",
"seriesList",
",",
"key",
"=",
"safeSum",
",",
"reverse",
"=",
"True",
")",
")"
] | Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified. | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2544-L2551 |
brutasse/graphite-api | graphite_api/functions.py | useSeriesAbove | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2585-L2608

```python
def useSeriesAbove(requestContext, seriesList, value, search, replace):
    """
    Compares the maximum of each series against the given `value`. If the
    series maximum is greater than `value`, the regular expression search and
    replace is applied against the series name to plot a related metric.
    e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'),
    the response time metric will be plotted only when the maximum value of
    the corresponding request/s metric is > 10
    Example::
        &target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time")
    """
    newSeries = []
    for series in seriesList:
        newname = re.sub(search, replace, series.name)
        if safeMax(series) > value:
            n = evaluateTarget(requestContext, newname)
            if n is not None and len(n) > 0:
                newSeries.append(n[0])
    return newSeries
```
"""
Compares the maximum of each series against the given `value`. If the
series maximum is greater than `value`, the regular expression search and
replace is applied against the series name to plot a related metric.
e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'),
the response time metric will be plotted only when the maximum value of the
corresponding request/s metric is > 10
Example::
&target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time")
"""
newSeries = []
for series in seriesList:
newname = re.sub(search, replace, series.name)
if safeMax(series) > value:
n = evaluateTarget(requestContext, newname)
if n is not None and len(n) > 0:
newSeries.append(n[0])
return newSeries | [
"def",
"useSeriesAbove",
"(",
"requestContext",
",",
"seriesList",
",",
"value",
",",
"search",
",",
"replace",
")",
":",
"newSeries",
"=",
"[",
"]",
"for",
"series",
"in",
"seriesList",
":",
"newname",
"=",
"re",
".",
"sub",
"(",
"search",
",",
"replace",
",",
"series",
".",
"name",
")",
"if",
"safeMax",
"(",
"series",
")",
">",
"value",
":",
"n",
"=",
"evaluateTarget",
"(",
"requestContext",
",",
"newname",
")",
"if",
"n",
"is",
"not",
"None",
"and",
"len",
"(",
"n",
")",
">",
"0",
":",
"newSeries",
".",
"append",
"(",
"n",
"[",
"0",
"]",
")",
"return",
"newSeries"
] | Compares the maximum of each series against the given `value`. If the
series maximum is greater than `value`, the regular expression search and
replace is applied against the series name to plot a related metric.
e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'),
the response time metric will be plotted only when the maximum value of the
corresponding request/s metric is > 10
Example::
&target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time") | [
"Compares",
"the",
"maximum",
"of",
"each",
"series",
"against",
"the",
"given",
"value",
".",
"If",
"the",
"series",
"maximum",
"is",
"greater",
"than",
"value",
"the",
"regular",
"expression",
"search",
"and",
"replace",
"is",
"applied",
"against",
"the",
"series",
"name",
"to",
"plot",
"a",
"related",
"metric",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2585-L2608 |
brutasse/graphite-api | graphite_api/functions.py | mostDeviant | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2629-L2656

```python
def mostDeviant(requestContext, seriesList, n):
    """
    Takes one metric or a wildcard seriesList followed by an integer N.
    Draws the N most deviant metrics.
    To find the deviants, the standard deviation (sigma) of each series
    is taken and ranked. The top N standard deviations are returned.
    Example::
        &target=mostDeviant(server*.instance*.memory.free, 5)
    Draws the 5 instances furthest from the average memory free.
    """
    deviants = []
    for series in seriesList:
        mean = safeAvg(series)
        if mean is None:
            continue
        square_sum = sum([(value - mean) ** 2 for value in series
                          if value is not None])
        sigma = safeDiv(square_sum, safeLen(series))
        if sigma is None:
            continue
        deviants.append((sigma, series))
    return [series for sig, series in sorted(deviants,  # sort by sigma
                                             key=itemgetter(0),
                                             reverse=True)][:n]
```
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Draws the N most deviant metrics.
To find the deviants, the standard deviation (sigma) of each series
is taken and ranked. The top N standard deviations are returned.
Example::
&target=mostDeviant(server*.instance*.memory.free, 5)
Draws the 5 instances furthest from the average memory free.
"""
deviants = []
for series in seriesList:
mean = safeAvg(series)
if mean is None:
continue
square_sum = sum([(value - mean) ** 2 for value in series
if value is not None])
sigma = safeDiv(square_sum, safeLen(series))
if sigma is None:
continue
deviants.append((sigma, series))
return [series for sig, series in sorted(deviants, # sort by sigma
key=itemgetter(0),
reverse=True)][:n] | [
"def",
"mostDeviant",
"(",
"requestContext",
",",
"seriesList",
",",
"n",
")",
":",
"deviants",
"=",
"[",
"]",
"for",
"series",
"in",
"seriesList",
":",
"mean",
"=",
"safeAvg",
"(",
"series",
")",
"if",
"mean",
"is",
"None",
":",
"continue",
"square_sum",
"=",
"sum",
"(",
"[",
"(",
"value",
"-",
"mean",
")",
"**",
"2",
"for",
"value",
"in",
"series",
"if",
"value",
"is",
"not",
"None",
"]",
")",
"sigma",
"=",
"safeDiv",
"(",
"square_sum",
",",
"safeLen",
"(",
"series",
")",
")",
"if",
"sigma",
"is",
"None",
":",
"continue",
"deviants",
".",
"append",
"(",
"(",
"sigma",
",",
"series",
")",
")",
"return",
"[",
"series",
"for",
"sig",
",",
"series",
"in",
"sorted",
"(",
"deviants",
",",
"# sort by sigma",
"key",
"=",
"itemgetter",
"(",
"0",
")",
",",
"reverse",
"=",
"True",
")",
"]",
"[",
":",
"n",
"]"
] | Takes one metric or a wildcard seriesList followed by an integer N.
Draws the N most deviant metrics.
To find the deviants, the standard deviation (sigma) of each series
is taken and ranked. The top N standard deviations are returned.
Example::
&target=mostDeviant(server*.instance*.memory.free, 5)
Draws the 5 instances furthest from the average memory free. | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"followed",
"by",
"an",
"integer",
"N",
".",
"Draws",
"the",
"N",
"most",
"deviant",
"metrics",
".",
"To",
"find",
"the",
"deviants",
"the",
"standard",
"deviation",
"(",
"sigma",
")",
"of",
"each",
"series",
"is",
"taken",
"and",
"ranked",
".",
"The",
"top",
"N",
"standard",
"deviations",
"are",
"returned",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2629-L2656 |
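Note that although the docstring speaks of ranking the standard deviation, the code ranks `square_sum / len`, i.e. the population variance; no square root is taken. Since the square root is monotonic, the top-N ordering is the same either way. A small standalone check of the statistic being ranked:

```python
values = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
mean = sum(values) / len(values)                               # 5.0
variance = sum((v - mean) ** 2 for v in values) / len(values)  # 4.0 (what mostDeviant ranks)
std_dev = variance ** 0.5                                      # 2.0 (same ordering)
print(mean, variance, std_dev)
```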
brutasse/graphite-api | graphite_api/functions.py | stdev | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2659-L2728

```python
def stdev(requestContext, seriesList, points, windowTolerance=0.1):
    """
    Takes one metric or a wildcard seriesList followed by an integer N.
    Draw the Standard Deviation of all metrics passed for the past N
    datapoints. If the ratio of null points in the window is greater than
    windowTolerance, skip the calculation. The default for windowTolerance is
    0.1 (up to 10% of points in the window can be missing). Note that if this
    is set to 0.0, it will cause large gaps in the output anywhere a single
    point is missing.
    Example::
        &target=stdev(server*.instance*.threads.busy,30)
        &target=stdev(server*.instance*.cpu.system,30,0.0)
    """
    # For this we take the standard deviation in terms of the moving average
    # and the moving average of series squares.
    for seriesIndex, series in enumerate(seriesList):
        stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(points)),
                                 series.start, series.end, series.step, [])
        stdevSeries.pathExpression = "stdev(%s,%d)" % (series.name,
                                                       int(points))
        validPoints = 0
        currentSum = 0
        currentSumOfSquares = 0
        for index, newValue in enumerate(series):
            # Mark whether we've reached our window size - dont drop points
            # out otherwise
            if index < points:
                bootstrapping = True
                droppedValue = None
            else:
                bootstrapping = False
                droppedValue = series[index - points]
            # Track non-None points in window
            if not bootstrapping and droppedValue is not None:
                validPoints -= 1
            if newValue is not None:
                validPoints += 1
            # Remove the value that just dropped out of the window
            if not bootstrapping and droppedValue is not None:
                currentSum -= droppedValue
                currentSumOfSquares -= droppedValue**2
            # Add in the value that just popped in the window
            if newValue is not None:
                currentSum += newValue
                currentSumOfSquares += newValue**2
            if (
                validPoints > 0 and
                float(validPoints) / points >= windowTolerance
            ):
                try:
                    deviation = math.sqrt(validPoints * currentSumOfSquares -
                                          currentSum**2) / validPoints
                except ValueError:
                    deviation = None
                stdevSeries.append(deviation)
            else:
                stdevSeries.append(None)
        seriesList[seriesIndex] = stdevSeries
    return seriesList
```
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Draw the Standard Deviation of all metrics passed for the past N
datapoints. If the ratio of null points in the window is greater than
windowTolerance, skip the calculation. The default for windowTolerance is
0.1 (up to 10% of points in the window can be missing). Note that if this
is set to 0.0, it will cause large gaps in the output anywhere a single
point is missing.
Example::
&target=stdev(server*.instance*.threads.busy,30)
&target=stdev(server*.instance*.cpu.system,30,0.0)
"""
# For this we take the standard deviation in terms of the moving average
# and the moving average of series squares.
for seriesIndex, series in enumerate(seriesList):
stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(points)),
series.start, series.end, series.step, [])
stdevSeries.pathExpression = "stdev(%s,%d)" % (series.name,
int(points))
validPoints = 0
currentSum = 0
currentSumOfSquares = 0
for index, newValue in enumerate(series):
# Mark whether we've reached our window size - dont drop points
# out otherwise
if index < points:
bootstrapping = True
droppedValue = None
else:
bootstrapping = False
droppedValue = series[index - points]
# Track non-None points in window
if not bootstrapping and droppedValue is not None:
validPoints -= 1
if newValue is not None:
validPoints += 1
# Remove the value that just dropped out of the window
if not bootstrapping and droppedValue is not None:
currentSum -= droppedValue
currentSumOfSquares -= droppedValue**2
# Add in the value that just popped in the window
if newValue is not None:
currentSum += newValue
currentSumOfSquares += newValue**2
if (
validPoints > 0 and
float(validPoints) / points >= windowTolerance
):
try:
deviation = math.sqrt(validPoints * currentSumOfSquares -
currentSum**2) / validPoints
except ValueError:
deviation = None
stdevSeries.append(deviation)
else:
stdevSeries.append(None)
seriesList[seriesIndex] = stdevSeries
return seriesList | [
"def",
"stdev",
"(",
"requestContext",
",",
"seriesList",
",",
"points",
",",
"windowTolerance",
"=",
"0.1",
")",
":",
"# For this we take the standard deviation in terms of the moving average",
"# and the moving average of series squares.",
"for",
"seriesIndex",
",",
"series",
"in",
"enumerate",
"(",
"seriesList",
")",
":",
"stdevSeries",
"=",
"TimeSeries",
"(",
"\"stdev(%s,%d)\"",
"%",
"(",
"series",
".",
"name",
",",
"int",
"(",
"points",
")",
")",
",",
"series",
".",
"start",
",",
"series",
".",
"end",
",",
"series",
".",
"step",
",",
"[",
"]",
")",
"stdevSeries",
".",
"pathExpression",
"=",
"\"stdev(%s,%d)\"",
"%",
"(",
"series",
".",
"name",
",",
"int",
"(",
"points",
")",
")",
"validPoints",
"=",
"0",
"currentSum",
"=",
"0",
"currentSumOfSquares",
"=",
"0",
"for",
"index",
",",
"newValue",
"in",
"enumerate",
"(",
"series",
")",
":",
"# Mark whether we've reached our window size - dont drop points",
"# out otherwise",
"if",
"index",
"<",
"points",
":",
"bootstrapping",
"=",
"True",
"droppedValue",
"=",
"None",
"else",
":",
"bootstrapping",
"=",
"False",
"droppedValue",
"=",
"series",
"[",
"index",
"-",
"points",
"]",
"# Track non-None points in window",
"if",
"not",
"bootstrapping",
"and",
"droppedValue",
"is",
"not",
"None",
":",
"validPoints",
"-=",
"1",
"if",
"newValue",
"is",
"not",
"None",
":",
"validPoints",
"+=",
"1",
"# Remove the value that just dropped out of the window",
"if",
"not",
"bootstrapping",
"and",
"droppedValue",
"is",
"not",
"None",
":",
"currentSum",
"-=",
"droppedValue",
"currentSumOfSquares",
"-=",
"droppedValue",
"**",
"2",
"# Add in the value that just popped in the window",
"if",
"newValue",
"is",
"not",
"None",
":",
"currentSum",
"+=",
"newValue",
"currentSumOfSquares",
"+=",
"newValue",
"**",
"2",
"if",
"(",
"validPoints",
">",
"0",
"and",
"float",
"(",
"validPoints",
")",
"/",
"points",
">=",
"windowTolerance",
")",
":",
"try",
":",
"deviation",
"=",
"math",
".",
"sqrt",
"(",
"validPoints",
"*",
"currentSumOfSquares",
"-",
"currentSum",
"**",
"2",
")",
"/",
"validPoints",
"except",
"ValueError",
":",
"deviation",
"=",
"None",
"stdevSeries",
".",
"append",
"(",
"deviation",
")",
"else",
":",
"stdevSeries",
".",
"append",
"(",
"None",
")",
"seriesList",
"[",
"seriesIndex",
"]",
"=",
"stdevSeries",
"return",
"seriesList"
] | Takes one metric or a wildcard seriesList followed by an integer N.
Draw the Standard Deviation of all metrics passed for the past N
datapoints. If the ratio of null points in the window is greater than
windowTolerance, skip the calculation. The default for windowTolerance is
0.1 (up to 10% of points in the window can be missing). Note that if this
is set to 0.0, it will cause large gaps in the output anywhere a single
point is missing.
Example::
&target=stdev(server*.instance*.threads.busy,30)
&target=stdev(server*.instance*.cpu.system,30,0.0) | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"followed",
"by",
"an",
"integer",
"N",
".",
"Draw",
"the",
"Standard",
"Deviation",
"of",
"all",
"metrics",
"passed",
"for",
"the",
"past",
"N",
"datapoints",
".",
"If",
"the",
"ratio",
"of",
"null",
"points",
"in",
"the",
"window",
"is",
"greater",
"than",
"windowTolerance",
"skip",
"the",
"calculation",
".",
"The",
"default",
"for",
"windowTolerance",
"is",
"0",
".",
"1",
"(",
"up",
"to",
"10%",
"of",
"points",
"in",
"the",
"window",
"can",
"be",
"missing",
")",
".",
"Note",
"that",
"if",
"this",
"is",
"set",
"to",
"0",
".",
"0",
"it",
"will",
"cause",
"large",
"gaps",
"in",
"the",
"output",
"anywhere",
"a",
"single",
"point",
"is",
"missing",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2659-L2728 |
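The deviation formula inside `stdev` is the identity sigma = sqrt(n * sum(x^2) - (sum x)^2) / n, which lets the window be maintained with two running sums instead of recomputing the mean at every step. A standalone check that it matches the direct definition:

```python
import math

window = [3.0, 5.0, 4.0, 8.0]
n = len(window)
s, ss = sum(window), sum(v * v for v in window)

# Formula used in stdev() above: sqrt(n * sum(x^2) - (sum x)^2) / n
streaming = math.sqrt(n * ss - s * s) / n

# Direct population standard deviation for comparison
mean = s / n
direct = math.sqrt(sum((v - mean) ** 2 for v in window) / n)

print(streaming, direct)  # both print ~1.8708, so the two forms agree
```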
brutasse/graphite-api | graphite_api/functions.py | secondYAxis | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2731-L2738

```python
def secondYAxis(requestContext, seriesList):
    """
    Graph the series on the secondary Y axis.
    """
    for series in seriesList:
        series.options['secondYAxis'] = True
        series.name = 'secondYAxis(%s)' % series.name
    return seriesList
```
"""
Graph the series on the secondary Y axis.
"""
for series in seriesList:
series.options['secondYAxis'] = True
series.name = 'secondYAxis(%s)' % series.name
return seriesList | [
"def",
"secondYAxis",
"(",
"requestContext",
",",
"seriesList",
")",
":",
"for",
"series",
"in",
"seriesList",
":",
"series",
".",
"options",
"[",
"'secondYAxis'",
"]",
"=",
"True",
"series",
".",
"name",
"=",
"'secondYAxis(%s)'",
"%",
"series",
".",
"name",
"return",
"seriesList"
] | Graph the series on the secondary Y axis. | [
"Graph",
"the",
"series",
"on",
"the",
"secondary",
"Y",
"axis",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2731-L2738 |
brutasse/graphite-api | graphite_api/functions.py | holtWintersForecast | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2854-L2876

```python
def holtWintersForecast(requestContext, seriesList):
    """
    Performs a Holt-Winters forecast using the series as input data. Data from
    one week previous to the series is used to bootstrap the initial forecast.
    """
    previewSeconds = 7 * 86400  # 7 days
    # ignore original data and pull new, including our preview
    newContext = requestContext.copy()
    newContext['startTime'] = (requestContext['startTime'] -
                               timedelta(seconds=previewSeconds))
    previewList = evaluateTokens(newContext, requestContext['args'][0])
    results = []
    for series in previewList:
        analysis = holtWintersAnalysis(series)
        predictions = analysis['predictions']
        windowPoints = previewSeconds // predictions.step
        result = TimeSeries("holtWintersForecast(%s)" % series.name,
                            predictions.start + previewSeconds,
                            predictions.end, predictions.step,
                            predictions[windowPoints:])
        result.pathExpression = result.name
        results.append(result)
    return results
```
"""
Performs a Holt-Winters forecast using the series as input data. Data from
one week previous to the series is used to bootstrap the initial forecast.
"""
previewSeconds = 7 * 86400 # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
results = []
for series in previewList:
analysis = holtWintersAnalysis(series)
predictions = analysis['predictions']
windowPoints = previewSeconds // predictions.step
result = TimeSeries("holtWintersForecast(%s)" % series.name,
predictions.start + previewSeconds,
predictions.end, predictions.step,
predictions[windowPoints:])
result.pathExpression = result.name
results.append(result)
return results | [
"def",
"holtWintersForecast",
"(",
"requestContext",
",",
"seriesList",
")",
":",
"previewSeconds",
"=",
"7",
"*",
"86400",
"# 7 days",
"# ignore original data and pull new, including our preview",
"newContext",
"=",
"requestContext",
".",
"copy",
"(",
")",
"newContext",
"[",
"'startTime'",
"]",
"=",
"(",
"requestContext",
"[",
"'startTime'",
"]",
"-",
"timedelta",
"(",
"seconds",
"=",
"previewSeconds",
")",
")",
"previewList",
"=",
"evaluateTokens",
"(",
"newContext",
",",
"requestContext",
"[",
"'args'",
"]",
"[",
"0",
"]",
")",
"results",
"=",
"[",
"]",
"for",
"series",
"in",
"previewList",
":",
"analysis",
"=",
"holtWintersAnalysis",
"(",
"series",
")",
"predictions",
"=",
"analysis",
"[",
"'predictions'",
"]",
"windowPoints",
"=",
"previewSeconds",
"//",
"predictions",
".",
"step",
"result",
"=",
"TimeSeries",
"(",
"\"holtWintersForecast(%s)\"",
"%",
"series",
".",
"name",
",",
"predictions",
".",
"start",
"+",
"previewSeconds",
",",
"predictions",
".",
"end",
",",
"predictions",
".",
"step",
",",
"predictions",
"[",
"windowPoints",
":",
"]",
")",
"result",
".",
"pathExpression",
"=",
"result",
".",
"name",
"results",
".",
"append",
"(",
"result",
")",
"return",
"results"
] | Performs a Holt-Winters forecast using the series as input data. Data from
one week previous to the series is used to bootstrap the initial forecast. | [
"Performs",
"a",
"Holt",
"-",
"Winters",
"forecast",
"using",
"the",
"series",
"as",
"input",
"data",
".",
"Data",
"from",
"one",
"week",
"previous",
"to",
"the",
"series",
"is",
"used",
"to",
"bootstrap",
"the",
"initial",
"forecast",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2854-L2876 |
brutasse/graphite-api | graphite_api/functions.py | holtWintersConfidenceBands | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2879-L2932

```python
def holtWintersConfidenceBands(requestContext, seriesList, delta=3):
    """
    Performs a Holt-Winters forecast using the series as input data and plots
    upper and lower bands with the predicted forecast deviations.
    """
    previewSeconds = 7 * 86400  # 7 days
    # ignore original data and pull new, including our preview
    newContext = requestContext.copy()
    newContext['startTime'] = (requestContext['startTime'] -
                               timedelta(seconds=previewSeconds))
    previewList = evaluateTokens(newContext, requestContext['args'][0])
    results = []
    for series in previewList:
        analysis = holtWintersAnalysis(series)
        data = analysis['predictions']
        windowPoints = previewSeconds // data.step
        forecast = TimeSeries(data.name, data.start + previewSeconds,
                              data.end, data.step, data[windowPoints:])
        forecast.pathExpression = data.pathExpression
        data = analysis['deviations']
        windowPoints = previewSeconds // data.step
        deviation = TimeSeries(data.name, data.start + previewSeconds,
                               data.end, data.step, data[windowPoints:])
        deviation.pathExpression = data.pathExpression
        seriesLength = len(forecast)
        i = 0
        upperBand = list()
        lowerBand = list()
        while i < seriesLength:
            forecast_item = forecast[i]
            deviation_item = deviation[i]
            i = i + 1
            if forecast_item is None or deviation_item is None:
                upperBand.append(None)
                lowerBand.append(None)
            else:
                scaled_deviation = delta * deviation_item
                upperBand.append(forecast_item + scaled_deviation)
                lowerBand.append(forecast_item - scaled_deviation)
        upperName = "holtWintersConfidenceUpper(%s)" % series.name
        lowerName = "holtWintersConfidenceLower(%s)" % series.name
        upperSeries = TimeSeries(upperName, forecast.start, forecast.end,
                                 forecast.step, upperBand)
        lowerSeries = TimeSeries(lowerName, forecast.start, forecast.end,
                                 forecast.step, lowerBand)
        upperSeries.pathExpression = series.pathExpression
        lowerSeries.pathExpression = series.pathExpression
        results.append(lowerSeries)
        results.append(upperSeries)
    return results
```
"""
Performs a Holt-Winters forecast using the series as input data and plots
upper and lower bands with the predicted forecast deviations.
"""
previewSeconds = 7 * 86400 # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
results = []
for series in previewList:
analysis = holtWintersAnalysis(series)
data = analysis['predictions']
windowPoints = previewSeconds // data.step
forecast = TimeSeries(data.name, data.start + previewSeconds,
data.end, data.step, data[windowPoints:])
forecast.pathExpression = data.pathExpression
data = analysis['deviations']
windowPoints = previewSeconds // data.step
deviation = TimeSeries(data.name, data.start + previewSeconds,
data.end, data.step, data[windowPoints:])
deviation.pathExpression = data.pathExpression
seriesLength = len(forecast)
i = 0
upperBand = list()
lowerBand = list()
while i < seriesLength:
forecast_item = forecast[i]
deviation_item = deviation[i]
i = i + 1
if forecast_item is None or deviation_item is None:
upperBand.append(None)
lowerBand.append(None)
else:
scaled_deviation = delta * deviation_item
upperBand.append(forecast_item + scaled_deviation)
lowerBand.append(forecast_item - scaled_deviation)
upperName = "holtWintersConfidenceUpper(%s)" % series.name
lowerName = "holtWintersConfidenceLower(%s)" % series.name
upperSeries = TimeSeries(upperName, forecast.start, forecast.end,
forecast.step, upperBand)
lowerSeries = TimeSeries(lowerName, forecast.start, forecast.end,
forecast.step, lowerBand)
upperSeries.pathExpression = series.pathExpression
lowerSeries.pathExpression = series.pathExpression
results.append(lowerSeries)
results.append(upperSeries)
return results | [
"def",
"holtWintersConfidenceBands",
"(",
"requestContext",
",",
"seriesList",
",",
"delta",
"=",
"3",
")",
":",
"previewSeconds",
"=",
"7",
"*",
"86400",
"# 7 days",
"# ignore original data and pull new, including our preview",
"newContext",
"=",
"requestContext",
".",
"copy",
"(",
")",
"newContext",
"[",
"'startTime'",
"]",
"=",
"(",
"requestContext",
"[",
"'startTime'",
"]",
"-",
"timedelta",
"(",
"seconds",
"=",
"previewSeconds",
")",
")",
"previewList",
"=",
"evaluateTokens",
"(",
"newContext",
",",
"requestContext",
"[",
"'args'",
"]",
"[",
"0",
"]",
")",
"results",
"=",
"[",
"]",
"for",
"series",
"in",
"previewList",
":",
"analysis",
"=",
"holtWintersAnalysis",
"(",
"series",
")",
"data",
"=",
"analysis",
"[",
"'predictions'",
"]",
"windowPoints",
"=",
"previewSeconds",
"//",
"data",
".",
"step",
"forecast",
"=",
"TimeSeries",
"(",
"data",
".",
"name",
",",
"data",
".",
"start",
"+",
"previewSeconds",
",",
"data",
".",
"end",
",",
"data",
".",
"step",
",",
"data",
"[",
"windowPoints",
":",
"]",
")",
"forecast",
".",
"pathExpression",
"=",
"data",
".",
"pathExpression",
"data",
"=",
"analysis",
"[",
"'deviations'",
"]",
"windowPoints",
"=",
"previewSeconds",
"//",
"data",
".",
"step",
"deviation",
"=",
"TimeSeries",
"(",
"data",
".",
"name",
",",
"data",
".",
"start",
"+",
"previewSeconds",
",",
"data",
".",
"end",
",",
"data",
".",
"step",
",",
"data",
"[",
"windowPoints",
":",
"]",
")",
"deviation",
".",
"pathExpression",
"=",
"data",
".",
"pathExpression",
"seriesLength",
"=",
"len",
"(",
"forecast",
")",
"i",
"=",
"0",
"upperBand",
"=",
"list",
"(",
")",
"lowerBand",
"=",
"list",
"(",
")",
"while",
"i",
"<",
"seriesLength",
":",
"forecast_item",
"=",
"forecast",
"[",
"i",
"]",
"deviation_item",
"=",
"deviation",
"[",
"i",
"]",
"i",
"=",
"i",
"+",
"1",
"if",
"forecast_item",
"is",
"None",
"or",
"deviation_item",
"is",
"None",
":",
"upperBand",
".",
"append",
"(",
"None",
")",
"lowerBand",
".",
"append",
"(",
"None",
")",
"else",
":",
"scaled_deviation",
"=",
"delta",
"*",
"deviation_item",
"upperBand",
".",
"append",
"(",
"forecast_item",
"+",
"scaled_deviation",
")",
"lowerBand",
".",
"append",
"(",
"forecast_item",
"-",
"scaled_deviation",
")",
"upperName",
"=",
"\"holtWintersConfidenceUpper(%s)\"",
"%",
"series",
".",
"name",
"lowerName",
"=",
"\"holtWintersConfidenceLower(%s)\"",
"%",
"series",
".",
"name",
"upperSeries",
"=",
"TimeSeries",
"(",
"upperName",
",",
"forecast",
".",
"start",
",",
"forecast",
".",
"end",
",",
"forecast",
".",
"step",
",",
"upperBand",
")",
"lowerSeries",
"=",
"TimeSeries",
"(",
"lowerName",
",",
"forecast",
".",
"start",
",",
"forecast",
".",
"end",
",",
"forecast",
".",
"step",
",",
"lowerBand",
")",
"upperSeries",
".",
"pathExpression",
"=",
"series",
".",
"pathExpression",
"lowerSeries",
".",
"pathExpression",
"=",
"series",
".",
"pathExpression",
"results",
".",
"append",
"(",
"lowerSeries",
")",
"results",
".",
"append",
"(",
"upperSeries",
")",
"return",
"results"
] | Performs a Holt-Winters forecast using the series as input data and plots
upper and lower bands with the predicted forecast deviations. | [
"Performs",
"a",
"Holt",
"-",
"Winters",
"forecast",
"using",
"the",
"series",
"as",
"input",
"data",
"and",
"plots",
"upper",
"and",
"lower",
"bands",
"with",
"the",
"predicted",
"forecast",
"deviations",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2879-L2932 |
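Stripped of the TimeSeries plumbing, the band construction above is just forecast plus or minus delta times deviation, with None propagated wherever either input is missing. A minimal sketch with made-up numbers:

```python
delta = 3
forecast = [10.0, 12.0, None, 11.0]
deviation = [0.5, 0.4, 0.6, None]

upper, lower = [], []
for f, d in zip(forecast, deviation):
    if f is None or d is None:
        # Either input missing: no band at this point.
        upper.append(None)
        lower.append(None)
    else:
        upper.append(f + delta * d)  # e.g. 10.0 + 3 * 0.5 = 11.5
        lower.append(f - delta * d)  # e.g. 10.0 - 3 * 0.5 = 8.5

print(upper)  # [11.5, 13.2, None, None]
print(lower)  # [8.5, 10.8, None, None]
```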
brutasse/graphite-api | graphite_api/functions.py | holtWintersAberration | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2935-L2960

```python
def holtWintersAberration(requestContext, seriesList, delta=3):
    """
    Performs a Holt-Winters forecast using the series as input data and plots
    the positive or negative deviation of the series data from the forecast.
    """
    results = []
    for series in seriesList:
        confidenceBands = holtWintersConfidenceBands(requestContext, [series],
                                                     delta)
        lowerBand = confidenceBands[0]
        upperBand = confidenceBands[1]
        aberration = list()
        for i, actual in enumerate(series):
            if actual is None:
                aberration.append(0)
            elif upperBand[i] is not None and actual > upperBand[i]:
                aberration.append(actual - upperBand[i])
            elif lowerBand[i] is not None and actual < lowerBand[i]:
                aberration.append(actual - lowerBand[i])
            else:
                aberration.append(0)
        newName = "holtWintersAberration(%s)" % series.name
        results.append(TimeSeries(newName, series.start, series.end,
                                  series.step, aberration))
    return results
```
"""
Performs a Holt-Winters forecast using the series as input data and plots
the positive or negative deviation of the series data from the forecast.
"""
results = []
for series in seriesList:
confidenceBands = holtWintersConfidenceBands(requestContext, [series],
delta)
lowerBand = confidenceBands[0]
upperBand = confidenceBands[1]
aberration = list()
for i, actual in enumerate(series):
if actual is None:
aberration.append(0)
elif upperBand[i] is not None and actual > upperBand[i]:
aberration.append(actual - upperBand[i])
elif lowerBand[i] is not None and actual < lowerBand[i]:
aberration.append(actual - lowerBand[i])
else:
aberration.append(0)
newName = "holtWintersAberration(%s)" % series.name
results.append(TimeSeries(newName, series.start, series.end,
series.step, aberration))
return results | [
"def",
"holtWintersAberration",
"(",
"requestContext",
",",
"seriesList",
",",
"delta",
"=",
"3",
")",
":",
"results",
"=",
"[",
"]",
"for",
"series",
"in",
"seriesList",
":",
"confidenceBands",
"=",
"holtWintersConfidenceBands",
"(",
"requestContext",
",",
"[",
"series",
"]",
",",
"delta",
")",
"lowerBand",
"=",
"confidenceBands",
"[",
"0",
"]",
"upperBand",
"=",
"confidenceBands",
"[",
"1",
"]",
"aberration",
"=",
"list",
"(",
")",
"for",
"i",
",",
"actual",
"in",
"enumerate",
"(",
"series",
")",
":",
"if",
"actual",
"is",
"None",
":",
"aberration",
".",
"append",
"(",
"0",
")",
"elif",
"upperBand",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"actual",
">",
"upperBand",
"[",
"i",
"]",
":",
"aberration",
".",
"append",
"(",
"actual",
"-",
"upperBand",
"[",
"i",
"]",
")",
"elif",
"lowerBand",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"actual",
"<",
"lowerBand",
"[",
"i",
"]",
":",
"aberration",
".",
"append",
"(",
"actual",
"-",
"lowerBand",
"[",
"i",
"]",
")",
"else",
":",
"aberration",
".",
"append",
"(",
"0",
")",
"newName",
"=",
"\"holtWintersAberration(%s)\"",
"%",
"series",
".",
"name",
"results",
".",
"append",
"(",
"TimeSeries",
"(",
"newName",
",",
"series",
".",
"start",
",",
"series",
".",
"end",
",",
"series",
".",
"step",
",",
"aberration",
")",
")",
"return",
"results"
] | Performs a Holt-Winters forecast using the series as input data and plots
the positive or negative deviation of the series data from the forecast. | [
"Performs",
"a",
"Holt",
"-",
"Winters",
"forecast",
"using",
"the",
"series",
"as",
"input",
"data",
"and",
"plots",
"the",
"positive",
"or",
"negative",
"deviation",
"of",
"the",
"series",
"data",
"from",
"the",
"forecast",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2935-L2960 |
brutasse/graphite-api | graphite_api/functions.py | holtWintersConfidenceArea | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2963-L2974

```python
def holtWintersConfidenceArea(requestContext, seriesList, delta=3):
    """
    Performs a Holt-Winters forecast using the series as input data and plots
    the area between the upper and lower bands of the predicted forecast
    deviations.
    """
    bands = holtWintersConfidenceBands(requestContext, seriesList, delta)
    results = areaBetween(requestContext, bands)
    for series in results:
        series.name = series.name.replace('areaBetween',
                                          'holtWintersConfidenceArea')
    return results
```
"""
Performs a Holt-Winters forecast using the series as input data and plots
the area between the upper and lower bands of the predicted forecast
deviations.
"""
bands = holtWintersConfidenceBands(requestContext, seriesList, delta)
results = areaBetween(requestContext, bands)
for series in results:
series.name = series.name.replace('areaBetween',
'holtWintersConfidenceArea')
return results | [
"def",
"holtWintersConfidenceArea",
"(",
"requestContext",
",",
"seriesList",
",",
"delta",
"=",
"3",
")",
":",
"bands",
"=",
"holtWintersConfidenceBands",
"(",
"requestContext",
",",
"seriesList",
",",
"delta",
")",
"results",
"=",
"areaBetween",
"(",
"requestContext",
",",
"bands",
")",
"for",
"series",
"in",
"results",
":",
"series",
".",
"name",
"=",
"series",
".",
"name",
".",
"replace",
"(",
"'areaBetween'",
",",
"'holtWintersConfidenceArea'",
")",
"return",
"results"
] | Performs a Holt-Winters forecast using the series as input data and plots
the area between the upper and lower bands of the predicted forecast
deviations. | [
"Performs",
"a",
"Holt",
"-",
"Winters",
"forecast",
"using",
"the",
"series",
"as",
"input",
"data",
"and",
"plots",
"the",
"area",
"between",
"the",
"upper",
"and",
"lower",
"bands",
"of",
"the",
"predicted",
"forecast",
"deviations",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2963-L2974 |
brutasse/graphite-api | graphite_api/functions.py | linearRegressionAnalysis | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2977-L2995

```python
def linearRegressionAnalysis(series):
    """
    Returns factor and offset of linear regression function by least
    squares method.
    """
    n = safeLen(series)
    sumI = sum([i for i, v in enumerate(series) if v is not None])
    sumV = sum([v for i, v in enumerate(series) if v is not None])
    sumII = sum([i * i for i, v in enumerate(series) if v is not None])
    sumIV = sum([i * v for i, v in enumerate(series) if v is not None])
    denominator = float(n * sumII - sumI * sumI)
    if denominator == 0:
        return None
    else:
        factor = (n * sumIV - sumI * sumV) / denominator / series.step
        offset = sumII * sumV - sumIV * sumI
        offset = offset / denominator - factor * series.start
        return factor, offset
```
"""
Returns factor and offset of linear regression function by least
squares method.
"""
n = safeLen(series)
sumI = sum([i for i, v in enumerate(series) if v is not None])
sumV = sum([v for i, v in enumerate(series) if v is not None])
sumII = sum([i * i for i, v in enumerate(series) if v is not None])
sumIV = sum([i * v for i, v in enumerate(series) if v is not None])
denominator = float(n * sumII - sumI * sumI)
if denominator == 0:
return None
else:
factor = (n * sumIV - sumI * sumV) / denominator / series.step
offset = sumII * sumV - sumIV * sumI
offset = offset / denominator - factor * series.start
return factor, offset | [
"def",
"linearRegressionAnalysis",
"(",
"series",
")",
":",
"n",
"=",
"safeLen",
"(",
"series",
")",
"sumI",
"=",
"sum",
"(",
"[",
"i",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"series",
")",
"if",
"v",
"is",
"not",
"None",
"]",
")",
"sumV",
"=",
"sum",
"(",
"[",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"series",
")",
"if",
"v",
"is",
"not",
"None",
"]",
")",
"sumII",
"=",
"sum",
"(",
"[",
"i",
"*",
"i",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"series",
")",
"if",
"v",
"is",
"not",
"None",
"]",
")",
"sumIV",
"=",
"sum",
"(",
"[",
"i",
"*",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"series",
")",
"if",
"v",
"is",
"not",
"None",
"]",
")",
"denominator",
"=",
"float",
"(",
"n",
"*",
"sumII",
"-",
"sumI",
"*",
"sumI",
")",
"if",
"denominator",
"==",
"0",
":",
"return",
"None",
"else",
":",
"factor",
"=",
"(",
"n",
"*",
"sumIV",
"-",
"sumI",
"*",
"sumV",
")",
"/",
"denominator",
"/",
"series",
".",
"step",
"offset",
"=",
"sumII",
"*",
"sumV",
"-",
"sumIV",
"*",
"sumI",
"offset",
"=",
"offset",
"/",
"denominator",
"-",
"factor",
"*",
"series",
".",
"start",
"return",
"factor",
",",
"offset"
] | Returns factor and offset of linear regression function by least
squares method. | [
"Returns",
"factor",
"and",
"offset",
"of",
"linear",
"regression",
"function",
"by",
"least",
"squares",
"method",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2977-L2995 |
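The factor/offset pair comes from the closed-form least-squares solution over (index, value) pairs, rescaled so that it applies to raw timestamps (`start + i * step`). The following self-contained sketch re-derives the same fit with a minimal `FakeSeries` stand-in (an illustrative name, not part of graphite-api):

```python
class FakeSeries(list):
    # Minimal stand-in for graphite-api's TimeSeries: a list of values
    # plus start/step metadata (timestamps are start + i * step).
    def __init__(self, values, start, step):
        super().__init__(values)
        self.start, self.step = start, step

def regression(series):
    pts = [(i, v) for i, v in enumerate(series) if v is not None]
    n = len(pts)
    sumI = sum(i for i, _ in pts)
    sumV = sum(v for _, v in pts)
    sumII = sum(i * i for i, _ in pts)
    sumIV = sum(i * v for i, v in pts)
    den = float(n * sumII - sumI * sumI)
    if den == 0:
        return None
    # Slope per index, converted to slope per second via the step,
    # and intercept shifted so it applies to raw timestamps.
    factor = (n * sumIV - sumI * sumV) / den / series.step
    offset = (sumII * sumV - sumIV * sumI) / den - factor * series.start
    return factor, offset

s = FakeSeries([1.0, 3.0, 5.0, 7.0], start=100, step=10)
factor, offset = regression(s)
print(factor, offset)  # 0.2 and -19.0: value = 0.2 * t - 19 reproduces the data
```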
brutasse/graphite-api | graphite_api/functions.py | linearRegression | python | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2998-L3044

```python
def linearRegression(requestContext, seriesList, startSourceAt=None,
                     endSourceAt=None):
    """
    Graphs the liner regression function by least squares method.
    Takes one metric or a wildcard seriesList, followed by a quoted string
    with the time to start the line and another quoted string with the time
    to end the line. The start and end times are inclusive (default range is
    from to until). See ``from / until`` in the render\_api_ for examples of
    time formats. Datapoints in the range is used to regression.
    Example::
        &target=linearRegression(Server.instance01.threads.busy,'-1d')
        &target=linearRegression(Server.instance*.threads.busy,
                                 "00:00 20140101","11:59 20140630")
    """
    from .app import evaluateTarget
    results = []
    sourceContext = requestContext.copy()
    if startSourceAt is not None:
        sourceContext['startTime'] = parseATTime(startSourceAt)
    if endSourceAt is not None:
        sourceContext['endTime'] = parseATTime(endSourceAt)
    sourceList = []
    for series in seriesList:
        source = evaluateTarget(sourceContext, series.pathExpression)
        sourceList.extend(source)
    for source, series in zip(sourceList, seriesList):
        newName = 'linearRegression(%s, %s, %s)' % (
            series.name,
            int(epoch(sourceContext['startTime'])),
            int(epoch(sourceContext['endTime'])))
        forecast = linearRegressionAnalysis(source)
        if forecast is None:
            continue
        factor, offset = forecast
        values = [offset + (series.start + i * series.step) * factor
                  for i in range(len(series))]
        newSeries = TimeSeries(newName, series.start, series.end,
                               series.step, values)
        newSeries.pathExpression = newSeries.name
        results.append(newSeries)
    return results
```
endSourceAt=None):
"""
Graphs the liner regression function by least squares method.
Takes one metric or a wildcard seriesList, followed by a quoted string
with the time to start the line and another quoted string with the time
to end the line. The start and end times are inclusive (default range is
from to until). See ``from / until`` in the render\_api_ for examples of
time formats. Datapoints in the range is used to regression.
Example::
&target=linearRegression(Server.instance01.threads.busy,'-1d')
&target=linearRegression(Server.instance*.threads.busy,
"00:00 20140101","11:59 20140630")
"""
from .app import evaluateTarget
results = []
sourceContext = requestContext.copy()
if startSourceAt is not None:
sourceContext['startTime'] = parseATTime(startSourceAt)
if endSourceAt is not None:
sourceContext['endTime'] = parseATTime(endSourceAt)
sourceList = []
for series in seriesList:
source = evaluateTarget(sourceContext, series.pathExpression)
sourceList.extend(source)
for source, series in zip(sourceList, seriesList):
newName = 'linearRegression(%s, %s, %s)' % (
series.name,
int(epoch(sourceContext['startTime'])),
int(epoch(sourceContext['endTime'])))
forecast = linearRegressionAnalysis(source)
if forecast is None:
continue
factor, offset = forecast
values = [offset + (series.start + i * series.step) * factor
for i in range(len(series))]
newSeries = TimeSeries(newName, series.start, series.end,
series.step, values)
newSeries.pathExpression = newSeries.name
results.append(newSeries)
return results | [
"def",
"linearRegression",
"(",
"requestContext",
",",
"seriesList",
",",
"startSourceAt",
"=",
"None",
",",
"endSourceAt",
"=",
"None",
")",
":",
"from",
".",
"app",
"import",
"evaluateTarget",
"results",
"=",
"[",
"]",
"sourceContext",
"=",
"requestContext",
".",
"copy",
"(",
")",
"if",
"startSourceAt",
"is",
"not",
"None",
":",
"sourceContext",
"[",
"'startTime'",
"]",
"=",
"parseATTime",
"(",
"startSourceAt",
")",
"if",
"endSourceAt",
"is",
"not",
"None",
":",
"sourceContext",
"[",
"'endTime'",
"]",
"=",
"parseATTime",
"(",
"endSourceAt",
")",
"sourceList",
"=",
"[",
"]",
"for",
"series",
"in",
"seriesList",
":",
"source",
"=",
"evaluateTarget",
"(",
"sourceContext",
",",
"series",
".",
"pathExpression",
")",
"sourceList",
".",
"extend",
"(",
"source",
")",
"for",
"source",
",",
"series",
"in",
"zip",
"(",
"sourceList",
",",
"seriesList",
")",
":",
"newName",
"=",
"'linearRegression(%s, %s, %s)'",
"%",
"(",
"series",
".",
"name",
",",
"int",
"(",
"epoch",
"(",
"sourceContext",
"[",
"'startTime'",
"]",
")",
")",
",",
"int",
"(",
"epoch",
"(",
"sourceContext",
"[",
"'endTime'",
"]",
")",
")",
")",
"forecast",
"=",
"linearRegressionAnalysis",
"(",
"source",
")",
"if",
"forecast",
"is",
"None",
":",
"continue",
"factor",
",",
"offset",
"=",
"forecast",
"values",
"=",
"[",
"offset",
"+",
"(",
"series",
".",
"start",
"+",
"i",
"*",
"series",
".",
"step",
")",
"*",
"factor",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"series",
")",
")",
"]",
"newSeries",
"=",
"TimeSeries",
"(",
"newName",
",",
"series",
".",
"start",
",",
"series",
".",
"end",
",",
"series",
".",
"step",
",",
"values",
")",
"newSeries",
".",
"pathExpression",
"=",
"newSeries",
".",
"name",
"results",
".",
"append",
"(",
"newSeries",
")",
"return",
"results"
] | Graphs the linear regression function by least squares method.
Takes one metric or a wildcard seriesList, followed by a quoted string
with the time to start the line and another quoted string with the time
to end the line. The start and end times are inclusive (the default range
is from ``from`` to ``until``). See ``from / until`` in the render\_api_
for examples of time formats. Datapoints in the range are used for the
regression.
Example::
&target=linearRegression(Server.instance01.threads.busy,'-1d')
&target=linearRegression(Server.instance*.threads.busy,
"00:00 20140101","11:59 20140630") | [
"Graphs",
"the",
"liner",
"regression",
"function",
"by",
"least",
"squares",
"method",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2998-L3044 |
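linearRegression() itself re-fetches data through evaluateTarget(), so it needs a running app; the underlying fit can be exercised alone. A minimal sketch using the module's linearRegressionAnalysis helper, which, judging from the call site above, takes a TimeSeries and returns a (factor, offset) pair or None; the metric name and values are hypothetical, and TimeSeries is assumed importable from graphite_api.render.datalib as in this repository::

    from graphite_api.functions import linearRegressionAnalysis
    from graphite_api.render.datalib import TimeSeries

    # A perfectly linear series: value = 2 * t + 10 at each timestamp t.
    start, end, step = 0, 50, 10
    values = [2 * t + 10 for t in range(start, end, step)]
    series = TimeSeries('hypothetical.metric', start, end, step, values)

    factor, offset = linearRegressionAnalysis(series)
    print(factor, offset)  # expected to come out near 2.0 and 10.0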
brutasse/graphite-api | graphite_api/functions.py | drawAsInfinite | def drawAsInfinite(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
If the value is zero, draw the line at 0. If the value is above zero, draw
the line at infinity. If the value is null or less than zero, do not draw
the line.
Useful for displaying on/off metrics, such as exit codes. (0 = success,
anything else = failure.)
Example::
drawAsInfinite(Testing.script.exitCode)
"""
for series in seriesList:
series.options['drawAsInfinite'] = True
series.name = 'drawAsInfinite(%s)' % series.name
return seriesList | python | def drawAsInfinite(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
If the value is zero, draw the line at 0. If the value is above zero, draw
the line at infinity. If the value is null or less than zero, do not draw
the line.
Useful for displaying on/off metrics, such as exit codes. (0 = success,
anything else = failure.)
Example::
drawAsInfinite(Testing.script.exitCode)
"""
for series in seriesList:
series.options['drawAsInfinite'] = True
series.name = 'drawAsInfinite(%s)' % series.name
return seriesList | [
"def",
"drawAsInfinite",
"(",
"requestContext",
",",
"seriesList",
")",
":",
"for",
"series",
"in",
"seriesList",
":",
"series",
".",
"options",
"[",
"'drawAsInfinite'",
"]",
"=",
"True",
"series",
".",
"name",
"=",
"'drawAsInfinite(%s)'",
"%",
"series",
".",
"name",
"return",
"seriesList"
] | Takes one metric or a wildcard seriesList.
If the value is zero, draw the line at 0. If the value is above zero, draw
the line at infinity. If the value is null or less than zero, do not draw
the line.
Useful for displaying on/off metrics, such as exit codes. (0 = success,
anything else = failure.)
Example::
drawAsInfinite(Testing.script.exitCode) | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
".",
"If",
"the",
"value",
"is",
"zero",
"draw",
"the",
"line",
"at",
"0",
".",
"If",
"the",
"value",
"is",
"above",
"zero",
"draw",
"the",
"line",
"at",
"infinity",
".",
"If",
"the",
"value",
"is",
"null",
"or",
"less",
"than",
"zero",
"do",
"not",
"draw",
"the",
"line",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3047-L3065 |
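A minimal standalone sketch of drawAsInfinite(); the requestContext argument is never read, so an empty dict suffices, and TimeSeries is assumed importable from graphite_api.render.datalib::

    from graphite_api.functions import drawAsInfinite
    from graphite_api.render.datalib import TimeSeries

    # Exit codes sampled once a minute: 0 = success, 1 = failure.
    series = TimeSeries('Testing.script.exitCode', 0, 180, 60, [0, 1, 0])
    [result] = drawAsInfinite({}, [series])

    print(result.name)                       # drawAsInfinite(Testing.script.exitCode)
    print(result.options['drawAsInfinite'])  # True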
brutasse/graphite-api | graphite_api/functions.py | lineWidth | def lineWidth(requestContext, seriesList, width):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a line width of F, overriding the default
value of 1, or the &lineWidth=X.X parameter.
Useful for highlighting a single metric out of many, or having multiple
line widths in one graph.
Example::
&target=lineWidth(server01.instance01.memory.free,5)
"""
for series in seriesList:
series.options['lineWidth'] = width
return seriesList | python | def lineWidth(requestContext, seriesList, width):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a line width of F, overriding the default
value of 1, or the &lineWidth=X.X parameter.
Useful for highlighting a single metric out of many, or having multiple
line widths in one graph.
Example::
&target=lineWidth(server01.instance01.memory.free,5)
"""
for series in seriesList:
series.options['lineWidth'] = width
return seriesList | [
"def",
"lineWidth",
"(",
"requestContext",
",",
"seriesList",
",",
"width",
")",
":",
"for",
"series",
"in",
"seriesList",
":",
"series",
".",
"options",
"[",
"'lineWidth'",
"]",
"=",
"width",
"return",
"seriesList"
] | Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a line width of F, overriding the default
value of 1, or the &lineWidth=X.X parameter.
Useful for highlighting a single metric out of many, or having multiple
line widths in one graph.
Example::
&target=lineWidth(server01.instance01.memory.free,5) | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"followed",
"by",
"a",
"float",
"F",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3068-L3085 |
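A minimal sketch of lineWidth(); it only stamps an option onto each series, so an empty requestContext is enough (TimeSeries again assumed from graphite_api.render.datalib)::

    from graphite_api.functions import lineWidth
    from graphite_api.render.datalib import TimeSeries

    series = TimeSeries('server01.instance01.memory.free', 0, 180, 60, [1, 2, 3])
    [result] = lineWidth({}, [series], 5)
    print(result.options['lineWidth'])  # 5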
brutasse/graphite-api | graphite_api/functions.py | dashed | def dashed(requestContext, seriesList, dashLength=5):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a dotted line with segments of length F
If omitted, the default length of the segments is 5.0
Example::
&target=dashed(server01.instance01.memory.free,2.5)
"""
for series in seriesList:
series.name = 'dashed(%s, %g)' % (series.name, dashLength)
series.options['dashed'] = dashLength
return seriesList | python | def dashed(requestContext, seriesList, dashLength=5):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a dashed line with segments of length F.
If omitted, the default length of the segments is 5.0.
Example::
&target=dashed(server01.instance01.memory.free,2.5)
"""
for series in seriesList:
series.name = 'dashed(%s, %g)' % (series.name, dashLength)
series.options['dashed'] = dashLength
return seriesList | [
"def",
"dashed",
"(",
"requestContext",
",",
"seriesList",
",",
"dashLength",
"=",
"5",
")",
":",
"for",
"series",
"in",
"seriesList",
":",
"series",
".",
"name",
"=",
"'dashed(%s, %g)'",
"%",
"(",
"series",
".",
"name",
",",
"dashLength",
")",
"series",
".",
"options",
"[",
"'dashed'",
"]",
"=",
"dashLength",
"return",
"seriesList"
] | Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a dashed line with segments of length F.
If omitted, the default length of the segments is 5.0.
Example::
&target=dashed(server01.instance01.memory.free,2.5) | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"followed",
"by",
"a",
"float",
"F",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3088-L3103 |
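A minimal sketch of dashed(), showing both the renamed series and the stored segment length (empty requestContext; TimeSeries assumed from graphite_api.render.datalib)::

    from graphite_api.functions import dashed
    from graphite_api.render.datalib import TimeSeries

    series = TimeSeries('server01.instance01.memory.free', 0, 180, 60, [1, 2, 3])
    [result] = dashed({}, [series], 2.5)
    print(result.name)               # dashed(server01.instance01.memory.free, 2.5)
    print(result.options['dashed'])  # 2.5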
brutasse/graphite-api | graphite_api/functions.py | timeStack | def timeStack(requestContext, seriesList, timeShiftUnit, timeShiftStart,
timeShiftEnd):
"""
Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats). Also takes a start multiplier and end
multiplier for the length of time.
Creates a seriesList composed of the original metric series stacked
with time shifts, starting from the start multiplier through
the end multiplier.
Useful for looking at history, or feeding into averageSeries or
stddevSeries.
Example::
# create a series for today and each of the previous 7 days
&target=timeStack(Sales.widgets.largeBlue,"1d",0,7)
"""
# Default to negative. parseTimeOffset defaults to +
if timeShiftUnit[0].isdigit():
timeShiftUnit = '-' + timeShiftUnit
delta = parseTimeOffset(timeShiftUnit)
# if len(seriesList) > 1, they will all have the same pathExpression,
# which is all we care about.
series = seriesList[0]
results = []
timeShiftStartint = int(timeShiftStart)
timeShiftEndint = int(timeShiftEnd)
for shft in range(timeShiftStartint, timeShiftEndint):
myContext = requestContext.copy()
innerDelta = delta * shft
myContext['startTime'] = requestContext['startTime'] + innerDelta
myContext['endTime'] = requestContext['endTime'] + innerDelta
for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
shiftedSeries.name = 'timeShift(%s, %s, %s)' % (shiftedSeries.name,
timeShiftUnit,
shft)
shiftedSeries.pathExpression = shiftedSeries.name
shiftedSeries.start = series.start
shiftedSeries.end = series.end
results.append(shiftedSeries)
return results | python | def timeStack(requestContext, seriesList, timeShiftUnit, timeShiftStart,
timeShiftEnd):
"""
Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats). Also takes a start multiplier and end
multiplier for the length of time.
Creates a seriesList composed of the original metric series stacked
with time shifts, starting from the start multiplier through
the end multiplier.
Useful for looking at history, or feeding into averageSeries or
stddevSeries.
Example::
# create a series for today and each of the previous 7 days
&target=timeStack(Sales.widgets.largeBlue,"1d",0,7)
"""
# Default to negative. parseTimeOffset defaults to +
if timeShiftUnit[0].isdigit():
timeShiftUnit = '-' + timeShiftUnit
delta = parseTimeOffset(timeShiftUnit)
# if len(seriesList) > 1, they will all have the same pathExpression,
# which is all we care about.
series = seriesList[0]
results = []
timeShiftStartint = int(timeShiftStart)
timeShiftEndint = int(timeShiftEnd)
for shft in range(timeShiftStartint, timeShiftEndint):
myContext = requestContext.copy()
innerDelta = delta * shft
myContext['startTime'] = requestContext['startTime'] + innerDelta
myContext['endTime'] = requestContext['endTime'] + innerDelta
for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
shiftedSeries.name = 'timeShift(%s, %s, %s)' % (shiftedSeries.name,
timeShiftUnit,
shft)
shiftedSeries.pathExpression = shiftedSeries.name
shiftedSeries.start = series.start
shiftedSeries.end = series.end
results.append(shiftedSeries)
return results | [
"def",
"timeStack",
"(",
"requestContext",
",",
"seriesList",
",",
"timeShiftUnit",
",",
"timeShiftStart",
",",
"timeShiftEnd",
")",
":",
"# Default to negative. parseTimeOffset defaults to +",
"if",
"timeShiftUnit",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"timeShiftUnit",
"=",
"'-'",
"+",
"timeShiftUnit",
"delta",
"=",
"parseTimeOffset",
"(",
"timeShiftUnit",
")",
"# if len(seriesList) > 1, they will all have the same pathExpression,",
"# which is all we care about.",
"series",
"=",
"seriesList",
"[",
"0",
"]",
"results",
"=",
"[",
"]",
"timeShiftStartint",
"=",
"int",
"(",
"timeShiftStart",
")",
"timeShiftEndint",
"=",
"int",
"(",
"timeShiftEnd",
")",
"for",
"shft",
"in",
"range",
"(",
"timeShiftStartint",
",",
"timeShiftEndint",
")",
":",
"myContext",
"=",
"requestContext",
".",
"copy",
"(",
")",
"innerDelta",
"=",
"delta",
"*",
"shft",
"myContext",
"[",
"'startTime'",
"]",
"=",
"requestContext",
"[",
"'startTime'",
"]",
"+",
"innerDelta",
"myContext",
"[",
"'endTime'",
"]",
"=",
"requestContext",
"[",
"'endTime'",
"]",
"+",
"innerDelta",
"for",
"shiftedSeries",
"in",
"evaluateTarget",
"(",
"myContext",
",",
"series",
".",
"pathExpression",
")",
":",
"shiftedSeries",
".",
"name",
"=",
"'timeShift(%s, %s, %s)'",
"%",
"(",
"shiftedSeries",
".",
"name",
",",
"timeShiftUnit",
",",
"shft",
")",
"shiftedSeries",
".",
"pathExpression",
"=",
"shiftedSeries",
".",
"name",
"shiftedSeries",
".",
"start",
"=",
"series",
".",
"start",
"shiftedSeries",
".",
"end",
"=",
"series",
".",
"end",
"results",
".",
"append",
"(",
"shiftedSeries",
")",
"return",
"results"
] | Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats). Also takes a start multiplier and end
multiplier for the length of time.
Creates a seriesList composed of the original metric series stacked
with time shifts, starting from the start multiplier through
the end multiplier.
Useful for looking at history, or feeding into averageSeries or
stddevSeries.
Example::
# create a series for today and each of the previous 7 days
&target=timeStack(Sales.widgets.largeBlue,"1d",0,7) | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"followed",
"by",
"a",
"quoted",
"string",
"with",
"the",
"length",
"of",
"time",
"(",
"See",
"from",
"/",
"until",
"in",
"the",
"render",
"\\",
"_api_",
"for",
"examples",
"of",
"time",
"formats",
")",
".",
"Also",
"takes",
"a",
"start",
"multiplier",
"and",
"end",
"multiplier",
"for",
"the",
"length",
"of",
"time",
"-"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3106-L3151 |
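timeStack() re-evaluates its target once per shift through evaluateTarget(), so it cannot run without a backing store; the sign-defaulting and offset arithmetic it starts with can, though. A sketch of just that step, assuming parseTimeOffset lives in graphite_api.render.attime as imported by this module::

    from graphite_api.render.attime import parseTimeOffset

    # A bare offset such as "1d" is coerced to "-1d" before parsing.
    timeShiftUnit = '1d'
    if timeShiftUnit[0].isdigit():
        timeShiftUnit = '-' + timeShiftUnit
    delta = parseTimeOffset(timeShiftUnit)
    print(delta)  # -1 day, 0:00:00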
brutasse/graphite-api | graphite_api/functions.py | timeShift | def timeShift(requestContext, seriesList, timeShift, resetEnd=True,
alignDST=False):
"""
Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats).
Draws the selected metrics shifted in time. If no sign is given, a minus
sign ( - ) is implied which will shift the metric back in time. If a plus
sign ( + ) is given, the metric will be shifted forward in time.
Will reset the end date range automatically to the end of the base stat
unless resetEnd is False. An example case is when you timeshift to last
week and have the graph date range set to include a time in the future;
this will limit the timeshift to pretend it ends at the current time. If
resetEnd is False, the full range including future time is drawn instead.
Because time is shifted by a fixed number of seconds, comparing a time
period with DST to a time period without DST, and vice-versa, will result
in an apparent misalignment. For example, 8am might be overlaid with 7am.
To compensate for this, use the alignDST option.
Useful for comparing a metric against itself at past periods or
correcting data stored at an offset.
Example::
&target=timeShift(Sales.widgets.largeBlue,"7d")
&target=timeShift(Sales.widgets.largeBlue,"-7d")
&target=timeShift(Sales.widgets.largeBlue,"+1h")
"""
# Default to negative. parseTimeOffset defaults to +
if timeShift[0].isdigit():
timeShift = '-' + timeShift
delta = parseTimeOffset(timeShift)
myContext = requestContext.copy()
myContext['startTime'] = requestContext['startTime'] + delta
myContext['endTime'] = requestContext['endTime'] + delta
if alignDST:
reqStartDST = localDST(requestContext['startTime'])
reqEndDST = localDST(requestContext['endTime'])
myStartDST = localDST(myContext['startTime'])
myEndDST = localDST(myContext['endTime'])
dstOffset = timedelta(hours=0)
# If the requestContext is entirely in DST, and we are entirely
# NOT in DST
if (
(reqStartDST and reqEndDST) and
(not myStartDST and not myEndDST)
):
dstOffset = timedelta(hours=1)
# Or if the requestContext is entirely NOT in DST, and we are
# entirely in DST
elif (
(not reqStartDST and not reqEndDST) and
(myStartDST and myEndDST)
):
dstOffset = timedelta(hours=-1)
# Otherwise, we don't do anything, because it would be visually
# confusing
myContext['startTime'] += dstOffset
myContext['endTime'] += dstOffset
results = []
if not seriesList:
return results
# if len(seriesList) > 1, they will all have the same pathExpression,
# which is all we care about.
series = seriesList[0]
for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
shiftedSeries.name = 'timeShift(%s, %s)' % (shiftedSeries.name,
timeShift)
if resetEnd:
shiftedSeries.end = series.end
else:
shiftedSeries.end = (
shiftedSeries.end - shiftedSeries.start + series.start)
shiftedSeries.start = series.start
results.append(shiftedSeries)
return results | python | def timeShift(requestContext, seriesList, timeShift, resetEnd=True,
alignDST=False):
"""
Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats).
Draws the selected metrics shifted in time. If no sign is given, a minus
sign ( - ) is implied which will shift the metric back in time. If a plus
sign ( + ) is given, the metric will be shifted forward in time.
Will reset the end date range automatically to the end of the base stat
unless resetEnd is False. An example case is when you timeshift to last
week and have the graph date range set to include a time in the future;
this will limit the timeshift to pretend it ends at the current time. If
resetEnd is False, the full range including future time is drawn instead.
Because time is shifted by a fixed number of seconds, comparing a time
period with DST to a time period without DST, and vice-versa, will result
in an apparent misalignment. For example, 8am might be overlaid with 7am.
To compensate for this, use the alignDST option.
Useful for comparing a metric against itself at past periods or
correcting data stored at an offset.
Example::
&target=timeShift(Sales.widgets.largeBlue,"7d")
&target=timeShift(Sales.widgets.largeBlue,"-7d")
&target=timeShift(Sales.widgets.largeBlue,"+1h")
"""
# Default to negative. parseTimeOffset defaults to +
if timeShift[0].isdigit():
timeShift = '-' + timeShift
delta = parseTimeOffset(timeShift)
myContext = requestContext.copy()
myContext['startTime'] = requestContext['startTime'] + delta
myContext['endTime'] = requestContext['endTime'] + delta
if alignDST:
reqStartDST = localDST(requestContext['startTime'])
reqEndDST = localDST(requestContext['endTime'])
myStartDST = localDST(myContext['startTime'])
myEndDST = localDST(myContext['endTime'])
dstOffset = timedelta(hours=0)
# If the requestContext is entirely in DST, and we are entirely
# NOT in DST
if (
(reqStartDST and reqEndDST) and
(not myStartDST and not myEndDST)
):
dstOffset = timedelta(hours=1)
# Or if the requestContext is entirely NOT in DST, and we are
# entirely in DST
elif (
(not reqStartDST and not reqEndDST) and
(myStartDST and myEndDST)
):
dstOffset = timedelta(hours=-1)
# Otherwise, we don't do anything, because it would be visually
# confusing
myContext['startTime'] += dstOffset
myContext['endTime'] += dstOffset
results = []
if not seriesList:
return results
# if len(seriesList) > 1, they will all have the same pathExpression,
# which is all we care about.
series = seriesList[0]
for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
shiftedSeries.name = 'timeShift(%s, %s)' % (shiftedSeries.name,
timeShift)
if resetEnd:
shiftedSeries.end = series.end
else:
shiftedSeries.end = (
shiftedSeries.end - shiftedSeries.start + series.start)
shiftedSeries.start = series.start
results.append(shiftedSeries)
return results | [
"def",
"timeShift",
"(",
"requestContext",
",",
"seriesList",
",",
"timeShift",
",",
"resetEnd",
"=",
"True",
",",
"alignDST",
"=",
"False",
")",
":",
"# Default to negative. parseTimeOffset defaults to +",
"if",
"timeShift",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"timeShift",
"=",
"'-'",
"+",
"timeShift",
"delta",
"=",
"parseTimeOffset",
"(",
"timeShift",
")",
"myContext",
"=",
"requestContext",
".",
"copy",
"(",
")",
"myContext",
"[",
"'startTime'",
"]",
"=",
"requestContext",
"[",
"'startTime'",
"]",
"+",
"delta",
"myContext",
"[",
"'endTime'",
"]",
"=",
"requestContext",
"[",
"'endTime'",
"]",
"+",
"delta",
"if",
"alignDST",
":",
"reqStartDST",
"=",
"localDST",
"(",
"requestContext",
"[",
"'startTime'",
"]",
")",
"reqEndDST",
"=",
"localDST",
"(",
"requestContext",
"[",
"'endTime'",
"]",
")",
"myStartDST",
"=",
"localDST",
"(",
"myContext",
"[",
"'startTime'",
"]",
")",
"myEndDST",
"=",
"localDST",
"(",
"myContext",
"[",
"'endTime'",
"]",
")",
"dstOffset",
"=",
"timedelta",
"(",
"hours",
"=",
"0",
")",
"# If the requestContext is entirely in DST, and we are entirely",
"# NOT in DST",
"if",
"(",
"(",
"reqStartDST",
"and",
"reqEndDST",
")",
"and",
"(",
"not",
"myStartDST",
"and",
"not",
"myEndDST",
")",
")",
":",
"dstOffset",
"=",
"timedelta",
"(",
"hours",
"=",
"1",
")",
"# Or if the requestContext is entirely NOT in DST, and we are",
"# entirely in DST",
"elif",
"(",
"(",
"not",
"reqStartDST",
"and",
"not",
"reqEndDST",
")",
"and",
"(",
"myStartDST",
"and",
"myEndDST",
")",
")",
":",
"dstOffset",
"=",
"timedelta",
"(",
"hours",
"=",
"-",
"1",
")",
"# Otherwise, we don't do anything, because it would be visually",
"# confusing",
"myContext",
"[",
"'startTime'",
"]",
"+=",
"dstOffset",
"myContext",
"[",
"'endTime'",
"]",
"+=",
"dstOffset",
"results",
"=",
"[",
"]",
"if",
"not",
"seriesList",
":",
"return",
"results",
"# if len(seriesList) > 1, they will all have the same pathExpression,",
"# which is all we care about.",
"series",
"=",
"seriesList",
"[",
"0",
"]",
"for",
"shiftedSeries",
"in",
"evaluateTarget",
"(",
"myContext",
",",
"series",
".",
"pathExpression",
")",
":",
"shiftedSeries",
".",
"name",
"=",
"'timeShift(%s, %s)'",
"%",
"(",
"shiftedSeries",
".",
"name",
",",
"timeShift",
")",
"if",
"resetEnd",
":",
"shiftedSeries",
".",
"end",
"=",
"series",
".",
"end",
"else",
":",
"shiftedSeries",
".",
"end",
"=",
"(",
"shiftedSeries",
".",
"end",
"-",
"shiftedSeries",
".",
"start",
"+",
"series",
".",
"start",
")",
"shiftedSeries",
".",
"start",
"=",
"series",
".",
"start",
"results",
".",
"append",
"(",
"shiftedSeries",
")",
"return",
"results"
] | Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats).
Draws the selected metrics shifted in time. If no sign is given, a minus
sign ( - ) is implied which will shift the metric back in time. If a plus
sign ( + ) is given, the metric will be shifted forward in time.
Will reset the end date range automatically to the end of the base stat
unless resetEnd is False. An example case is when you timeshift to last
week and have the graph date range set to include a time in the future;
this will limit the timeshift to pretend it ends at the current time. If
resetEnd is False, the full range including future time is drawn instead.
Because time is shifted by a fixed number of seconds, comparing a time
period with DST to a time period without DST, and vice-versa, will result
in an apparent misalignment. For example, 8am might be overlaid with 7am.
To compensate for this, use the alignDST option.
Useful for comparing a metric against itself at past periods or
correcting data stored at an offset.
Example::
&target=timeShift(Sales.widgets.largeBlue,"7d")
&target=timeShift(Sales.widgets.largeBlue,"-7d")
&target=timeShift(Sales.widgets.largeBlue,"+1h") | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"seriesList",
"followed",
"by",
"a",
"quoted",
"string",
"with",
"the",
"length",
"of",
"time",
"(",
"See",
"from",
"/",
"until",
"in",
"the",
"render",
"\\",
"_api_",
"for",
"examples",
"of",
"time",
"formats",
")",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3158-L3243 |
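Like timeStack(), timeShift() needs evaluateTarget() and a data store; the window arithmetic it performs is easy to sketch on its own (the dates below are hypothetical)::

    from datetime import datetime
    from graphite_api.render.attime import parseTimeOffset

    requestContext = {
        'startTime': datetime(2014, 6, 30, 0, 0),
        'endTime': datetime(2014, 6, 30, 12, 0),
    }
    delta = parseTimeOffset('-7d')
    print(requestContext['startTime'] + delta)  # 2014-06-23 00:00:00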
brutasse/graphite-api | graphite_api/functions.py | timeSlice | def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt='now'):
"""
Takes one metric or a wildcard metric, followed by a quoted
string with the time to start the line and another quoted string
with the time to end the line. The start and end times are
inclusive. See ``from / until`` in the render api for examples of
time formats.
Useful for filtering out a part of a series of data from a wider
range of data.
Example::
&target=timeSlice(network.core.port1,"00:00 20140101","11:59 20140630")
&target=timeSlice(network.core.port1,"12:00 20140630","now")
"""
results = []
start = epoch(parseATTime(startSliceAt))
end = epoch(parseATTime(endSliceAt))
for slicedSeries in seriesList:
slicedSeries.name = 'timeSlice(%s, %s, %s)' % (slicedSeries.name,
int(start), int(end))
curr = epoch(requestContext["startTime"])
for i, v in enumerate(slicedSeries):
if v is None or curr < start or curr > end:
slicedSeries[i] = None
curr += slicedSeries.step
results.append(slicedSeries)
return results | python | def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt='now'):
"""
Takes one metric or a wildcard metric, followed by a quoted
string with the time to start the line and another quoted string
with the time to end the line. The start and end times are
inclusive. See ``from / until`` in the render api for examples of
time formats.
Useful for filtering out a part of a series of data from a wider
range of data.
Example::
&target=timeSlice(network.core.port1,"00:00 20140101","11:59 20140630")
&target=timeSlice(network.core.port1,"12:00 20140630","now")
"""
results = []
start = epoch(parseATTime(startSliceAt))
end = epoch(parseATTime(endSliceAt))
for slicedSeries in seriesList:
slicedSeries.name = 'timeSlice(%s, %s, %s)' % (slicedSeries.name,
int(start), int(end))
curr = epoch(requestContext["startTime"])
for i, v in enumerate(slicedSeries):
if v is None or curr < start or curr > end:
slicedSeries[i] = None
curr += slicedSeries.step
results.append(slicedSeries)
return results | [
"def",
"timeSlice",
"(",
"requestContext",
",",
"seriesList",
",",
"startSliceAt",
",",
"endSliceAt",
"=",
"'now'",
")",
":",
"results",
"=",
"[",
"]",
"start",
"=",
"epoch",
"(",
"parseATTime",
"(",
"startSliceAt",
")",
")",
"end",
"=",
"epoch",
"(",
"parseATTime",
"(",
"endSliceAt",
")",
")",
"for",
"slicedSeries",
"in",
"seriesList",
":",
"slicedSeries",
".",
"name",
"=",
"'timeSlice(%s, %s, %s)'",
"%",
"(",
"slicedSeries",
".",
"name",
",",
"int",
"(",
"start",
")",
",",
"int",
"(",
"end",
")",
")",
"curr",
"=",
"epoch",
"(",
"requestContext",
"[",
"\"startTime\"",
"]",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"slicedSeries",
")",
":",
"if",
"v",
"is",
"None",
"or",
"curr",
"<",
"start",
"or",
"curr",
">",
"end",
":",
"slicedSeries",
"[",
"i",
"]",
"=",
"None",
"curr",
"+=",
"slicedSeries",
".",
"step",
"results",
".",
"append",
"(",
"slicedSeries",
")",
"return",
"results"
] | Takes one metric or a wildcard metric, followed by a quoted
string with the time to start the line and another quoted string
with the time to end the line. The start and end times are
inclusive. See ``from / until`` in the render api for examples of
time formats.
Useful for filtering out a part of a series of data from a wider
range of data.
Example::
&target=timeSlice(network.core.port1,"00:00 20140101","11:59 20140630")
&target=timeSlice(network.core.port1,"12:00 20140630","now") | [
"Takes",
"one",
"metric",
"or",
"a",
"wildcard",
"metric",
"followed",
"by",
"a",
"quoted",
"string",
"with",
"the",
"time",
"to",
"start",
"the",
"line",
"and",
"another",
"quoted",
"string",
"with",
"the",
"time",
"to",
"end",
"the",
"line",
".",
"The",
"start",
"and",
"end",
"times",
"are",
"inclusive",
".",
"See",
"from",
"/",
"until",
"in",
"the",
"render",
"api",
"for",
"examples",
"of",
"time",
"formats",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3246-L3275 |
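A minimal sketch of timeSlice(); only requestContext['startTime'] is read. It assumes parseATTime falls back to UTC when no tzinfo is passed, so the expected output is hedged accordingly (TimeSeries from graphite_api.render.datalib)::

    from datetime import datetime
    from graphite_api.functions import timeSlice
    from graphite_api.render.datalib import TimeSeries

    requestContext = {'startTime': datetime(1970, 1, 1, 0, 0)}
    # One point per minute starting at the Unix epoch.
    series = TimeSeries('network.core.port1', 0, 300, 60, [1, 2, 3, 4, 5])
    [result] = timeSlice(requestContext, [series], '00:01 19700101',
                         '00:03 19700101')
    print(list(result))  # [None, 2, 3, 4, None] if the boundaries parse as UTC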
brutasse/graphite-api | graphite_api/functions.py | constantLine | def constantLine(requestContext, value):
"""
Takes a float F.
Draws a horizontal line at value F across the graph.
Example::
&target=constantLine(123.456)
"""
name = "constantLine(%s)" % str(value)
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
step = int((end - start) / 2.0)
series = TimeSeries(str(value), start, end, step, [value, value, value])
series.pathExpression = name
return [series] | python | def constantLine(requestContext, value):
"""
Takes a float F.
Draws a horizontal line at value F across the graph.
Example::
&target=constantLine(123.456)
"""
name = "constantLine(%s)" % str(value)
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
step = int((end - start) / 2.0)
series = TimeSeries(str(value), start, end, step, [value, value, value])
series.pathExpression = name
return [series] | [
"def",
"constantLine",
"(",
"requestContext",
",",
"value",
")",
":",
"name",
"=",
"\"constantLine(%s)\"",
"%",
"str",
"(",
"value",
")",
"start",
"=",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"'startTime'",
"]",
")",
")",
"end",
"=",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"'endTime'",
"]",
")",
")",
"step",
"=",
"int",
"(",
"(",
"end",
"-",
"start",
")",
"/",
"2.0",
")",
"series",
"=",
"TimeSeries",
"(",
"str",
"(",
"value",
")",
",",
"start",
",",
"end",
",",
"step",
",",
"[",
"value",
",",
"value",
",",
"value",
"]",
")",
"series",
".",
"pathExpression",
"=",
"name",
"return",
"[",
"series",
"]"
] | Takes a float F.
Draws a horizontal line at value F across the graph.
Example::
&target=constantLine(123.456) | [
"Takes",
"a",
"float",
"F",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3278-L3295 |
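A minimal sketch of constantLine(); only the start and end times are read from requestContext, so a stub dict with two datetimes is enough::

    from datetime import datetime
    from graphite_api.functions import constantLine

    requestContext = {
        'startTime': datetime(2014, 1, 1),
        'endTime': datetime(2014, 1, 2),
    }
    [series] = constantLine(requestContext, 123.456)
    print(series.name, list(series))  # 123.456 [123.456, 123.456, 123.456]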
brutasse/graphite-api | graphite_api/functions.py | aggregateLine | def aggregateLine(requestContext, seriesList, func='avg'):
"""
Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example::
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg')
"""
t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}
if func not in t_funcs:
raise ValueError("Invalid function %s" % func)
results = []
for series in seriesList:
value = t_funcs[func](series)
if value is not None:
name = 'aggregateLine(%s, %g)' % (series.name, value)
else:
name = 'aggregateLine(%s, None)' % (series.name)
[series] = constantLine(requestContext, value)
series.name = name
series.pathExpression = series.name
results.append(series)
return results | python | def aggregateLine(requestContext, seriesList, func='avg'):
"""
Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example::
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg')
"""
t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}
if func not in t_funcs:
raise ValueError("Invalid function %s" % func)
results = []
for series in seriesList:
value = t_funcs[func](series)
if value is not None:
name = 'aggregateLine(%s, %g)' % (series.name, value)
else:
name = 'aggregateLine(%s, None)' % (series.name)
[series] = constantLine(requestContext, value)
series.name = name
series.pathExpression = series.name
results.append(series)
return results | [
"def",
"aggregateLine",
"(",
"requestContext",
",",
"seriesList",
",",
"func",
"=",
"'avg'",
")",
":",
"t_funcs",
"=",
"{",
"'avg'",
":",
"safeAvg",
",",
"'min'",
":",
"safeMin",
",",
"'max'",
":",
"safeMax",
"}",
"if",
"func",
"not",
"in",
"t_funcs",
":",
"raise",
"ValueError",
"(",
"\"Invalid function %s\"",
"%",
"func",
")",
"results",
"=",
"[",
"]",
"for",
"series",
"in",
"seriesList",
":",
"value",
"=",
"t_funcs",
"[",
"func",
"]",
"(",
"series",
")",
"if",
"value",
"is",
"not",
"None",
":",
"name",
"=",
"'aggregateLine(%s, %g)'",
"%",
"(",
"series",
".",
"name",
",",
"value",
")",
"else",
":",
"name",
"=",
"'aggregateLine(%s, None)'",
"%",
"(",
"series",
".",
"name",
")",
"[",
"series",
"]",
"=",
"constantLine",
"(",
"requestContext",
",",
"value",
")",
"series",
".",
"name",
"=",
"name",
"series",
".",
"pathExpression",
"=",
"series",
".",
"name",
"results",
".",
"append",
"(",
"series",
")",
"return",
"results"
] | Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example::
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg') | [
"Takes",
"a",
"metric",
"or",
"wildcard",
"seriesList",
"and",
"draws",
"a",
"horizontal",
"line",
"based",
"on",
"the",
"function",
"applied",
"to",
"each",
"series",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3298-L3335 |
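A minimal sketch of aggregateLine() with the 'avg' aggregator; the stub requestContext carries only the two datetimes that constantLine() needs downstream (TimeSeries from graphite_api.render.datalib)::

    from datetime import datetime
    from graphite_api.functions import aggregateLine
    from graphite_api.render.datalib import TimeSeries

    requestContext = {
        'startTime': datetime(2014, 1, 1),
        'endTime': datetime(2014, 1, 2),
    }
    series = TimeSeries('server01.connections.total', 0, 180, 60, [10, 20, 30])
    [result] = aggregateLine(requestContext, [series], 'avg')
    print(result.name)  # aggregateLine(server01.connections.total, 20)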
brutasse/graphite-api | graphite_api/functions.py | verticalLine | def verticalLine(requestContext, ts, label=None, color=None):
"""
Takes a timestamp string ts.
Draws a vertical line at the designated timestamp with optional
'label' and 'color'. Supported timestamp formats include both
relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings,
such as those used with ``from`` and ``until`` parameters. When
set, the 'label' will appear in the graph legend.
Note: Any timestamps defined outside the requested range will
raise a 'ValueError' exception.
Example::
&target=verticalLine("12:3420131108","event","blue")
&target=verticalLine("16:00_20110501","event")
&target=verticalLine("-5mins")
"""
ts = int(epoch(parseATTime(ts, requestContext['tzinfo'])))
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
if ts < start:
raise ValueError("verticalLine(): timestamp %s exists "
"before start of range" % ts)
elif ts > end:
raise ValueError("verticalLine(): timestamp %s exists "
"after end of range" % ts)
start = end = ts
step = 1.0
series = TimeSeries(label, start, end, step, [1.0, 1.0])
series.options['drawAsInfinite'] = True
if color:
series.color = color
return [series] | python | def verticalLine(requestContext, ts, label=None, color=None):
"""
Takes a timestamp string ts.
Draws a vertical line at the designated timestamp with optional
'label' and 'color'. Supported timestamp formats include both
relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings,
such as those used with ``from`` and ``until`` parameters. When
set, the 'label' will appear in the graph legend.
Note: Any timestamps defined outside the requested range will
raise a 'ValueError' exception.
Example::
&target=verticalLine("12:3420131108","event","blue")
&target=verticalLine("16:00_20110501","event")
&target=verticalLine("-5mins")
"""
ts = int(epoch(parseATTime(ts, requestContext['tzinfo'])))
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
if ts < start:
raise ValueError("verticalLine(): timestamp %s exists "
"before start of range" % ts)
elif ts > end:
raise ValueError("verticalLine(): timestamp %s exists "
"after end of range" % ts)
start = end = ts
step = 1.0
series = TimeSeries(label, start, end, step, [1.0, 1.0])
series.options['drawAsInfinite'] = True
if color:
series.color = color
return [series] | [
"def",
"verticalLine",
"(",
"requestContext",
",",
"ts",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
")",
":",
"ts",
"=",
"int",
"(",
"epoch",
"(",
"parseATTime",
"(",
"ts",
",",
"requestContext",
"[",
"'tzinfo'",
"]",
")",
")",
")",
"start",
"=",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"'startTime'",
"]",
")",
")",
"end",
"=",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"'endTime'",
"]",
")",
")",
"if",
"ts",
"<",
"start",
":",
"raise",
"ValueError",
"(",
"\"verticalLine(): timestamp %s exists \"",
"\"before start of range\"",
"%",
"ts",
")",
"elif",
"ts",
">",
"end",
":",
"raise",
"ValueError",
"(",
"\"verticalLine(): timestamp %s exists \"",
"\"after end of range\"",
"%",
"ts",
")",
"start",
"=",
"end",
"=",
"ts",
"step",
"=",
"1.0",
"series",
"=",
"TimeSeries",
"(",
"label",
",",
"start",
",",
"end",
",",
"step",
",",
"[",
"1.0",
",",
"1.0",
"]",
")",
"series",
".",
"options",
"[",
"'drawAsInfinite'",
"]",
"=",
"True",
"if",
"color",
":",
"series",
".",
"color",
"=",
"color",
"return",
"[",
"series",
"]"
] | Takes a timestamp string ts.
Draws a vertical line at the designated timestamp with optional
'label' and 'color'. Supported timestamp formats include both
relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings,
such as those used with ``from`` and ``until`` parameters. When
set, the 'label' will appear in the graph legend.
Note: Any timestamps defined outside the requested range will
raise a 'ValueError' exception.
Example::
&target=verticalLine("12:3420131108","event","blue")
&target=verticalLine("16:00_20110501","event")
&target=verticalLine("-5mins") | [
"Takes",
"a",
"timestamp",
"string",
"ts",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3338-L3373 |
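A minimal sketch of verticalLine(); it reads 'tzinfo' as well as the window bounds, so the stub uses pytz (a graphite-api dependency). As documented above, the marker must fall inside the requested range or a ValueError is raised::

    from datetime import datetime
    import pytz
    from graphite_api.functions import verticalLine

    tz = pytz.utc
    requestContext = {
        'startTime': tz.localize(datetime(2014, 1, 1)),
        'endTime': tz.localize(datetime(2014, 1, 2)),
        'tzinfo': tz,
    }
    [marker] = verticalLine(requestContext, '12:00 20140101', 'event', 'blue')
    print(marker.options['drawAsInfinite'], marker.color)  # True blue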
brutasse/graphite-api | graphite_api/functions.py | threshold | def threshold(requestContext, value, label=None, color=None):
"""
Takes a float F, followed by a label (in double quotes) and a color.
(See ``bgcolor`` in the render\_api_ for valid color names & formats.)
Draws a horizontal line at value F across the graph.
Example::
&target=threshold(123.456, "omgwtfbbq", "red")
"""
[series] = constantLine(requestContext, value)
if label:
series.name = label
if color:
series.color = color
return [series] | python | def threshold(requestContext, value, label=None, color=None):
"""
Takes a float F, followed by a label (in double quotes) and a color.
(See ``bgcolor`` in the render\_api_ for valid color names & formats.)
Draws a horizontal line at value F across the graph.
Example::
&target=threshold(123.456, "omgwtfbbq", "red")
"""
[series] = constantLine(requestContext, value)
if label:
series.name = label
if color:
series.color = color
return [series] | [
"def",
"threshold",
"(",
"requestContext",
",",
"value",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
")",
":",
"[",
"series",
"]",
"=",
"constantLine",
"(",
"requestContext",
",",
"value",
")",
"if",
"label",
":",
"series",
".",
"name",
"=",
"label",
"if",
"color",
":",
"series",
".",
"color",
"=",
"color",
"return",
"[",
"series",
"]"
] | Takes a float F, followed by a label (in double quotes) and a color.
(See ``bgcolor`` in the render\_api_ for valid color names & formats.)
Draws a horizontal line at value F across the graph.
Example::
&target=threshold(123.456, "omgwtfbbq", "red") | [
"Takes",
"a",
"float",
"F",
"followed",
"by",
"a",
"label",
"(",
"in",
"double",
"quotes",
")",
"and",
"a",
"color",
".",
"(",
"See",
"bgcolor",
"in",
"the",
"render",
"\\",
"_api_",
"for",
"valid",
"color",
"names",
"&",
"formats",
".",
")"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3376-L3393 |
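A minimal sketch of threshold(), which is constantLine() plus an optional label and color::

    from datetime import datetime
    from graphite_api.functions import threshold

    requestContext = {
        'startTime': datetime(2014, 1, 1),
        'endTime': datetime(2014, 1, 2),
    }
    [line] = threshold(requestContext, 123.456, 'omgwtfbbq', 'red')
    print(line.name, line.color)  # omgwtfbbq red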
brutasse/graphite-api | graphite_api/functions.py | transformNull | def transformNull(requestContext, seriesList, default=0, referenceSeries=None):
"""
Takes a metric or wildcard seriesList and replaces null values with
the value specified by `default`. The value 0 is used if not specified.
The optional referenceSeries, if specified, is a metric or wildcard
series list that governs in which time intervals nulls should be replaced.
If specified, nulls are replaced only in intervals where a non-null is
found for the same interval in any of referenceSeries. This method
complements the drawNullAsZero function in graphical mode, but also
works in text-only mode.
Example::
&target=transformNull(webapp.pages.*.views,-1)
This would take any page that didn't have values and supply negative 1 as
a default. Any other numeric value may be used as well.
"""
def transform(v, d):
if v is None:
return d
else:
return v
if referenceSeries:
defaults = [default if any(v is not None for v in x) else None
for x in zip_longest(*referenceSeries)]
else:
defaults = None
for series in seriesList:
if referenceSeries:
series.name = "transformNull(%s,%g,referenceSeries)" % (
series.name, default)
else:
series.name = "transformNull(%s,%g)" % (series.name, default)
series.pathExpression = series.name
if defaults:
values = [transform(v, d) for v, d in zip_longest(series,
defaults)]
else:
values = [transform(v, default) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList | python | def transformNull(requestContext, seriesList, default=0, referenceSeries=None):
"""
Takes a metric or wildcard seriesList and replaces null values with
the value specified by `default`. The value 0 is used if not specified.
The optional referenceSeries, if specified, is a metric or wildcard
series list that governs in which time intervals nulls should be replaced.
If specified, nulls are replaced only in intervals where a non-null is
found for the same interval in any of referenceSeries. This method
complements the drawNullAsZero function in graphical mode, but also
works in text-only mode.
Example::
&target=transformNull(webapp.pages.*.views,-1)
This would take any page that didn't have values and supply negative 1 as
a default. Any other numeric value may be used as well.
"""
def transform(v, d):
if v is None:
return d
else:
return v
if referenceSeries:
defaults = [default if any(v is not None for v in x) else None
for x in zip_longest(*referenceSeries)]
else:
defaults = None
for series in seriesList:
if referenceSeries:
series.name = "transformNull(%s,%g,referenceSeries)" % (
series.name, default)
else:
series.name = "transformNull(%s,%g)" % (series.name, default)
series.pathExpression = series.name
if defaults:
values = [transform(v, d) for v, d in zip_longest(series,
defaults)]
else:
values = [transform(v, default) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList | [
"def",
"transformNull",
"(",
"requestContext",
",",
"seriesList",
",",
"default",
"=",
"0",
",",
"referenceSeries",
"=",
"None",
")",
":",
"def",
"transform",
"(",
"v",
",",
"d",
")",
":",
"if",
"v",
"is",
"None",
":",
"return",
"d",
"else",
":",
"return",
"v",
"if",
"referenceSeries",
":",
"defaults",
"=",
"[",
"default",
"if",
"any",
"(",
"v",
"is",
"not",
"None",
"for",
"v",
"in",
"x",
")",
"else",
"None",
"for",
"x",
"in",
"zip_longest",
"(",
"*",
"referenceSeries",
")",
"]",
"else",
":",
"defaults",
"=",
"None",
"for",
"series",
"in",
"seriesList",
":",
"if",
"referenceSeries",
":",
"series",
".",
"name",
"=",
"\"transformNull(%s,%g,referenceSeries)\"",
"%",
"(",
"series",
".",
"name",
",",
"default",
")",
"else",
":",
"series",
".",
"name",
"=",
"\"transformNull(%s,%g)\"",
"%",
"(",
"series",
".",
"name",
",",
"default",
")",
"series",
".",
"pathExpression",
"=",
"series",
".",
"name",
"if",
"defaults",
":",
"values",
"=",
"[",
"transform",
"(",
"v",
",",
"d",
")",
"for",
"v",
",",
"d",
"in",
"zip_longest",
"(",
"series",
",",
"defaults",
")",
"]",
"else",
":",
"values",
"=",
"[",
"transform",
"(",
"v",
",",
"default",
")",
"for",
"v",
"in",
"series",
"]",
"series",
".",
"extend",
"(",
"values",
")",
"del",
"series",
"[",
":",
"len",
"(",
"values",
")",
"]",
"return",
"seriesList"
] | Takes a metric or wildcard seriesList and replaces null values with
the value specified by `default`. The value 0 is used if not specified.
The optional referenceSeries, if specified, is a metric or wildcard
series list that governs in which time intervals nulls should be replaced.
If specified, nulls are replaced only in intervals where a non-null is
found for the same interval in any of referenceSeries. This method
complements the drawNullAsZero function in graphical mode, but also
works in text-only mode.
Example::
&target=transformNull(webapp.pages.*.views,-1)
This would take any page that didn't have values and supply negative 1 as
a default. Any other numeric value may be used as well. | [
"Takes",
"a",
"metric",
"or",
"wildcard",
"seriesList",
"and",
"replaces",
"null",
"values",
"with",
"the",
"value",
"specified",
"by",
"default",
".",
"The",
"value",
"0",
"used",
"if",
"not",
"specified",
".",
"The",
"optional",
"referenceSeries",
"if",
"specified",
"is",
"a",
"metric",
"or",
"wildcard",
"series",
"list",
"that",
"governs",
"which",
"time",
"intervals",
"nulls",
"should",
"be",
"replaced",
".",
"If",
"specified",
"nulls",
"are",
"replaced",
"only",
"in",
"intervals",
"where",
"a",
"non",
"-",
"null",
"is",
"found",
"for",
"the",
"same",
"interval",
"in",
"any",
"of",
"referenceSeries",
".",
"This",
"method",
"compliments",
"the",
"drawNullAsZero",
"function",
"in",
"graphical",
"mode",
"but",
"also",
"works",
"in",
"text",
"-",
"only",
"mode",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3396-L3440 |
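A minimal sketch of transformNull() without a referenceSeries; requestContext is unused, and the metric name and values are hypothetical (TimeSeries from graphite_api.render.datalib)::

    from graphite_api.functions import transformNull
    from graphite_api.render.datalib import TimeSeries

    series = TimeSeries('webapp.pages.home.views', 0, 240, 60,
                        [1, None, 3, None])
    [result] = transformNull({}, [series], default=-1)
    print(result.name)   # transformNull(webapp.pages.home.views,-1)
    print(list(result))  # [1, -1, 3, -1]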
brutasse/graphite-api | graphite_api/functions.py | isNonNull | def isNonNull(requestContext, seriesList):
"""
Takes a metric or wildcard seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
"""
def transform(v):
if v is None:
return 0
else:
return 1
for series in seriesList:
series.name = "isNonNull(%s)" % (series.name)
series.pathExpression = series.name
values = [transform(v) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList | python | def isNonNull(requestContext, seriesList):
"""
Takes a metric or wildcard seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
"""
def transform(v):
if v is None:
return 0
else:
return 1
for series in seriesList:
series.name = "isNonNull(%s)" % (series.name)
series.pathExpression = series.name
values = [transform(v) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList | [
"def",
"isNonNull",
"(",
"requestContext",
",",
"seriesList",
")",
":",
"def",
"transform",
"(",
"v",
")",
":",
"if",
"v",
"is",
"None",
":",
"return",
"0",
"else",
":",
"return",
"1",
"for",
"series",
"in",
"seriesList",
":",
"series",
".",
"name",
"=",
"\"isNonNull(%s)\"",
"%",
"(",
"series",
".",
"name",
")",
"series",
".",
"pathExpression",
"=",
"series",
".",
"name",
"values",
"=",
"[",
"transform",
"(",
"v",
")",
"for",
"v",
"in",
"series",
"]",
"series",
".",
"extend",
"(",
"values",
")",
"del",
"series",
"[",
":",
"len",
"(",
"values",
")",
"]",
"return",
"seriesList"
] | Takes a metric or wildcard seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values. | [
"Takes",
"a",
"metric",
"or",
"wild",
"card",
"seriesList",
"and",
"counts",
"up",
"how",
"many",
"non",
"-",
"null",
"values",
"are",
"specified",
".",
"This",
"is",
"useful",
"for",
"understanding",
"which",
"metrics",
"have",
"data",
"at",
"a",
"given",
"point",
"in",
"time",
"(",
"ie",
"to",
"count",
"which",
"servers",
"are",
"alive",
")",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3443-L3470 |
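A minimal sketch of isNonNull(), mapping nulls to 0 and everything else to 1::

    from graphite_api.functions import isNonNull
    from graphite_api.render.datalib import TimeSeries

    series = TimeSeries('webapp.pages.home.views', 0, 240, 60,
                        [1, None, 3, None])
    [result] = isNonNull({}, [series])
    print(list(result))  # [1, 0, 1, 0]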
brutasse/graphite-api | graphite_api/functions.py | identity | def identity(requestContext, name, step=60):
"""
Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age.
Example::
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == t.
Accepts optional second argument as 'step' parameter (default step is
60 sec)
"""
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
values = range(start, end, step)
series = TimeSeries(name, start, end, step, values)
series.pathExpression = 'identity("%s")' % name
return [series] | python | def identity(requestContext, name, step=60):
"""
Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age.
Example::
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == t.
Accepts optional second argument as 'step' parameter (default step is
60 sec)
"""
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
values = range(start, end, step)
series = TimeSeries(name, start, end, step, values)
series.pathExpression = 'identity("%s")' % name
return [series] | [
"def",
"identity",
"(",
"requestContext",
",",
"name",
",",
"step",
"=",
"60",
")",
":",
"start",
"=",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"\"startTime\"",
"]",
")",
")",
"end",
"=",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"\"endTime\"",
"]",
")",
")",
"values",
"=",
"range",
"(",
"start",
",",
"end",
",",
"step",
")",
"series",
"=",
"TimeSeries",
"(",
"name",
",",
"start",
",",
"end",
",",
"step",
",",
"values",
")",
"series",
".",
"pathExpression",
"=",
"'identity(\"%s\")'",
"%",
"name",
"return",
"[",
"series",
"]"
] | Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age.
Example::
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == t.
Accepts optional second argument as 'step' parameter (default step is
60 sec) | [
"Identity",
"function",
":",
"Returns",
"datapoints",
"where",
"the",
"value",
"equals",
"the",
"timestamp",
"of",
"the",
"datapoint",
".",
"Useful",
"when",
"you",
"have",
"another",
"series",
"where",
"the",
"value",
"is",
"a",
"timestamp",
"and",
"you",
"want",
"to",
"compare",
"it",
"to",
"the",
"time",
"of",
"the",
"datapoint",
"to",
"render",
"an",
"age"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3473-L3496 |
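A minimal sketch of identity() over the first five minutes of the Unix epoch, so the expected values are easy to read off::

    from datetime import datetime
    from graphite_api.functions import identity

    requestContext = {
        'startTime': datetime(1970, 1, 1, 0, 0),
        'endTime': datetime(1970, 1, 1, 0, 5),
    }
    [series] = identity(requestContext, 'The.time.series')
    print(list(series))  # [0, 60, 120, 180, 240]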
brutasse/graphite-api | graphite_api/functions.py | countSeries | def countSeries(requestContext, *seriesLists):
"""
Draws a horizontal line representing the number of nodes found in the
seriesList.
Example::
&target=countSeries(carbon.agents.*.*)
"""
if not seriesLists or not any(seriesLists):
series = constantLine(requestContext, 0).pop()
series.pathExpression = "countSeries()"
else:
seriesList, start, end, step = normalize(seriesLists)
name = "countSeries(%s)" % formatPathExpressions(seriesList)
values = (int(len(row)) for row in zip_longest(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series] | python | def countSeries(requestContext, *seriesLists):
"""
Draws a horizontal line representing the number of nodes found in the
seriesList.
Example::
&target=countSeries(carbon.agents.*.*)
"""
if not seriesLists or not any(seriesLists):
series = constantLine(requestContext, 0).pop()
series.pathExpression = "countSeries()"
else:
seriesList, start, end, step = normalize(seriesLists)
name = "countSeries(%s)" % formatPathExpressions(seriesList)
values = (int(len(row)) for row in zip_longest(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series] | [
"def",
"countSeries",
"(",
"requestContext",
",",
"*",
"seriesLists",
")",
":",
"if",
"not",
"seriesLists",
"or",
"not",
"any",
"(",
"seriesLists",
")",
":",
"series",
"=",
"constantLine",
"(",
"requestContext",
",",
"0",
")",
".",
"pop",
"(",
")",
"series",
".",
"pathExpression",
"=",
"\"countSeries()\"",
"else",
":",
"seriesList",
",",
"start",
",",
"end",
",",
"step",
"=",
"normalize",
"(",
"seriesLists",
")",
"name",
"=",
"\"countSeries(%s)\"",
"%",
"formatPathExpressions",
"(",
"seriesList",
")",
"values",
"=",
"(",
"int",
"(",
"len",
"(",
"row",
")",
")",
"for",
"row",
"in",
"zip_longest",
"(",
"*",
"seriesList",
")",
")",
"series",
"=",
"TimeSeries",
"(",
"name",
",",
"start",
",",
"end",
",",
"step",
",",
"values",
")",
"series",
".",
"pathExpression",
"=",
"name",
"return",
"[",
"series",
"]"
] | Draws a horizontal line representing the number of nodes found in the
seriesList.
Example::
&target=countSeries(carbon.agents.*.*) | [
"Draws",
"a",
"horizontal",
"line",
"representing",
"the",
"number",
"of",
"nodes",
"found",
"in",
"the",
"seriesList",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3499-L3519 |
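A minimal sketch of countSeries(); pathExpression is set explicitly here because formatPathExpressions() reads it, and series coming out of a real fetch would normally carry it already::

    from graphite_api.functions import countSeries
    from graphite_api.render.datalib import TimeSeries

    a = TimeSeries('carbon.agents.host1.cpu', 0, 180, 60, [1, 2, 3])
    b = TimeSeries('carbon.agents.host2.cpu', 0, 180, 60, [4, 5, 6])
    for s in (a, b):
        s.pathExpression = s.name  # normally set when the series is fetched
    [count] = countSeries({}, [a, b])
    print(list(count))  # [2, 2, 2]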
brutasse/graphite-api | graphite_api/functions.py | group | def group(requestContext, *seriesLists):
"""
Takes an arbitrary number of seriesLists and adds them to a single
seriesList. This is used to pass multiple seriesLists to a function which
only takes one.
"""
seriesGroup = []
for s in seriesLists:
seriesGroup.extend(s)
return seriesGroup | python | def group(requestContext, *seriesLists):
"""
Takes an arbitrary number of seriesLists and adds them to a single
seriesList. This is used to pass multiple seriesLists to a function which
only takes one.
"""
seriesGroup = []
for s in seriesLists:
seriesGroup.extend(s)
return seriesGroup | [
"def",
"group",
"(",
"requestContext",
",",
"*",
"seriesLists",
")",
":",
"seriesGroup",
"=",
"[",
"]",
"for",
"s",
"in",
"seriesLists",
":",
"seriesGroup",
".",
"extend",
"(",
"s",
")",
"return",
"seriesGroup"
] | Takes an arbitrary number of seriesLists and adds them to a single
seriesList. This is used to pass multiple seriesLists to a function which
only takes one. | [
"Takes",
"an",
"arbitrary",
"number",
"of",
"seriesLists",
"and",
"adds",
"them",
"to",
"a",
"single",
"seriesList",
".",
"This",
"is",
"used",
"to",
"pass",
"multiple",
"seriesLists",
"to",
"a",
"function",
"which",
"only",
"takes",
"one",
"."
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3522-L3532 |
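A minimal sketch of group(), which simply concatenates its seriesLists::

    from graphite_api.functions import group
    from graphite_api.render.datalib import TimeSeries

    a = [TimeSeries('a.cpu', 0, 60, 60, [1])]
    b = [TimeSeries('b.cpu', 0, 60, 60, [2])]
    combined = group({}, a, b)
    print([s.name for s in combined])  # ['a.cpu', 'b.cpu']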
brutasse/graphite-api | graphite_api/functions.py | mapSeries | def mapSeries(requestContext, seriesList, mapNode):
"""
Short form: ``map()``.
Takes a seriesList and maps it to a list of sub-seriesList. Each
sub-seriesList has the given mapNode in common.
Example (note: This function is not very useful alone. It should be used
with :py:func:`reduceSeries`)::
mapSeries(servers.*.cpu.*,1) =>
[
servers.server1.cpu.*,
servers.server2.cpu.*,
...
servers.serverN.cpu.*
]
"""
metaSeries = {}
keys = []
for series in seriesList:
key = series.name.split(".")[mapNode]
if key not in metaSeries:
metaSeries[key] = [series]
keys.append(key)
else:
metaSeries[key].append(series)
return [metaSeries[k] for k in keys] | python | def mapSeries(requestContext, seriesList, mapNode):
"""
Short form: ``map()``.
Takes a seriesList and maps it to a list of sub-seriesList. Each
sub-seriesList has the given mapNode in common.
Example (note: This function is not very useful alone. It should be used
with :py:func:`reduceSeries`)::
mapSeries(servers.*.cpu.*,1) =>
[
servers.server1.cpu.*,
servers.server2.cpu.*,
...
servers.serverN.cpu.*
]
"""
metaSeries = {}
keys = []
for series in seriesList:
key = series.name.split(".")[mapNode]
if key not in metaSeries:
metaSeries[key] = [series]
keys.append(key)
else:
metaSeries[key].append(series)
    return [metaSeries[k] for k in keys] | ["def", "mapSeries", "(", "requestContext", ",", "seriesList", ",", "mapNode", ")", ":", ...] | Short form: ``map()``.
Takes a seriesList and maps it to a list of sub-seriesList. Each
sub-seriesList has the given mapNode in common.
Example (note: This function is not very useful alone. It should be used
with :py:func:`reduceSeries`)::
mapSeries(servers.*.cpu.*,1) =>
[
servers.server1.cpu.*,
servers.server2.cpu.*,
...
servers.serverN.cpu.*
] | ["Short", "form", ":", "map", "()", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3535-L3562 |
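The grouping step of mapSeries can be exercised on bare metric names; the names below are assumed examples, not metrics from any real installation.

def map_sketch(names, map_node):
    # Group names by the dotted node at index map_node, keeping first-seen order.
    groups, keys = {}, []
    for name in names:
        key = name.split(".")[map_node]
        if key not in groups:
            groups[key] = []
            keys.append(key)
        groups[key].append(name)
    return [groups[k] for k in keys]

names = ["servers.server1.cpu.user", "servers.server1.cpu.system",
         "servers.server2.cpu.user"]
print(map_sketch(names, 1))
# [['servers.server1.cpu.user', 'servers.server1.cpu.system'],
#  ['servers.server2.cpu.user']]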
brutasse/graphite-api | graphite_api/functions.py | reduceSeries | def reduceSeries(requestContext, seriesLists, reduceFunction, reduceNode,
*reduceMatchers):
"""
Short form: ``reduce()``.
Takes a list of seriesLists and reduces it to a list of series by means of
the reduceFunction.
Reduction is performed by matching the reduceNode in each series against
    the list of reduceMatchers. Each series is then passed to the
    reduceFunction as arguments in the order given by reduceMatchers. The
    reduceFunction should yield a single series.
    The resulting series are aliased so that they can easily be
nested in other functions.
**Example**: Map/Reduce asPercent(bytes_used,total_bytes) for each server.
Assume that metrics in the form below exist::
servers.server1.disk.bytes_used
servers.server1.disk.total_bytes
servers.server2.disk.bytes_used
servers.server2.disk.total_bytes
servers.server3.disk.bytes_used
servers.server3.disk.total_bytes
...
servers.serverN.disk.bytes_used
servers.serverN.disk.total_bytes
To get the percentage of disk used for each server::
reduceSeries(mapSeries(servers.*.disk.*,1),
"asPercent",3,"bytes_used","total_bytes") =>
alias(asPercent(servers.server1.disk.bytes_used,
servers.server1.disk.total_bytes),
"servers.server1.disk.reduce.asPercent"),
alias(asPercent(servers.server2.disk.bytes_used,
servers.server2.disk.total_bytes),
"servers.server2.disk.reduce.asPercent"),
...
alias(asPercent(servers.serverN.disk.bytes_used,
servers.serverN.disk.total_bytes),
"servers.serverN.disk.reduce.asPercent")
In other words, we will get back the following metrics::
servers.server1.disk.reduce.asPercent,
servers.server2.disk.reduce.asPercent,
...
servers.serverN.disk.reduce.asPercent
.. seealso:: :py:func:`mapSeries`
"""
metaSeries = {}
keys = []
for seriesList in seriesLists:
for series in seriesList:
nodes = series.name.split('.')
node = nodes[reduceNode]
reduceSeriesName = '.'.join(
nodes[0:reduceNode]) + '.reduce.' + reduceFunction
if node in reduceMatchers:
if reduceSeriesName not in metaSeries:
metaSeries[reduceSeriesName] = [None] * len(reduceMatchers)
keys.append(reduceSeriesName)
i = reduceMatchers.index(node)
metaSeries[reduceSeriesName][i] = series
for key in keys:
metaSeries[key] = app.functions[reduceFunction](
requestContext, *[[s] for s in metaSeries[key]])[0]
metaSeries[key].name = key
return [metaSeries[key] for key in keys] | python | def reduceSeries(requestContext, seriesLists, reduceFunction, reduceNode,
*reduceMatchers):
"""
Short form: ``reduce()``.
Takes a list of seriesLists and reduces it to a list of series by means of
the reduceFunction.
Reduction is performed by matching the reduceNode in each series against
    the list of reduceMatchers. Each series is then passed to the
    reduceFunction as arguments in the order given by reduceMatchers. The
    reduceFunction should yield a single series.
    The resulting series are aliased so that they can easily be
nested in other functions.
**Example**: Map/Reduce asPercent(bytes_used,total_bytes) for each server.
Assume that metrics in the form below exist::
servers.server1.disk.bytes_used
servers.server1.disk.total_bytes
servers.server2.disk.bytes_used
servers.server2.disk.total_bytes
servers.server3.disk.bytes_used
servers.server3.disk.total_bytes
...
servers.serverN.disk.bytes_used
servers.serverN.disk.total_bytes
To get the percentage of disk used for each server::
reduceSeries(mapSeries(servers.*.disk.*,1),
"asPercent",3,"bytes_used","total_bytes") =>
alias(asPercent(servers.server1.disk.bytes_used,
servers.server1.disk.total_bytes),
"servers.server1.disk.reduce.asPercent"),
alias(asPercent(servers.server2.disk.bytes_used,
servers.server2.disk.total_bytes),
"servers.server2.disk.reduce.asPercent"),
...
alias(asPercent(servers.serverN.disk.bytes_used,
servers.serverN.disk.total_bytes),
"servers.serverN.disk.reduce.asPercent")
In other words, we will get back the following metrics::
servers.server1.disk.reduce.asPercent,
servers.server2.disk.reduce.asPercent,
...
servers.serverN.disk.reduce.asPercent
.. seealso:: :py:func:`mapSeries`
"""
metaSeries = {}
keys = []
for seriesList in seriesLists:
for series in seriesList:
nodes = series.name.split('.')
node = nodes[reduceNode]
reduceSeriesName = '.'.join(
nodes[0:reduceNode]) + '.reduce.' + reduceFunction
if node in reduceMatchers:
if reduceSeriesName not in metaSeries:
metaSeries[reduceSeriesName] = [None] * len(reduceMatchers)
keys.append(reduceSeriesName)
i = reduceMatchers.index(node)
metaSeries[reduceSeriesName][i] = series
for key in keys:
metaSeries[key] = app.functions[reduceFunction](
requestContext, *[[s] for s in metaSeries[key]])[0]
metaSeries[key].name = key
    return [metaSeries[key] for key in keys] | ["def", "reduceSeries", "(", "requestContext", ",", "seriesLists", ",", "reduceFunction", ",", "reduceNode", ",", "*", "reduceMatchers", ")", ":", ...] | Short form: ``reduce()``.
Takes a list of seriesLists and reduces it to a list of series by means of
the reduceFunction.
Reduction is performed by matching the reduceNode in each series against
the list of reduceMatchers. Each series is then passed to the
reduceFunction as arguments in the order given by reduceMatchers. The
reduceFunction should yield a single series.
The resulting series are aliased so that they can easily be
nested in other functions.
**Example**: Map/Reduce asPercent(bytes_used,total_bytes) for each server.
Assume that metrics in the form below exist::
servers.server1.disk.bytes_used
servers.server1.disk.total_bytes
servers.server2.disk.bytes_used
servers.server2.disk.total_bytes
servers.server3.disk.bytes_used
servers.server3.disk.total_bytes
...
servers.serverN.disk.bytes_used
servers.serverN.disk.total_bytes
To get the percentage of disk used for each server::
reduceSeries(mapSeries(servers.*.disk.*,1),
"asPercent",3,"bytes_used","total_bytes") =>
alias(asPercent(servers.server1.disk.bytes_used,
servers.server1.disk.total_bytes),
"servers.server1.disk.reduce.asPercent"),
alias(asPercent(servers.server2.disk.bytes_used,
servers.server2.disk.total_bytes),
"servers.server2.disk.reduce.asPercent"),
...
alias(asPercent(servers.serverN.disk.bytes_used,
servers.serverN.disk.total_bytes),
"servers.serverN.disk.reduce.asPercent")
In other words, we will get back the following metrics::
servers.server1.disk.reduce.asPercent,
servers.server2.disk.reduce.asPercent,
...
servers.serverN.disk.reduce.asPercent
.. seealso:: :py:func:`mapSeries` | ["Short", "form", ":", "reduce", "()", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3565-L3638 |
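The slot-assignment step is the subtle part of reduceSeries: each matched series lands at the index of its reduceNode value in reduceMatchers. A sketch on bare names, with the reduce call itself stubbed out; the names and the hard-coded "asPercent" are assumptions for illustration.

def reduce_slots(names, reduce_node, reduce_function, *matchers):
    # Map each matched name into a fixed-size slot list keyed by its prefix.
    slots, order = {}, []
    for name in names:
        nodes = name.split(".")
        node = nodes[reduce_node]
        key = ".".join(nodes[:reduce_node]) + ".reduce." + reduce_function
        if node in matchers:
            if key not in slots:
                slots[key] = [None] * len(matchers)
                order.append(key)
            slots[key][matchers.index(node)] = name
    return [(key, slots[key]) for key in order]

names = ["servers.server1.disk.bytes_used", "servers.server1.disk.total_bytes"]
print(reduce_slots(names, 3, "asPercent", "bytes_used", "total_bytes"))
# [('servers.server1.disk.reduce.asPercent',
#   ['servers.server1.disk.bytes_used', 'servers.server1.disk.total_bytes'])]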
brutasse/graphite-api | graphite_api/functions.py | applyByNode | def applyByNode(requestContext, seriesList, nodeNum, templateFunction,
newName=None):
"""
Takes a seriesList and applies some complicated function (described by
a string), replacing templates with unique prefixes of keys from the
seriesList (the key is all nodes up to the index given as `nodeNum`).
If the `newName` parameter is provided, the name of the resulting series
will be given by that parameter, with any "%" characters replaced by the
unique prefix.
Example::
&target=applyByNode(servers.*.disk.bytes_free,1,
"divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))")
Would find all series which match `servers.*.disk.bytes_free`, then trim
them down to unique series up to the node given by nodeNum, then fill them
into the template function provided (replacing % by the prefixes).
"""
from .app import evaluateTarget
prefixes = set()
for series in seriesList:
prefix = '.'.join(series.name.split('.')[:nodeNum + 1])
prefixes.add(prefix)
results = []
for prefix in sorted(prefixes):
target = templateFunction.replace('%', prefix)
for resultSeries in evaluateTarget(requestContext, target):
if newName:
resultSeries.name = newName.replace('%', prefix)
resultSeries.pathExpression = prefix
resultSeries.start = series.start
resultSeries.end = series.end
results.append(resultSeries)
return results | python | def applyByNode(requestContext, seriesList, nodeNum, templateFunction,
newName=None):
"""
Takes a seriesList and applies some complicated function (described by
a string), replacing templates with unique prefixes of keys from the
seriesList (the key is all nodes up to the index given as `nodeNum`).
If the `newName` parameter is provided, the name of the resulting series
will be given by that parameter, with any "%" characters replaced by the
unique prefix.
Example::
&target=applyByNode(servers.*.disk.bytes_free,1,
"divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))")
Would find all series which match `servers.*.disk.bytes_free`, then trim
them down to unique series up to the node given by nodeNum, then fill them
into the template function provided (replacing % by the prefixes).
"""
from .app import evaluateTarget
prefixes = set()
for series in seriesList:
prefix = '.'.join(series.name.split('.')[:nodeNum + 1])
prefixes.add(prefix)
results = []
for prefix in sorted(prefixes):
target = templateFunction.replace('%', prefix)
for resultSeries in evaluateTarget(requestContext, target):
if newName:
resultSeries.name = newName.replace('%', prefix)
resultSeries.pathExpression = prefix
resultSeries.start = series.start
resultSeries.end = series.end
results.append(resultSeries)
    return results | ["def", "applyByNode", "(", "requestContext", ",", "seriesList", ",", "nodeNum", ",", "templateFunction", ",", "newName", "=", "None", ")", ":", ...] | Takes a seriesList and applies some complicated function (described by
a string), replacing templates with unique prefixes of keys from the
seriesList (the key is all nodes up to the index given as `nodeNum`).
If the `newName` parameter is provided, the name of the resulting series
will be given by that parameter, with any "%" characters replaced by the
unique prefix.
Example::
&target=applyByNode(servers.*.disk.bytes_free,1,
"divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))")
Would find all series which match `servers.*.disk.bytes_free`, then trim
them down to unique series up to the node given by nodeNum, then fill them
into the template function provided (replacing % by the prefixes). | ["Takes", "a", "seriesList", "and", "applies", "some", "complicated", "function", ...] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3641-L3677 |
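The prefix extraction and template substitution can be shown in isolation; evaluateTarget is out of scope here, so the sketch just prints the targets it would evaluate, and the metric names are assumed examples.

names = ["servers.s1.disk.bytes_free", "servers.s2.disk.bytes_free"]
node_num = 1
template = "divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))"

# One unique prefix per series, cut after node_num dotted nodes.
prefixes = sorted({".".join(name.split(".")[:node_num + 1]) for name in names})
for prefix in prefixes:
    print(template.replace("%", prefix))
# divideSeries(servers.s1.disk.bytes_free,sumSeries(servers.s1.disk.bytes_*))
# divideSeries(servers.s2.disk.bytes_free,sumSeries(servers.s2.disk.bytes_*))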
brutasse/graphite-api | graphite_api/functions.py | groupByNode | def groupByNode(requestContext, seriesList, nodeNum, callback):
"""
    Takes a seriesList and maps a callback to subgroups within as defined by a
common node.
Example::
&target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries")
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the second node (0 indexed)
resulting in a list of targets like::
sumSeries(ganglia.by-function.server1.*.cpu.load5),
sumSeries(ganglia.by-function.server2.*.cpu.load5),...
"""
return groupByNodes(requestContext, seriesList, callback, nodeNum) | python | def groupByNode(requestContext, seriesList, nodeNum, callback):
"""
    Takes a seriesList and maps a callback to subgroups within as defined by a
common node.
Example::
&target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries")
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the second node (0 indexed)
resulting in a list of targets like::
sumSeries(ganglia.by-function.server1.*.cpu.load5),
sumSeries(ganglia.by-function.server2.*.cpu.load5),...
"""
    return groupByNodes(requestContext, seriesList, callback, nodeNum) | ["def", "groupByNode", "(", "requestContext", ",", "seriesList", ",", "nodeNum", ",", "callback", ")", ":", "return", "groupByNodes", "(", "requestContext", ",", "seriesList", ",", "callback", ",", "nodeNum", ")"] | Takes a seriesList and maps a callback to subgroups within as defined by a
common node.
Example::
&target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries")
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the second node (0 indexed)
resulting in a list of targets like::
sumSeries(ganglia.by-function.server1.*.cpu.load5),
sumSeries(ganglia.by-function.server2.*.cpu.load5),... | ["Takes", "a", "seriesList", "and", "maps", "a", "callback", "to", "subgroups", "within", "as", "defined", "by", "a", "common", "node", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3680-L3697 |
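Since groupByNode simply forwards to groupByNodes with a single node index, groupByNode(series, 2, "sumSeries") is equivalent to groupByNodes(series, "sumSeries", 2); a runnable sketch of the shared grouping logic follows the groupByNodes record below.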
brutasse/graphite-api | graphite_api/functions.py | groupByNodes | def groupByNodes(requestContext, seriesList, callback, *nodes):
"""
    Takes a seriesList and maps a callback to subgroups within as defined by
multiple nodes.
Example::
&target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4)
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the nodes' list (0 indexed)
resulting in a list of targets like::
sumSeries(ganglia.server1.*.cpu.load5),
sumSeries(ganglia.server1.*.cpu.load10),
sumSeries(ganglia.server1.*.cpu.load15),
sumSeries(ganglia.server2.*.cpu.load5),
sumSeries(ganglia.server2.*.cpu.load10),
sumSeries(ganglia.server2.*.cpu.load15), ...
"""
from .app import app
metaSeries = {}
keys = []
if isinstance(nodes, int):
nodes = [nodes]
for series in seriesList:
key = '.'.join(series.name.split(".")[n] for n in nodes)
if key not in metaSeries:
metaSeries[key] = [series]
keys.append(key)
else:
metaSeries[key].append(series)
for key in metaSeries:
metaSeries[key] = app.functions[callback](requestContext,
metaSeries[key])[0]
metaSeries[key].name = key
return [metaSeries[key] for key in keys] | python | def groupByNodes(requestContext, seriesList, callback, *nodes):
"""
    Takes a seriesList and maps a callback to subgroups within as defined by
multiple nodes.
Example::
&target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4)
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the nodes' list (0 indexed)
resulting in a list of targets like::
sumSeries(ganglia.server1.*.cpu.load5),
sumSeries(ganglia.server1.*.cpu.load10),
sumSeries(ganglia.server1.*.cpu.load15),
sumSeries(ganglia.server2.*.cpu.load5),
sumSeries(ganglia.server2.*.cpu.load10),
sumSeries(ganglia.server2.*.cpu.load15), ...
"""
from .app import app
metaSeries = {}
keys = []
if isinstance(nodes, int):
nodes = [nodes]
for series in seriesList:
key = '.'.join(series.name.split(".")[n] for n in nodes)
if key not in metaSeries:
metaSeries[key] = [series]
keys.append(key)
else:
metaSeries[key].append(series)
for key in metaSeries:
metaSeries[key] = app.functions[callback](requestContext,
metaSeries[key])[0]
metaSeries[key].name = key
    return [metaSeries[key] for key in keys] | ["def", "groupByNodes", "(", "requestContext", ",", "seriesList", ",", "callback", ",", "*", "nodes", ")", ":", ...] | Takes a seriesList and maps a callback to subgroups within as defined by
multiple nodes.
Example::
&target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4)
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the nodes' list (0 indexed)
resulting in a list of targets like::
sumSeries(ganglia.server1.*.cpu.load5),
sumSeries(ganglia.server1.*.cpu.load10),
sumSeries(ganglia.server1.*.cpu.load15),
sumSeries(ganglia.server2.*.cpu.load5),
sumSeries(ganglia.server2.*.cpu.load10),
sumSeries(ganglia.server2.*.cpu.load15), ... | ["Takes", "a", "seriesList", "and", "maps", "a", "callback", "to", "subgroups", "within", "as", "defined", "by", "multiple", "nodes", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3700-L3737 |
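A standalone sketch of the multi-node grouping key, with a summing callback standing in for the app.functions lookup; all names and data are assumed for illustration.

from itertools import zip_longest

def group_by_nodes_sketch(series, callback, *nodes):
    # Build the group key from the selected dotted nodes, then reduce each group.
    groups, order = {}, []
    for name, values in series:
        key = ".".join(name.split(".")[n] for n in nodes)
        if key not in groups:
            groups[key] = []
            order.append(key)
        groups[key].append(values)
    return [(key, callback(groups[key])) for key in order]

def sum_series(series_values):
    # Point-wise sum across series, skipping None gaps.
    return [sum(v for v in column if v is not None)
            for column in zip_longest(*series_values)]

data = [("ganglia.server1.cpu.load5", [1, 2]),
        ("ganglia.server1.net.load5", [3, 4])]
print(group_by_nodes_sketch(data, sum_series, 1, 3))
# [('server1.load5', [4, 6])]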
brutasse/graphite-api | graphite_api/functions.py | exclude | def exclude(requestContext, seriesList, pattern):
"""
Takes a metric or a wildcard seriesList, followed by a regular expression
in double quotes. Excludes metrics that match the regular expression.
Example::
&target=exclude(servers*.instance*.threads.busy,"server02")
"""
regex = re.compile(pattern)
return [s for s in seriesList if not regex.search(s.name)] | python | def exclude(requestContext, seriesList, pattern):
"""
Takes a metric or a wildcard seriesList, followed by a regular expression
in double quotes. Excludes metrics that match the regular expression.
Example::
&target=exclude(servers*.instance*.threads.busy,"server02")
"""
regex = re.compile(pattern)
    return [s for s in seriesList if not regex.search(s.name)] | ["def", "exclude", "(", "requestContext", ",", "seriesList", ",", "pattern", ")", ":", "regex", "=", "re", ".", "compile", "(", "pattern", ")", "return", "[", "s", "for", "s", "in", "seriesList", "if", "not", "regex", ".", "search", "(", "s", ".", "name", ")", "]"] | Takes a metric or a wildcard seriesList, followed by a regular expression
in double quotes. Excludes metrics that match the regular expression.
Example::
&target=exclude(servers*.instance*.threads.busy,"server02") | ["Takes", "a", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "a", "regular", "expression", "in", "double", "quotes", ".", "Excludes", "metrics", "that", "match", "the", "regular", "expression", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3740-L3750 |
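A quick demonstration of the filter on bare names (assumed examples):

import re

names = ["servers.server01.threads.busy", "servers.server02.threads.busy"]
regex = re.compile("server02")
# Keep only the series whose names do not match the pattern.
print([name for name in names if not regex.search(name)])
# ['servers.server01.threads.busy']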
brutasse/graphite-api | graphite_api/functions.py | smartSummarize | def smartSummarize(requestContext, seriesList, intervalString, func='sum'):
"""
Smarter experimental version of summarize.
"""
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
# Adjust the start time to fit an entire day for intervals >= 1 day
requestContext = requestContext.copy()
tzinfo = requestContext['tzinfo']
s = requestContext['startTime']
if interval >= DAY:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
tzinfo=tzinfo)
elif interval >= HOUR:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour,
tzinfo=tzinfo)
elif interval >= MINUTE:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour,
s.minute, tzinfo=tzinfo)
paths = []
for series in seriesList:
paths.extend(pathsFromTarget(requestContext, series.pathExpression))
data_store = fetchData(requestContext, paths)
for series in seriesList:
# XXX: breaks with summarize(metric.{a,b})
# each series.pathExpression == metric.{a,b}
newSeries = evaluateTarget(requestContext,
series.pathExpression,
data_store)[0]
series[0:len(series)] = newSeries
series.start = newSeries.start
series.end = newSeries.end
series.step = newSeries.step
for series in seriesList:
buckets = {} # {timestamp: [values]}
timestamps = range(int(series.start), int(series.end),
int(series.step))
datapoints = zip_longest(timestamps, series)
# Populate buckets
for timestamp, value in datapoints:
# ISSUE: Sometimes there is a missing timestamp in datapoints when
            # running smartSummarize
if not timestamp:
continue
bucketInterval = int((timestamp - series.start) / interval)
if bucketInterval not in buckets:
buckets[bucketInterval] = []
if value is not None:
buckets[bucketInterval].append(value)
newValues = []
for timestamp in range(series.start, series.end, interval):
bucketInterval = int((timestamp - series.start) / interval)
bucket = buckets.get(bucketInterval, [])
if bucket:
if func == 'avg':
newValues.append(float(sum(bucket)) / float(len(bucket)))
elif func == 'last':
newValues.append(bucket[len(bucket)-1])
elif func == 'max':
newValues.append(max(bucket))
elif func == 'min':
newValues.append(min(bucket))
else:
newValues.append(sum(bucket))
else:
newValues.append(None)
newName = "smartSummarize(%s, \"%s\", \"%s\")" % (series.name,
intervalString,
func)
alignedEnd = series.start + (bucketInterval * interval) + interval
newSeries = TimeSeries(newName, series.start, alignedEnd, interval,
newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results | python | def smartSummarize(requestContext, seriesList, intervalString, func='sum'):
"""
Smarter experimental version of summarize.
"""
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
# Adjust the start time to fit an entire day for intervals >= 1 day
requestContext = requestContext.copy()
tzinfo = requestContext['tzinfo']
s = requestContext['startTime']
if interval >= DAY:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
tzinfo=tzinfo)
elif interval >= HOUR:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour,
tzinfo=tzinfo)
elif interval >= MINUTE:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour,
s.minute, tzinfo=tzinfo)
paths = []
for series in seriesList:
paths.extend(pathsFromTarget(requestContext, series.pathExpression))
data_store = fetchData(requestContext, paths)
for series in seriesList:
# XXX: breaks with summarize(metric.{a,b})
# each series.pathExpression == metric.{a,b}
newSeries = evaluateTarget(requestContext,
series.pathExpression,
data_store)[0]
series[0:len(series)] = newSeries
series.start = newSeries.start
series.end = newSeries.end
series.step = newSeries.step
for series in seriesList:
buckets = {} # {timestamp: [values]}
timestamps = range(int(series.start), int(series.end),
int(series.step))
datapoints = zip_longest(timestamps, series)
# Populate buckets
for timestamp, value in datapoints:
# ISSUE: Sometimes there is a missing timestamp in datapoints when
            # running smartSummarize
if not timestamp:
continue
bucketInterval = int((timestamp - series.start) / interval)
if bucketInterval not in buckets:
buckets[bucketInterval] = []
if value is not None:
buckets[bucketInterval].append(value)
newValues = []
for timestamp in range(series.start, series.end, interval):
bucketInterval = int((timestamp - series.start) / interval)
bucket = buckets.get(bucketInterval, [])
if bucket:
if func == 'avg':
newValues.append(float(sum(bucket)) / float(len(bucket)))
elif func == 'last':
newValues.append(bucket[len(bucket)-1])
elif func == 'max':
newValues.append(max(bucket))
elif func == 'min':
newValues.append(min(bucket))
else:
newValues.append(sum(bucket))
else:
newValues.append(None)
newName = "smartSummarize(%s, \"%s\", \"%s\")" % (series.name,
intervalString,
func)
alignedEnd = series.start + (bucketInterval * interval) + interval
newSeries = TimeSeries(newName, series.start, alignedEnd, interval,
newValues)
newSeries.pathExpression = newName
results.append(newSeries)
    return results | ["def", "smartSummarize", "(", "requestContext", ",", "seriesList", ",", "intervalString", ",", "func", "=", "'sum'", ")", ":", ...] | Smarter experimental version of summarize. | ["Smarter", "experimental", "version", "of", "summarize", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3767-L3854 |
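The bucketing arithmetic used by smartSummarize is easy to check on a toy series; the step and interval values below are assumptions chosen for a round example.

start, step, interval = 0, 60, 300      # 1-minute points into 5-minute buckets
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

# Bin each point by integer division of its offset from the series start.
buckets = {}
for i, value in enumerate(values):
    timestamp = start + i * step
    buckets.setdefault((timestamp - start) // interval, []).append(value)

print([sum(buckets[b]) for b in sorted(buckets)])  # [15, 40]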
brutasse/graphite-api | graphite_api/functions.py | summarize | def summarize(requestContext, seriesList, intervalString, func='sum',
alignToFrom=False):
"""
Summarize the data into interval buckets of a certain size.
By default, the contents of each interval bucket are summed together.
This is useful for counters where each increment represents a discrete
event and retrieving a "per X" value requires summing all the events in
that interval.
Specifying 'avg' instead will return the mean for each bucket, which can
be more useful when the value is a gauge that represents a certain value
in time.
'max', 'min' or 'last' can also be specified.
By default, buckets are calculated by rounding to the nearest interval.
This works well for intervals smaller than a day. For example, 22:32 will
end up in the bucket 22:00-23:00 when the interval=1hour.
Passing alignToFrom=true will instead create buckets starting at the from
time. In this case, the bucket for 22:32 depends on the from time. If
from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30.
Example::
# total errors per hour
&target=summarize(counter.errors, "1hour")
# new users per week
&target=summarize(nonNegativeDerivative(gauge.num_users), "1week")
# average queue size per hour
&target=summarize(queue.size, "1hour", "avg")
# maximum queue size during each hour
&target=summarize(queue.size, "1hour", "max")
# 2010 Q1-4
&target=summarize(metric, "13week", "avg", true)&from=midnight+20100101
"""
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
for series in seriesList:
buckets = {}
timestamps = range(int(series.start), int(series.end) + 1,
int(series.step))
datapoints = zip_longest(timestamps, series)
for timestamp, value in datapoints:
if timestamp is None:
continue
if alignToFrom:
bucketInterval = int((timestamp - series.start) / interval)
else:
bucketInterval = timestamp - (timestamp % interval)
if bucketInterval not in buckets:
buckets[bucketInterval] = []
if value is not None:
buckets[bucketInterval].append(value)
if alignToFrom:
newStart = series.start
newEnd = series.end
else:
newStart = series.start - (series.start % interval)
newEnd = series.end - (series.end % interval) + interval
newValues = []
for timestamp in range(newStart, newEnd, interval):
if alignToFrom:
newEnd = timestamp
bucketInterval = int((timestamp - series.start) / interval)
else:
bucketInterval = timestamp - (timestamp % interval)
bucket = buckets.get(bucketInterval, [])
if bucket:
if func == 'avg':
newValues.append(float(sum(bucket)) / float(len(bucket)))
elif func == 'last':
newValues.append(bucket[len(bucket)-1])
elif func == 'max':
newValues.append(max(bucket))
elif func == 'min':
newValues.append(min(bucket))
else:
newValues.append(sum(bucket))
else:
newValues.append(None)
if alignToFrom:
newEnd += interval
newName = "summarize(%s, \"%s\", \"%s\"%s)" % (
series.name, intervalString, func, alignToFrom and ", true" or "")
newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results | python | def summarize(requestContext, seriesList, intervalString, func='sum',
alignToFrom=False):
"""
Summarize the data into interval buckets of a certain size.
By default, the contents of each interval bucket are summed together.
This is useful for counters where each increment represents a discrete
event and retrieving a "per X" value requires summing all the events in
that interval.
Specifying 'avg' instead will return the mean for each bucket, which can
be more useful when the value is a gauge that represents a certain value
in time.
'max', 'min' or 'last' can also be specified.
By default, buckets are calculated by rounding to the nearest interval.
This works well for intervals smaller than a day. For example, 22:32 will
end up in the bucket 22:00-23:00 when the interval=1hour.
Passing alignToFrom=true will instead create buckets starting at the from
time. In this case, the bucket for 22:32 depends on the from time. If
from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30.
Example::
# total errors per hour
&target=summarize(counter.errors, "1hour")
# new users per week
&target=summarize(nonNegativeDerivative(gauge.num_users), "1week")
# average queue size per hour
&target=summarize(queue.size, "1hour", "avg")
# maximum queue size during each hour
&target=summarize(queue.size, "1hour", "max")
# 2010 Q1-4
&target=summarize(metric, "13week", "avg", true)&from=midnight+20100101
"""
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
for series in seriesList:
buckets = {}
timestamps = range(int(series.start), int(series.end) + 1,
int(series.step))
datapoints = zip_longest(timestamps, series)
for timestamp, value in datapoints:
if timestamp is None:
continue
if alignToFrom:
bucketInterval = int((timestamp - series.start) / interval)
else:
bucketInterval = timestamp - (timestamp % interval)
if bucketInterval not in buckets:
buckets[bucketInterval] = []
if value is not None:
buckets[bucketInterval].append(value)
if alignToFrom:
newStart = series.start
newEnd = series.end
else:
newStart = series.start - (series.start % interval)
newEnd = series.end - (series.end % interval) + interval
newValues = []
for timestamp in range(newStart, newEnd, interval):
if alignToFrom:
newEnd = timestamp
bucketInterval = int((timestamp - series.start) / interval)
else:
bucketInterval = timestamp - (timestamp % interval)
bucket = buckets.get(bucketInterval, [])
if bucket:
if func == 'avg':
newValues.append(float(sum(bucket)) / float(len(bucket)))
elif func == 'last':
newValues.append(bucket[len(bucket)-1])
elif func == 'max':
newValues.append(max(bucket))
elif func == 'min':
newValues.append(min(bucket))
else:
newValues.append(sum(bucket))
else:
newValues.append(None)
if alignToFrom:
newEnd += interval
newName = "summarize(%s, \"%s\", \"%s\"%s)" % (
series.name, intervalString, func, alignToFrom and ", true" or "")
newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
    return results | ["def", "summarize", "(", "requestContext", ",", "seriesList", ",", "intervalString", ",", "func", "=", "'sum'", ",", "alignToFrom", "=", "False", ")", ":", ...] | Summarize the data into interval buckets of a certain size.
By default, the contents of each interval bucket are summed together.
This is useful for counters where each increment represents a discrete
event and retrieving a "per X" value requires summing all the events in
that interval.
Specifying 'avg' instead will return the mean for each bucket, which can
be more useful when the value is a gauge that represents a certain value
in time.
'max', 'min' or 'last' can also be specified.
By default, buckets are calculated by rounding to the nearest interval.
This works well for intervals smaller than a day. For example, 22:32 will
end up in the bucket 22:00-23:00 when the interval=1hour.
Passing alignToFrom=true will instead create buckets starting at the from
time. In this case, the bucket for 22:32 depends on the from time. If
from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30.
Example::
# total errors per hour
&target=summarize(counter.errors, "1hour")
# new users per week
&target=summarize(nonNegativeDerivative(gauge.num_users), "1week")
# average queue size per hour
&target=summarize(queue.size, "1hour", "avg")
# maximum queue size during each hour
&target=summarize(queue.size, "1hour", "max")
# 2010 Q1-4
&target=summarize(metric, "13week", "avg", true)&from=midnight+20100101 | ["Summarize", "the", "data", "into", "interval", "buckets", "of", "a", "certain", "size", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3857-L3963 |
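The 22:32 example from the docstring can be checked numerically; timestamps are seconds since midnight here, an assumption made only to keep the arithmetic readable.

interval = 3600
timestamp = 22 * 3600 + 32 * 60         # 22:32
start = 6 * 3600 + 30 * 60              # from = 6:30

# Default: buckets snap to the wall clock.
aligned_to_clock = timestamp - (timestamp % interval)
# alignToFrom=true: buckets are offsets from the start time.
aligned_to_from = start + ((timestamp - start) // interval) * interval

print(aligned_to_clock // 60, aligned_to_from // 60)  # 1320 (22:00), 1350 (22:30)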
brutasse/graphite-api | graphite_api/functions.py | hitcount | def hitcount(requestContext, seriesList, intervalString,
alignToInterval=False):
"""
Estimate hit counts from a list of time series.
This function assumes the values in each time series represent
hits per second. It calculates hits per some larger interval
such as per day or per hour. This function is like summarize(),
except that it compensates automatically for different time scales
(so that a similar graph results from using either fine-grained
or coarse-grained records) and handles rarely-occurring events
gracefully.
"""
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
if alignToInterval:
requestContext = requestContext.copy()
tzinfo = requestContext['tzinfo']
s = requestContext['startTime']
if interval >= DAY:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
tzinfo=tzinfo)
elif interval >= HOUR:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
s.hour, tzinfo=tzinfo)
elif interval >= MINUTE:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
s.hour, s.minute,
tzinfo=tzinfo)
# Gather all paths first, then the data
paths = []
for series in seriesList:
paths.extend(pathsFromTarget(requestContext,
series.pathExpression))
data_store = fetchData(requestContext, paths)
for series in seriesList:
newSeries = evaluateTarget(requestContext,
series.pathExpression,
data_store)[0]
intervalCount = int((series.end - series.start) / interval)
series[0:len(series)] = newSeries
series.start = newSeries.start
series.end = newSeries.start + (
intervalCount * interval) + interval
series.step = newSeries.step
for series in seriesList:
step = int(series.step)
bucket_count = int(math.ceil(
float(series.end - series.start) / interval))
buckets = [[] for _ in range(bucket_count)]
newStart = int(series.end - bucket_count * interval)
for i, value in enumerate(series):
if value is None:
continue
start_time = int(series.start + i * step)
start_bucket, start_mod = divmod(start_time - newStart, interval)
end_time = start_time + step
end_bucket, end_mod = divmod(end_time - newStart, interval)
if end_bucket >= bucket_count:
end_bucket = bucket_count - 1
end_mod = interval
if start_bucket == end_bucket:
# All of the hits go to a single bucket.
if start_bucket >= 0:
buckets[start_bucket].append(value * (end_mod - start_mod))
else:
# Spread the hits among 2 or more buckets.
if start_bucket >= 0:
buckets[start_bucket].append(
value * (interval - start_mod))
hits_per_bucket = value * interval
for j in range(start_bucket + 1, end_bucket):
buckets[j].append(hits_per_bucket)
if end_mod > 0:
buckets[end_bucket].append(value * end_mod)
newValues = []
for bucket in buckets:
if bucket:
newValues.append(sum(bucket))
else:
newValues.append(None)
newName = 'hitcount(%s, "%s"%s)' % (series.name, intervalString,
alignToInterval and ", true" or "")
newSeries = TimeSeries(newName, newStart, series.end, interval,
newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results | python | def hitcount(requestContext, seriesList, intervalString,
alignToInterval=False):
"""
Estimate hit counts from a list of time series.
This function assumes the values in each time series represent
hits per second. It calculates hits per some larger interval
such as per day or per hour. This function is like summarize(),
except that it compensates automatically for different time scales
(so that a similar graph results from using either fine-grained
or coarse-grained records) and handles rarely-occurring events
gracefully.
"""
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
if alignToInterval:
requestContext = requestContext.copy()
tzinfo = requestContext['tzinfo']
s = requestContext['startTime']
if interval >= DAY:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
tzinfo=tzinfo)
elif interval >= HOUR:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
s.hour, tzinfo=tzinfo)
elif interval >= MINUTE:
requestContext['startTime'] = datetime(s.year, s.month, s.day,
s.hour, s.minute,
tzinfo=tzinfo)
# Gather all paths first, then the data
paths = []
for series in seriesList:
paths.extend(pathsFromTarget(requestContext,
series.pathExpression))
data_store = fetchData(requestContext, paths)
for series in seriesList:
newSeries = evaluateTarget(requestContext,
series.pathExpression,
data_store)[0]
intervalCount = int((series.end - series.start) / interval)
series[0:len(series)] = newSeries
series.start = newSeries.start
series.end = newSeries.start + (
intervalCount * interval) + interval
series.step = newSeries.step
for series in seriesList:
step = int(series.step)
bucket_count = int(math.ceil(
float(series.end - series.start) / interval))
buckets = [[] for _ in range(bucket_count)]
newStart = int(series.end - bucket_count * interval)
for i, value in enumerate(series):
if value is None:
continue
start_time = int(series.start + i * step)
start_bucket, start_mod = divmod(start_time - newStart, interval)
end_time = start_time + step
end_bucket, end_mod = divmod(end_time - newStart, interval)
if end_bucket >= bucket_count:
end_bucket = bucket_count - 1
end_mod = interval
if start_bucket == end_bucket:
# All of the hits go to a single bucket.
if start_bucket >= 0:
buckets[start_bucket].append(value * (end_mod - start_mod))
else:
# Spread the hits among 2 or more buckets.
if start_bucket >= 0:
buckets[start_bucket].append(
value * (interval - start_mod))
hits_per_bucket = value * interval
for j in range(start_bucket + 1, end_bucket):
buckets[j].append(hits_per_bucket)
if end_mod > 0:
buckets[end_bucket].append(value * end_mod)
newValues = []
for bucket in buckets:
if bucket:
newValues.append(sum(bucket))
else:
newValues.append(None)
newName = 'hitcount(%s, "%s"%s)' % (series.name, intervalString,
alignToInterval and ", true" or "")
newSeries = TimeSeries(newName, newStart, series.end, interval,
newValues)
newSeries.pathExpression = newName
results.append(newSeries)
    return results | ["def", "hitcount", "(", "requestContext", ",", "seriesList", ",", "intervalString", ",", "alignToInterval", "=", "False", ")", ":", ...] | Estimate hit counts from a list of time series.
This function assumes the values in each time series represent
hits per second. It calculates hits per some larger interval
such as per day or per hour. This function is like summarize(),
except that it compensates automatically for different time scales
(so that a similar graph results from using either fine-grained
or coarse-grained records) and handles rarely-occurring events
gracefully. | ["Estimate", "hit", "counts", "from", "a", "list", "of", "time", "series", "."] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3966-L4066 |
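The hit-spreading step is the heart of hitcount: a rate held for one step contributes rate multiplied by seconds of overlap to each bucket it touches. A sketch with assumed numbers:

step, interval, new_start = 60, 100, 0
value, start_time = 2.0, 80             # 2 hits/second from t=80 to t=140

start_bucket, start_mod = divmod(start_time - new_start, interval)
end_bucket, end_mod = divmod(start_time + step - new_start, interval)

buckets = {0: 0.0, 1: 0.0}
if start_bucket == end_bucket:
    # The whole step falls inside one bucket.
    buckets[start_bucket] += value * (end_mod - start_mod)
else:
    # Split the hits proportionally to the overlap with each bucket.
    buckets[start_bucket] += value * (interval - start_mod)
    buckets[end_bucket] += value * end_mod
print(buckets)  # {0: 40.0, 1: 80.0} -- 120 hits total, as 2 hits/s * 60 s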
brutasse/graphite-api | graphite_api/functions.py | timeFunction | def timeFunction(requestContext, name, step=60):
"""
Short Alias: time()
    Just returns the timestamp for each X value.
Example::
&target=time("The.time.series")
This would create a series named "The.time.series" that contains in Y
the same value (in seconds) as X.
A second argument can be provided as a step parameter (default is 60 secs)
"""
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(epoch(when))
when += delta
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series] | python | def timeFunction(requestContext, name, step=60):
"""
Short Alias: time()
    Just returns the timestamp for each X value.
Example::
&target=time("The.time.series")
This would create a series named "The.time.series" that contains in Y
the same value (in seconds) as X.
A second argument can be provided as a step parameter (default is 60 secs)
"""
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(epoch(when))
when += delta
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
    return [series] | ["def", "timeFunction", "(", "requestContext", ",", "name", ",", "step", "=", "60", ")", ":", ...] | Short Alias: time()
Just returns the timestamp for each X value.
Example::
&target=time("The.time.series")
This would create a series named "The.time.series" that contains in Y
the same value (in seconds) as X.
A second argument can be provided as a step parameter (default is 60 secs) | [
"Short",
"Alias",
":",
"time",
"()"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L4069-L4098 |
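A self-contained sketch of calling timeFunction() directly; the timezone-aware datetimes are an assumption about what epoch() expects, and the context dict carries only the two keys the function body reads.

from datetime import datetime, timezone

ctx = {"startTime": datetime(2019, 1, 1, 12, 0, tzinfo=timezone.utc),
       "endTime": datetime(2019, 1, 1, 12, 10, tzinfo=timezone.utc)}
(series,) = timeFunction(ctx, "The.time.series", step=60)
print(len(series))  # 10 one-minute points; each Y value is its own X timestamp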
brutasse/graphite-api | graphite_api/functions.py | sinFunction | def sinFunction(requestContext, name, amplitude=1, step=60):
"""
Short Alias: sin()
Just returns the sine of the current time. The optional amplitude parameter
changes the amplitude of the wave.
Example::
&target=sin("The.time.series", 2)
This would create a series named "The.time.series" that contains sin(x)*2.
A third argument can be provided as a step parameter (default is 60 secs).
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(math.sin(epoch(when))*amplitude)
when += delta
series = TimeSeries(
name, int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)
series.pathExpression = 'sin({0})'.format(name)
return [series] | python | def sinFunction(requestContext, name, amplitude=1, step=60):
"""
Short Alias: sin()
Just returns the sine of the current time. The optional amplitude parameter
changes the amplitude of the wave.
Example::
&target=sin("The.time.series", 2)
This would create a series named "The.time.series" that contains sin(x)*2.
A third argument can be provided as a step parameter (default is 60 secs).
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(math.sin(epoch(when))*amplitude)
when += delta
series = TimeSeries(
name, int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)
series.pathExpression = 'sin({0})'.format(name)
return [series] | [
"def",
"sinFunction",
"(",
"requestContext",
",",
"name",
",",
"amplitude",
"=",
"1",
",",
"step",
"=",
"60",
")",
":",
"delta",
"=",
"timedelta",
"(",
"seconds",
"=",
"step",
")",
"when",
"=",
"requestContext",
"[",
"\"startTime\"",
"]",
"values",
"=",
"[",
"]",
"while",
"when",
"<",
"requestContext",
"[",
"\"endTime\"",
"]",
":",
"values",
".",
"append",
"(",
"math",
".",
"sin",
"(",
"epoch",
"(",
"when",
")",
")",
"*",
"amplitude",
")",
"when",
"+=",
"delta",
"series",
"=",
"TimeSeries",
"(",
"name",
",",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"\"startTime\"",
"]",
")",
")",
",",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"\"endTime\"",
"]",
")",
")",
",",
"step",
",",
"values",
")",
"series",
".",
"pathExpression",
"=",
"'sin({0})'",
".",
"format",
"(",
"name",
")",
"return",
"[",
"series",
"]"
] | Short Alias: sin()
Just returns the sine of the current time. The optional amplitude parameter
changes the amplitude of the wave.
Example::
&target=sin("The.time.series", 2)
This would create a series named "The.time.series" that contains sin(x)*2.
A third argument can be provided as a step parameter (default is 60 secs). | [
"Short",
"Alias",
":",
"sin",
"()"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L4101-L4129 |
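The companion sketch for sinFunction(), reusing the hypothetical ctx from the timeFunction sketch above; amplitude=2 scales the wave into [-2, 2].

(wave,) = sinFunction(ctx, "The.time.series", amplitude=2, step=60)
print(wave.pathExpression)              # "sin(The.time.series)"
print(all(abs(v) <= 2 for v in wave))   # sin(x) * amplitude stays within bounds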
brutasse/graphite-api | graphite_api/functions.py | randomWalkFunction | def randomWalkFunction(requestContext, name, step=60):
"""
Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts an optional second argument as step parameter (default step is
60 sec).
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
current = 0
while when < requestContext["endTime"]:
values.append(current)
current += random.random() - 0.5
when += delta
return [TimeSeries(
name, int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)] | python | def randomWalkFunction(requestContext, name, step=60):
"""
Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts an optional second argument as step parameter (default step is
60 sec).
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
current = 0
while when < requestContext["endTime"]:
values.append(current)
current += random.random() - 0.5
when += delta
return [TimeSeries(
name, int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)] | [
"def",
"randomWalkFunction",
"(",
"requestContext",
",",
"name",
",",
"step",
"=",
"60",
")",
":",
"delta",
"=",
"timedelta",
"(",
"seconds",
"=",
"step",
")",
"when",
"=",
"requestContext",
"[",
"\"startTime\"",
"]",
"values",
"=",
"[",
"]",
"current",
"=",
"0",
"while",
"when",
"<",
"requestContext",
"[",
"\"endTime\"",
"]",
":",
"values",
".",
"append",
"(",
"current",
")",
"current",
"+=",
"random",
".",
"random",
"(",
")",
"-",
"0.5",
"when",
"+=",
"delta",
"return",
"[",
"TimeSeries",
"(",
"name",
",",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"\"startTime\"",
"]",
")",
")",
",",
"int",
"(",
"epoch",
"(",
"requestContext",
"[",
"\"endTime\"",
"]",
")",
")",
",",
"step",
",",
"values",
")",
"]"
] | Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts an optional second argument as step parameter (default step is
60 sec). | [
"Short",
"Alias",
":",
"randomWalk",
"()"
] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L4146-L4175 |
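And the same pattern for randomWalkFunction(); the series name is cosmetic here since no path expression is set on the result.

(walk,) = randomWalkFunction(ctx, "The.time.series", step=60)
print(walk[0])  # always 0: the walk starts at x(0) == 0 and drifts by random() - 0.5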
opencobra/memote | memote/experimental/medium.py | Medium.validate | def validate(self, model, checks=[]):
"""Use a defined schema to validate the medium table format."""
custom = [
check_partial(reaction_id_check,
frozenset(r.id for r in model.reactions))
]
super(Medium, self).validate(model=model, checks=checks + custom) | python | def validate(self, model, checks=[]):
"""Use a defined schema to validate the medium table format."""
custom = [
check_partial(reaction_id_check,
frozenset(r.id for r in model.reactions))
]
super(Medium, self).validate(model=model, checks=checks + custom) | [
"def",
"validate",
"(",
"self",
",",
"model",
",",
"checks",
"=",
"[",
"]",
")",
":",
"custom",
"=",
"[",
"check_partial",
"(",
"reaction_id_check",
",",
"frozenset",
"(",
"r",
".",
"id",
"for",
"r",
"in",
"model",
".",
"reactions",
")",
")",
"]",
"super",
"(",
"Medium",
",",
"self",
")",
".",
"validate",
"(",
"model",
"=",
"model",
",",
"checks",
"=",
"checks",
"+",
"custom",
")"
] | Use a defined schema to validate the medium table format. | [
"Use",
"a",
"defined",
"schema",
"to",
"validate",
"the",
"medium",
"table",
"format",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/medium.py#L48-L54 |
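A hedged sketch of driving the validation; the Medium constructor arguments and the file names are assumptions, since only validate() appears in this record.

import cobra

model = cobra.io.read_sbml_model("model.xml")  # assumed local SBML file
medium = Medium(filename="medium.csv")         # hypothetical constructor call
medium.validate(model)  # every `exchange` entry must be a reaction ID in the model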
opencobra/memote | memote/experimental/medium.py | Medium.apply | def apply(self, model):
"""Set the defined medium on the given model."""
model.medium = {row.exchange: row.uptake
for row in self.data.itertuples(index=False)} | python | def apply(self, model):
"""Set the defined medium on the given model."""
model.medium = {row.exchange: row.uptake
for row in self.data.itertuples(index=False)} | [
"def",
"apply",
"(",
"self",
",",
"model",
")",
":",
"model",
".",
"medium",
"=",
"{",
"row",
".",
"exchange",
":",
"row",
".",
"uptake",
"for",
"row",
"in",
"self",
".",
"data",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
"}"
] | Set the defined medium on the given model. | [
"Set",
"the",
"defined",
"medium",
"on",
"the",
"given",
"model",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/medium.py#L56-L59 |
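Continuing the hypothetical sketch above, apply() pushes the table's exchange/uptake pairs straight onto the model.

medium.apply(model)   # equivalent to model.medium = {exchange: uptake, ...}
print(model.medium)   # illustrative output: {"EX_glc__D_e": 10.0, ...}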
opencobra/memote | memote/suite/results/result.py | MemoteResult.add_environment_information | def add_environment_information(meta):
"""Record environment information."""
meta["timestamp"] = datetime.utcnow().isoformat(" ")
meta["platform"] = platform.system()
meta["release"] = platform.release()
meta["python"] = platform.python_version()
meta["packages"] = get_pkg_info("memote") | python | def add_environment_information(meta):
"""Record environment information."""
meta["timestamp"] = datetime.utcnow().isoformat(" ")
meta["platform"] = platform.system()
meta["release"] = platform.release()
meta["python"] = platform.python_version()
meta["packages"] = get_pkg_info("memote") | [
"def",
"add_environment_information",
"(",
"meta",
")",
":",
"meta",
"[",
"\"timestamp\"",
"]",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
"\" \"",
")",
"meta",
"[",
"\"platform\"",
"]",
"=",
"platform",
".",
"system",
"(",
")",
"meta",
"[",
"\"release\"",
"]",
"=",
"platform",
".",
"release",
"(",
")",
"meta",
"[",
"\"python\"",
"]",
"=",
"platform",
".",
"python_version",
"(",
")",
"meta",
"[",
"\"packages\"",
"]",
"=",
"get_pkg_info",
"(",
"\"memote\"",
")"
] | Record environment information. | [
"Record",
"environment",
"information",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/result.py#L46-L52 |
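A sketch of the metadata helper, assuming it is exposed as a static method as the record suggests; it fills the passed mapping in place.

meta = {}
MemoteResult.add_environment_information(meta)
print(sorted(meta))  # ['packages', 'platform', 'python', 'release', 'timestamp']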
opencobra/memote | memote/support/helpers.py | find_transported_elements | def find_transported_elements(rxn):
"""
Return a dictionary showing the amount of transported elements of a rxn.
Collects the elements for each metabolite participating in a reaction,
multiplies the amount by the metabolite's stoichiometry in the reaction and
bins the result according to the compartment that metabolite is in. This
produces a dictionary of dictionaries such as this
``{'p': {'C': -1, 'H': -4}, 'c': {'C': 1, 'H': 4}}`` which shows the
transported entities. This dictionary is then simplified to only include
the non-zero elements of one single compartment i.e. showing the precise
elements that are transported.
Parameters
----------
rxn : cobra.Reaction
Any cobra.Reaction containing metabolites.
"""
element_dist = defaultdict()
# Collecting elements for each metabolite.
for met in rxn.metabolites:
if met.compartment not in element_dist:
# Multiplication by the metabolite stoichiometry.
element_dist[met.compartment] = \
{k: v * rxn.metabolites[met]
for (k, v) in iteritems(met.elements)}
else:
x = {k: v * rxn.metabolites[met] for (k, v) in
iteritems(met.elements)}
y = element_dist[met.compartment]
element_dist[met.compartment] = \
{k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)}
delta_dict = defaultdict()
# Simplification of the resulting dictionary of dictionaries.
for elements in itervalues(element_dist):
delta_dict.update(elements)
# Only non-zero values get included in the returned delta-dict.
delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0}
return delta_dict | python | def find_transported_elements(rxn):
"""
Return a dictionary showing the amount of transported elements of a rxn.
Collects the elements for each metabolite participating in a reaction,
multiplies the amount by the metabolite's stoichiometry in the reaction and
bins the result according to the compartment that metabolite is in. This
produces a dictionary of dictionaries such as this
``{'p': {'C': -1, 'H': -4}, 'c': {'C': 1, 'H': 4}}`` which shows the
transported entities. This dictionary is then simplified to only include
the non-zero elements of one single compartment i.e. showing the precise
elements that are transported.
Parameters
----------
rxn : cobra.Reaction
Any cobra.Reaction containing metabolites.
"""
element_dist = defaultdict()
# Collecting elements for each metabolite.
for met in rxn.metabolites:
if met.compartment not in element_dist:
# Multiplication by the metabolite stoichiometry.
element_dist[met.compartment] = \
{k: v * rxn.metabolites[met]
for (k, v) in iteritems(met.elements)}
else:
x = {k: v * rxn.metabolites[met] for (k, v) in
iteritems(met.elements)}
y = element_dist[met.compartment]
element_dist[met.compartment] = \
{k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)}
delta_dict = defaultdict()
# Simplification of the resulting dictionary of dictionaries.
for elements in itervalues(element_dist):
delta_dict.update(elements)
# Only non-zero values get included in the returned delta-dict.
delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0}
return delta_dict | [
"def",
"find_transported_elements",
"(",
"rxn",
")",
":",
"element_dist",
"=",
"defaultdict",
"(",
")",
"# Collecting elements for each metabolite.",
"for",
"met",
"in",
"rxn",
".",
"metabolites",
":",
"if",
"met",
".",
"compartment",
"not",
"in",
"element_dist",
":",
"# Multiplication by the metabolite stoichiometry.",
"element_dist",
"[",
"met",
".",
"compartment",
"]",
"=",
"{",
"k",
":",
"v",
"*",
"rxn",
".",
"metabolites",
"[",
"met",
"]",
"for",
"(",
"k",
",",
"v",
")",
"in",
"iteritems",
"(",
"met",
".",
"elements",
")",
"}",
"else",
":",
"x",
"=",
"{",
"k",
":",
"v",
"*",
"rxn",
".",
"metabolites",
"[",
"met",
"]",
"for",
"(",
"k",
",",
"v",
")",
"in",
"iteritems",
"(",
"met",
".",
"elements",
")",
"}",
"y",
"=",
"element_dist",
"[",
"met",
".",
"compartment",
"]",
"element_dist",
"[",
"met",
".",
"compartment",
"]",
"=",
"{",
"k",
":",
"x",
".",
"get",
"(",
"k",
",",
"0",
")",
"+",
"y",
".",
"get",
"(",
"k",
",",
"0",
")",
"for",
"k",
"in",
"set",
"(",
"x",
")",
"|",
"set",
"(",
"y",
")",
"}",
"delta_dict",
"=",
"defaultdict",
"(",
")",
"# Simplification of the resulting dictionary of dictionaries.",
"for",
"elements",
"in",
"itervalues",
"(",
"element_dist",
")",
":",
"delta_dict",
".",
"update",
"(",
"elements",
")",
"# Only non-zero values get included in the returned delta-dict.",
"delta_dict",
"=",
"{",
"k",
":",
"abs",
"(",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"iteritems",
"(",
"delta_dict",
")",
"if",
"v",
"!=",
"0",
"}",
"return",
"delta_dict"
] | Return a dictionary showing the amount of transported elements of a rxn.
Collects the elements for each metabolite participating in a reaction,
multiplies the amount by the metabolite's stoichiometry in the reaction and
bins the result according to the compartment that metabolite is in. This
produces a dictionary of dictionaries such as this
``{'p': {'C': -1, 'H': -4}, 'c': {'C': 1, 'H': 4}}`` which shows the
transported entities. This dictionary is then simplified to only include
the non-zero elements of one single compartment i.e. showing the precise
elements that are transported.
Parameters
----------
rxn : cobra.Reaction
Any cobra.Reaction containing metabolites. | [
"Return",
"a",
"dictionary",
"showing",
"the",
"amount",
"of",
"transported",
"elements",
"of",
"a",
"rxn",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L81-L120 |
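A self-contained toy example of the element bookkeeping; the cobra constructors are standard, the identifiers illustrative.

import cobra

# One glucose molecule moved from the extracellular space into the cytosol.
glc_e = cobra.Metabolite("glc__D_e", formula="C6H12O6", compartment="e")
glc_c = cobra.Metabolite("glc__D_c", formula="C6H12O6", compartment="c")
rxn = cobra.Reaction("GLCt")
rxn.add_metabolites({glc_e: -1, glc_c: 1})
print(find_transported_elements(rxn))  # -> {'C': 6, 'H': 12, 'O': 6}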
opencobra/memote | memote/support/helpers.py | find_transport_reactions | def find_transport_reactions(model):
"""
Return a list of all transport reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
A transport reaction is defined as follows:
1. It contains metabolites from at least 2 compartments and
2. at least 1 metabolite undergoes no chemical reaction, i.e.,
the formula and/or annotation stays the same on both sides of the equation.
A notable exception is transport via PTS, which also contains the following
restriction:
3. The transported metabolite(s) are transported into a compartment through
the exchange of a phosphate group.
An example of transport via PTS would be
pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c)
Reactions similar to transport via PTS (referred to as "modified transport
reactions") follow a similar pattern:
A(x) + B-R(y) -> A-R(y) + B(y)
Such modified transport reactions can be detected, but only when a formula
field exists for all metabolites in a particular reaction. If this is not
the case, transport reactions are identified through annotations, which
cannot detect modified transport reactions.
"""
transport_reactions = []
transport_rxn_candidates = set(model.reactions) - set(model.boundary) \
- set(find_biomass_reaction(model))
transport_rxn_candidates = set(
[rxn for rxn in transport_rxn_candidates if len(rxn.compartments) >= 2]
)
# Add all labeled transport reactions
sbo_matches = set([rxn for rxn in transport_rxn_candidates if
rxn.annotation is not None and
'sbo' in rxn.annotation and
rxn.annotation['sbo'] in TRANSPORT_RXN_SBO_TERMS])
if len(sbo_matches) > 0:
transport_reactions += list(sbo_matches)
# Find unlabeled transport reactions via formula or annotation checks
for rxn in transport_rxn_candidates:
# Check if metabolites have formula field
rxn_mets = set([met.formula for met in rxn.metabolites])
if (None not in rxn_mets) and (len(rxn_mets) != 0):
if is_transport_reaction_formulae(rxn):
transport_reactions.append(rxn)
elif is_transport_reaction_annotations(rxn):
transport_reactions.append(rxn)
return set(transport_reactions) | python | def find_transport_reactions(model):
"""
Return a list of all transport reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
A transport reaction is defined as follows:
1. It contains metabolites from at least 2 compartments and
2. at least 1 metabolite undergoes no chemical reaction, i.e.,
the formula and/or annotation stays the same on both sides of the equation.
A notable exception is transport via PTS, which also contains the following
restriction:
3. The transported metabolite(s) are transported into a compartment through
the exchange of a phosphate group.
An example of transport via PTS would be
pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c)
Reactions similar to transport via PTS (referred to as "modified transport
reactions") follow a similar pattern:
A(x) + B-R(y) -> A-R(y) + B(y)
Such modified transport reactions can be detected, but only when a formula
field exists for all metabolites in a particular reaction. If this is not
the case, transport reactions are identified through annotations, which
cannot detect modified transport reactions.
"""
transport_reactions = []
transport_rxn_candidates = set(model.reactions) - set(model.boundary) \
- set(find_biomass_reaction(model))
transport_rxn_candidates = set(
[rxn for rxn in transport_rxn_candidates if len(rxn.compartments) >= 2]
)
# Add all labeled transport reactions
sbo_matches = set([rxn for rxn in transport_rxn_candidates if
rxn.annotation is not None and
'sbo' in rxn.annotation and
rxn.annotation['sbo'] in TRANSPORT_RXN_SBO_TERMS])
if len(sbo_matches) > 0:
transport_reactions += list(sbo_matches)
# Find unlabeled transport reactions via formula or annotation checks
for rxn in transport_rxn_candidates:
# Check if metabolites have formula field
rxn_mets = set([met.formula for met in rxn.metabolites])
if (None not in rxn_mets) and (len(rxn_mets) != 0):
if is_transport_reaction_formulae(rxn):
transport_reactions.append(rxn)
elif is_transport_reaction_annotations(rxn):
transport_reactions.append(rxn)
return set(transport_reactions) | [
"def",
"find_transport_reactions",
"(",
"model",
")",
":",
"transport_reactions",
"=",
"[",
"]",
"transport_rxn_candidates",
"=",
"set",
"(",
"model",
".",
"reactions",
")",
"-",
"set",
"(",
"model",
".",
"boundary",
")",
"-",
"set",
"(",
"find_biomass_reaction",
"(",
"model",
")",
")",
"transport_rxn_candidates",
"=",
"set",
"(",
"[",
"rxn",
"for",
"rxn",
"in",
"transport_rxn_candidates",
"if",
"len",
"(",
"rxn",
".",
"compartments",
")",
">=",
"2",
"]",
")",
"# Add all labeled transport reactions",
"sbo_matches",
"=",
"set",
"(",
"[",
"rxn",
"for",
"rxn",
"in",
"transport_rxn_candidates",
"if",
"rxn",
".",
"annotation",
"is",
"not",
"None",
"and",
"'sbo'",
"in",
"rxn",
".",
"annotation",
"and",
"rxn",
".",
"annotation",
"[",
"'sbo'",
"]",
"in",
"TRANSPORT_RXN_SBO_TERMS",
"]",
")",
"if",
"len",
"(",
"sbo_matches",
")",
">",
"0",
":",
"transport_reactions",
"+=",
"list",
"(",
"sbo_matches",
")",
"# Find unlabeled transport reactions via formula or annotation checks",
"for",
"rxn",
"in",
"transport_rxn_candidates",
":",
"# Check if metabolites have formula field",
"rxn_mets",
"=",
"set",
"(",
"[",
"met",
".",
"formula",
"for",
"met",
"in",
"rxn",
".",
"metabolites",
"]",
")",
"if",
"(",
"None",
"not",
"in",
"rxn_mets",
")",
"and",
"(",
"len",
"(",
"rxn_mets",
")",
"!=",
"0",
")",
":",
"if",
"is_transport_reaction_formulae",
"(",
"rxn",
")",
":",
"transport_reactions",
".",
"append",
"(",
"rxn",
")",
"elif",
"is_transport_reaction_annotations",
"(",
"rxn",
")",
":",
"transport_reactions",
".",
"append",
"(",
"rxn",
")",
"return",
"set",
"(",
"transport_reactions",
")"
] | Return a list of all transport reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
A transport reaction is defined as follows:
1. It contains metabolites from at least 2 compartments and
2. at least 1 metabolite undergoes no chemical reaction, i.e.,
the formula and/or annotation stays the same on both sides of the equation.
A notable exception is transport via PTS, which also contains the following
restriction:
3. The transported metabolite(s) are transported into a compartment through
the exchange of a phosphate group.
An example of transport via PTS would be
pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c)
Reactions similar to transport via PTS (referred to as "modified transport
reactions") follow a similar pattern:
A(x) + B-R(y) -> A-R(y) + B(y)
Such modified transport reactions can be detected, but only when a formula
field exists for all metabolites in a particular reaction. If this is not
the case, transport reactions are identified through annotations, which
cannot detect modified transport reactions. | [
"Return",
"a",
"list",
"of",
"all",
"transport",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L124-L181 |
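Dropping the toy reaction from the previous sketch into a model shows the detection end to end.

model = cobra.Model("toy")
model.add_reactions([rxn])              # rxn: the glc__D_e -> glc__D_c toy above
print(find_transport_reactions(model))  # -> {<Reaction GLCt>} via the formula check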
opencobra/memote | memote/support/helpers.py | is_transport_reaction_formulae | def is_transport_reaction_formulae(rxn):
"""
Return boolean if a reaction is a transport reaction (from formulae).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation.
"""
# Collecting criteria to classify transporters by.
rxn_reactants = set([met.formula for met in rxn.reactants])
rxn_products = set([met.formula for met in rxn.products])
# Looking for formulas that stay the same on both sides of the reaction.
transported_mets = \
[formula for formula in rxn_reactants if formula in rxn_products]
# Collect information on the elemental differences between
# compartments in the reaction.
delta_dicts = find_transported_elements(rxn)
non_zero_array = [v for (k, v) in iteritems(delta_dicts) if v != 0]
# Excluding reactions such as oxidoreductases where no net
# transport of Hydrogen is occurring, but rather just an exchange of
# electrons or charges effecting a change in protonation.
if set(transported_mets) != set('H') and list(
delta_dicts.keys()
) == ['H']:
pass
# All other reactions for which the amount of transported elements is
# not zero, which are not part of the model's exchange nor
# biomass reactions, are defined as transport reactions.
# This includes reactions where the transported metabolite reacts with
# a carrier molecule.
elif sum(non_zero_array):
return True | python | def is_transport_reaction_formulae(rxn):
"""
Return boolean if a reaction is a transport reaction (from formulae).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation.
"""
# Collecting criteria to classify transporters by.
rxn_reactants = set([met.formula for met in rxn.reactants])
rxn_products = set([met.formula for met in rxn.products])
# Looking for formulas that stay the same on both sides of the reaction.
transported_mets = \
[formula for formula in rxn_reactants if formula in rxn_products]
# Collect information on the elemental differences between
# compartments in the reaction.
delta_dicts = find_transported_elements(rxn)
non_zero_array = [v for (k, v) in iteritems(delta_dicts) if v != 0]
# Excluding reactions such as oxidoreductases where no net
# transport of Hydrogen is occurring, but rather just an exchange of
# electrons or charges effecting a change in protonation.
if set(transported_mets) != set('H') and list(
delta_dicts.keys()
) == ['H']:
pass
# All other reactions for which the amount of transported elements is
# not zero, which are not part of the model's exchange nor
# biomass reactions, are defined as transport reactions.
# This includes reactions where the transported metabolite reacts with
# a carrier molecule.
elif sum(non_zero_array):
return True | [
"def",
"is_transport_reaction_formulae",
"(",
"rxn",
")",
":",
"# Collecting criteria to classify transporters by.",
"rxn_reactants",
"=",
"set",
"(",
"[",
"met",
".",
"formula",
"for",
"met",
"in",
"rxn",
".",
"reactants",
"]",
")",
"rxn_products",
"=",
"set",
"(",
"[",
"met",
".",
"formula",
"for",
"met",
"in",
"rxn",
".",
"products",
"]",
")",
"# Looking for formulas that stay the same on both side of the reaction.",
"transported_mets",
"=",
"[",
"formula",
"for",
"formula",
"in",
"rxn_reactants",
"if",
"formula",
"in",
"rxn_products",
"]",
"# Collect information on the elemental differences between",
"# compartments in the reaction.",
"delta_dicts",
"=",
"find_transported_elements",
"(",
"rxn",
")",
"non_zero_array",
"=",
"[",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"iteritems",
"(",
"delta_dicts",
")",
"if",
"v",
"!=",
"0",
"]",
"# Excluding reactions such as oxidoreductases where no net",
"# transport of Hydrogen is occurring, but rather just an exchange of",
"# electrons or charges effecting a change in protonation.",
"if",
"set",
"(",
"transported_mets",
")",
"!=",
"set",
"(",
"'H'",
")",
"and",
"list",
"(",
"delta_dicts",
".",
"keys",
"(",
")",
")",
"==",
"[",
"'H'",
"]",
":",
"pass",
"# All other reactions for which the amount of transported elements is",
"# not zero, which are not part of the model's exchange nor",
"# biomass reactions, are defined as transport reactions.",
"# This includes reactions where the transported metabolite reacts with",
"# a carrier molecule.",
"elif",
"sum",
"(",
"non_zero_array",
")",
":",
"return",
"True"
] | Return boolean if a reaction is a transport reaction (from formulae).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation. | [
"Return",
"boolean",
"if",
"a",
"reaction",
"is",
"a",
"transport",
"reaction",
"(",
"from",
"formulae",
")",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L184-L217 |
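The formula-based check can also be exercised in isolation on the same toy reaction.

print(is_transport_reaction_formulae(rxn))  # True: C6H12O6 appears on both sides
# A pure proton/electron exchange would instead hit the excluded `pass` branch.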
opencobra/memote | memote/support/helpers.py | is_transport_reaction_annotations | def is_transport_reaction_annotations(rxn):
"""
Return boolean if a reaction is a transport reaction (from annotations).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation.
"""
reactants = set([(k, tuple(v)) for met in rxn.reactants
for k, v in iteritems(met.annotation)
if met.id != "H"
and k is not None and k != 'sbo' and v is not None])
products = set([(k, tuple(v)) for met in rxn.products
for k, v in iteritems(met.annotation)
if met.id != "H"
and k is not None and k != 'sbo' and v is not None])
# Find intersection between reactant annotations and
# product annotations to find common metabolites between them,
# satisfying the requirements for a transport reaction. Reactions such
# as those involving oxidoreductases (where no net transport of
# Hydrogen is occurring, but rather just an exchange of electrons or
# charges effecting a change in protonation) are excluded.
transported_mets = reactants & products
if len(transported_mets) > 0:
return True | python | def is_transport_reaction_annotations(rxn):
"""
Return boolean if a reaction is a transport reaction (from annotations).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation.
"""
reactants = set([(k, tuple(v)) for met in rxn.reactants
for k, v in iteritems(met.annotation)
if met.id != "H"
and k is not None and k != 'sbo' and v is not None])
products = set([(k, tuple(v)) for met in rxn.products
for k, v in iteritems(met.annotation)
if met.id != "H"
and k is not None and k != 'sbo' and v is not None])
# Find intersection between reactant annotations and
# product annotations to find common metabolites between them,
# satisfying the requirements for a transport reaction. Reactions such
# as those involving oxidoreductases (where no net transport of
# Hydrogen is occurring, but rather just an exchange of electrons or
# charges effecting a change in protonation) are excluded.
transported_mets = reactants & products
if len(transported_mets) > 0:
return True | [
"def",
"is_transport_reaction_annotations",
"(",
"rxn",
")",
":",
"reactants",
"=",
"set",
"(",
"[",
"(",
"k",
",",
"tuple",
"(",
"v",
")",
")",
"for",
"met",
"in",
"rxn",
".",
"reactants",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"met",
".",
"annotation",
")",
"if",
"met",
".",
"id",
"!=",
"\"H\"",
"and",
"k",
"is",
"not",
"None",
"and",
"k",
"!=",
"'sbo'",
"and",
"v",
"is",
"not",
"None",
"]",
")",
"products",
"=",
"set",
"(",
"[",
"(",
"k",
",",
"tuple",
"(",
"v",
")",
")",
"for",
"met",
"in",
"rxn",
".",
"products",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"met",
".",
"annotation",
")",
"if",
"met",
".",
"id",
"!=",
"\"H\"",
"and",
"k",
"is",
"not",
"None",
"and",
"k",
"!=",
"'sbo'",
"and",
"v",
"is",
"not",
"None",
"]",
")",
"# Find intersection between reactant annotations and",
"# product annotations to find common metabolites between them,",
"# satisfying the requirements for a transport reaction. Reactions such",
"# as those involving oxidoreductases (where no net transport of",
"# Hydrogen is occurring, but rather just an exchange of electrons or",
"# charges effecting a change in protonation) are excluded.",
"transported_mets",
"=",
"reactants",
"&",
"products",
"if",
"len",
"(",
"transported_mets",
")",
">",
"0",
":",
"return",
"True"
] | Return boolean if a reaction is a transport reaction (from annotations).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation. | [
"Return",
"boolean",
"if",
"a",
"reaction",
"is",
"a",
"transport",
"reaction",
"(",
"from",
"annotations",
")",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L220-L246 |
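For the annotation-based check, shared database identifiers stand in for formulas; KEGG C00031 (D-glucose) is used here purely as an illustration.

glc_e.annotation = {"kegg.compound": ["C00031"]}
glc_c.annotation = {"kegg.compound": ["C00031"]}
print(is_transport_reaction_annotations(rxn))  # True: a (key, value) pair is shared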
opencobra/memote | memote/support/helpers.py | find_converting_reactions | def find_converting_reactions(model, pair):
"""
Find all reactions which convert a given metabolite pair.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
pair: tuple or list
A pair of metabolite identifiers without compartment suffix.
Returns
-------
frozenset
The set of reactions that have one of the pair on their left-hand
side and the other on the right-hand side.
"""
first = set(find_met_in_model(model, pair[0]))
second = set(find_met_in_model(model, pair[1]))
hits = list()
for rxn in model.reactions:
# FIXME: Use `set.issubset` much more idiomatic.
if len(first & set(rxn.reactants)) > 0 and len(
second & set(rxn.products)) > 0:
hits.append(rxn)
elif len(first & set(rxn.products)) > 0 and len(
second & set(rxn.reactants)) > 0:
hits.append(rxn)
return frozenset(hits) | python | def find_converting_reactions(model, pair):
"""
Find all reactions which convert a given metabolite pair.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
pair: tuple or list
A pair of metabolite identifiers without compartment suffix.
Returns
-------
frozenset
The set of reactions that have one of the pair on their left-hand
side and the other on the right-hand side.
"""
first = set(find_met_in_model(model, pair[0]))
second = set(find_met_in_model(model, pair[1]))
hits = list()
for rxn in model.reactions:
# FIXME: Use `set.issubset` much more idiomatic.
if len(first & set(rxn.reactants)) > 0 and len(
second & set(rxn.products)) > 0:
hits.append(rxn)
elif len(first & set(rxn.products)) > 0 and len(
second & set(rxn.reactants)) > 0:
hits.append(rxn)
return frozenset(hits) | [
"def",
"find_converting_reactions",
"(",
"model",
",",
"pair",
")",
":",
"first",
"=",
"set",
"(",
"find_met_in_model",
"(",
"model",
",",
"pair",
"[",
"0",
"]",
")",
")",
"second",
"=",
"set",
"(",
"find_met_in_model",
"(",
"model",
",",
"pair",
"[",
"1",
"]",
")",
")",
"hits",
"=",
"list",
"(",
")",
"for",
"rxn",
"in",
"model",
".",
"reactions",
":",
"# FIXME: Use `set.issubset` much more idiomatic.",
"if",
"len",
"(",
"first",
"&",
"set",
"(",
"rxn",
".",
"reactants",
")",
")",
">",
"0",
"and",
"len",
"(",
"second",
"&",
"set",
"(",
"rxn",
".",
"products",
")",
")",
">",
"0",
":",
"hits",
".",
"append",
"(",
"rxn",
")",
"elif",
"len",
"(",
"first",
"&",
"set",
"(",
"rxn",
".",
"products",
")",
")",
">",
"0",
"and",
"len",
"(",
"second",
"&",
"set",
"(",
"rxn",
".",
"reactants",
")",
")",
">",
"0",
":",
"hits",
".",
"append",
"(",
"rxn",
")",
"return",
"frozenset",
"(",
"hits",
")"
] | Find all reactions which convert a given metabolite pair.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
pair: tuple or list
A pair of metabolite identifiers without compartment suffix.
Returns
-------
frozenset
The set of reactions that have one of the pair on their left-hand
side and the other on the right-hand side. | [
"Find",
"all",
"reactions",
"which",
"convert",
"a",
"given",
"metabolite",
"pair",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L249-L278 |
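A hedged call sketch, assuming a genome-scale model like the one loaded in the earlier sketches; find_met_in_model() resolves the namespace-free IDs, so the BiGG-style pair below is illustrative and depends on the model's annotation.

hits = find_converting_reactions(model, ("glc__D", "g6p"))
print(hits)  # reactions with glucose on one side, glucose-6-phosphate on the other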
opencobra/memote | memote/support/helpers.py | find_biomass_reaction | def find_biomass_reaction(model):
"""
Return a list of the biomass reaction(s) of the model.
This function identifies possible biomass reactions using two strategies:
1. Return reactions that include the SBO annotation "SBO:0000629" for
biomass.
If no reactions can be identified this way:
2. Look for the ``buzzwords`` "biomass", "growth" and "bof" in reaction IDs.
3. Look for metabolite IDs or names that contain the ``buzzword`` "biomass"
and obtain the set of reactions they are involved in.
4. Remove boundary reactions from this set.
5. Return the union of reactions that match the buzzwords and of the
reactions that metabolites are involved in that match the buzzword.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Identified biomass reactions.
"""
sbo_matches = set([rxn for rxn in model.reactions if
rxn.annotation is not None and
'sbo' in rxn.annotation and
rxn.annotation['sbo'] == 'SBO:0000629'])
if len(sbo_matches) > 0:
return list(sbo_matches)
buzzwords = ['biomass', 'growth', 'bof']
buzzword_matches = set([rxn for rxn in model.reactions if any(
string in rxn.id.lower() for string in buzzwords)])
biomass_met = []
for met in model.metabolites:
if 'biomass' in met.id.lower() or (
met.name is not None and 'biomass' in met.name.lower()):
biomass_met.append(met)
if len(biomass_met) == 1:
biomass_met_matches = set(
biomass_met[0].reactions
) - set(model.boundary)
else:
biomass_met_matches = set()
return list(buzzword_matches | biomass_met_matches) | python | def find_biomass_reaction(model):
"""
Return a list of the biomass reaction(s) of the model.
This function identifies possible biomass reactions using two strategies:
1. Return reactions that include the SBO annotation "SBO:0000629" for
biomass.
If no reactions can be identified this way:
2. Look for the ``buzzwords`` "biomass", "growth" and "bof" in reaction IDs.
3. Look for metabolite IDs or names that contain the ``buzzword`` "biomass"
and obtain the set of reactions they are involved in.
4. Remove boundary reactions from this set.
5. Return the union of reactions that match the buzzwords and of the
reactions that metabolites are involved in that match the buzzword.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Identified biomass reactions.
"""
sbo_matches = set([rxn for rxn in model.reactions if
rxn.annotation is not None and
'sbo' in rxn.annotation and
rxn.annotation['sbo'] == 'SBO:0000629'])
if len(sbo_matches) > 0:
return list(sbo_matches)
buzzwords = ['biomass', 'growth', 'bof']
buzzword_matches = set([rxn for rxn in model.reactions if any(
string in rxn.id.lower() for string in buzzwords)])
biomass_met = []
for met in model.metabolites:
if 'biomass' in met.id.lower() or (
met.name is not None and 'biomass' in met.name.lower()):
biomass_met.append(met)
if len(biomass_met) == 1:
biomass_met_matches = set(
biomass_met[0].reactions
) - set(model.boundary)
else:
biomass_met_matches = set()
return list(buzzword_matches | biomass_met_matches) | [
"def",
"find_biomass_reaction",
"(",
"model",
")",
":",
"sbo_matches",
"=",
"set",
"(",
"[",
"rxn",
"for",
"rxn",
"in",
"model",
".",
"reactions",
"if",
"rxn",
".",
"annotation",
"is",
"not",
"None",
"and",
"'sbo'",
"in",
"rxn",
".",
"annotation",
"and",
"rxn",
".",
"annotation",
"[",
"'sbo'",
"]",
"==",
"'SBO:0000629'",
"]",
")",
"if",
"len",
"(",
"sbo_matches",
")",
">",
"0",
":",
"return",
"list",
"(",
"sbo_matches",
")",
"buzzwords",
"=",
"[",
"'biomass'",
",",
"'growth'",
",",
"'bof'",
"]",
"buzzword_matches",
"=",
"set",
"(",
"[",
"rxn",
"for",
"rxn",
"in",
"model",
".",
"reactions",
"if",
"any",
"(",
"string",
"in",
"rxn",
".",
"id",
".",
"lower",
"(",
")",
"for",
"string",
"in",
"buzzwords",
")",
"]",
")",
"biomass_met",
"=",
"[",
"]",
"for",
"met",
"in",
"model",
".",
"metabolites",
":",
"if",
"'biomass'",
"in",
"met",
".",
"id",
".",
"lower",
"(",
")",
"or",
"(",
"met",
".",
"name",
"is",
"not",
"None",
"and",
"'biomass'",
"in",
"met",
".",
"name",
".",
"lower",
"(",
")",
")",
":",
"biomass_met",
".",
"append",
"(",
"met",
")",
"if",
"biomass_met",
"==",
"1",
":",
"biomass_met_matches",
"=",
"set",
"(",
"biomass_met",
".",
"reactions",
")",
"-",
"set",
"(",
"model",
".",
"boundary",
")",
"else",
":",
"biomass_met_matches",
"=",
"set",
"(",
")",
"return",
"list",
"(",
"buzzword_matches",
"|",
"biomass_met_matches",
")"
] | Return a list of the biomass reaction(s) of the model.
This function identifies possible biomass reactions using two strategies:
1. Return reactions that include the SBO annotation "SBO:0000629" for
biomass.
If no reactions can be identified this way:
2. Look for the ``buzzwords`` "biomass", "growth" and "bof" in reaction IDs.
3. Look for metabolite IDs or names that contain the ``buzzword`` "biomass"
and obtain the set of reactions they are involved in.
4. Remove boundary reactions from this set.
5. Return the union of reactions that match the buzzwords and of the
reactions that metabolites are involved in that match the buzzword.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Identified biomass reactions. | [
"Return",
"a",
"list",
"of",
"the",
"biomass",
"reaction",
"(",
"s",
")",
"of",
"the",
"model",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L282-L333 |
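A sketch against a published model; the file name and reaction ID are typical BiGG values, not guaranteed for any particular download.

model = cobra.io.read_sbml_model("e_coli_core.xml")  # assumed local copy
print(find_biomass_reaction(model))
# e.g. [<Reaction BIOMASS_Ecoli_core_w_GAM>], matched via the 'biomass' buzzword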
opencobra/memote | memote/support/helpers.py | find_demand_reactions | def find_demand_reactions(model):
u"""
Return a list of demand reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines demand reactions as:
-- 'unbalanced network reactions that allow the accumulation of a compound'
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are known to be produced by
the organism [..] (i) for which no information is available about their
fractional distribution to the biomass or (ii) which may only be produced
in some environmental conditions
-- reactions with a formula such as: 'met_c -> '
Demand reactions differ from exchange reactions in that the metabolites
are not removed from the extracellular environment, but from any of the
organism's compartments.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'demand', extracellular) | python | def find_demand_reactions(model):
u"""
Return a list of demand reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines demand reactions as:
-- 'unbalanced network reactions that allow the accumulation of a compound'
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are known to be produced by
the organism [..] (i) for which no information is available about their
fractional distribution to the biomass or (ii) which may only be produced
in some environmental conditions
-- reactions with a formula such as: 'met_c -> '
Demand reactions differ from exchange reactions in that the metabolites
are not removed from the extracellular environment, but from any of the
organism's compartments.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'demand', extracellular) | [
"def",
"find_demand_reactions",
"(",
"model",
")",
":",
"try",
":",
"extracellular",
"=",
"find_compartment_id_in_model",
"(",
"model",
",",
"'e'",
")",
"except",
"KeyError",
":",
"extracellular",
"=",
"None",
"return",
"find_boundary_types",
"(",
"model",
",",
"'demand'",
",",
"extracellular",
")"
] | u"""
Return a list of demand reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines demand reactions as:
-- 'unbalanced network reactions that allow the accumulation of a compound'
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are known to be produced by
the organism [..] (i) for which no information is available about their
fractional distribution to the biomass or (ii) which may only be produced
in some environmental conditions
-- reactions with a formula such as: 'met_c -> '
Demand reactions differ from exchange reactions in that the metabolites
are not removed from the extracellular environment, but from any of the
organism's compartments.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203 | [
"u",
"Return",
"a",
"list",
"of",
"demand",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L337-L373 |
opencobra/memote | memote/support/helpers.py | find_sink_reactions | def find_sink_reactions(model):
u"""
Return a list of sink reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines sink reactions as:
-- 'similar to demand reactions' but reversible, thus able to supply the
model with metabolites
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are produced by nonmetabolic
cellular processes but that need to be metabolized'
-- reactions with a formula such as: 'met_c <-> '
Sink reactions differ from exchange reactions in that the metabolites
are not removed from the extracellular environment, but from any of the
organism's compartments.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'sink', extracellular) | python | def find_sink_reactions(model):
u"""
Return a list of sink reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines sink reactions as:
-- 'similar to demand reactions' but reversible, thus able to supply the
model with metabolites
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are produced by nonmetabolic
cellular processes but that need to be metabolized'
-- reactions with a formula such as: 'met_c <-> '
Sink reactions differ from exchange reactions in that the metabolites
are not removed from the extracellular environment, but from any of the
organism's compartments.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'sink', extracellular) | [
"def",
"find_sink_reactions",
"(",
"model",
")",
":",
"try",
":",
"extracellular",
"=",
"find_compartment_id_in_model",
"(",
"model",
",",
"'e'",
")",
"except",
"KeyError",
":",
"extracellular",
"=",
"None",
"return",
"find_boundary_types",
"(",
"model",
",",
"'sink'",
",",
"extracellular",
")"
] | u"""
Return a list of sink reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines sink reactions as:
-- 'similar to demand reactions' but reversible, thus able to supply the
model with metabolites
-- reactions that are chiefly added during the gap-filling process
-- as a means of dealing with 'compounds that are produced by nonmetabolic
cellular processes but that need to be metabolized'
-- reactions with a formula such as: 'met_c <-> '
Sink reactions differ from exchange reactions in that the metabolites
are not removed from the extracellular environment, but from any of the
organism's compartments.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203 | [
"u",
"Return",
"a",
"list",
"of",
"sink",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L377-L412 |
opencobra/memote | memote/support/helpers.py | find_exchange_rxns | def find_exchange_rxns(model):
u"""
Return a list of exchange reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines exchange reactions as:
-- reactions that 'define the extracellular environment'
-- 'unbalanced, extra-organism reactions that represent the supply to or
removal of metabolites from the extra-organism "space"'
-- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or
'met_e <=> '
Exchange reactions differ from demand reactions in that the metabolites
are removed from or added to the extracellular environment only. With this
the uptake or secretion of a metabolite is modeled, respectively.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'exchange', extracellular) | python | def find_exchange_rxns(model):
u"""
Return a list of exchange reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines exchange reactions as:
-- reactions that 'define the extracellular environment'
-- 'unbalanced, extra-organism reactions that represent the supply to or
removal of metabolites from the extra-organism "space"'
-- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or
'met_e <=> '
Exchange reactions differ from demand reactions in that the metabolites
are removed from or added to the extracellular environment only. With this
the uptake or secretion of a metabolite is modeled, respectively.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'exchange', extracellular) | [
"def",
"find_exchange_rxns",
"(",
"model",
")",
":",
"try",
":",
"extracellular",
"=",
"find_compartment_id_in_model",
"(",
"model",
",",
"'e'",
")",
"except",
"KeyError",
":",
"extracellular",
"=",
"None",
"return",
"find_boundary_types",
"(",
"model",
",",
"'exchange'",
",",
"extracellular",
")"
] | u"""
Return a list of exchange reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines exchange reactions as:
-- reactions that 'define the extracellular environment'
-- 'unbalanced, extra-organism reactions that represent the supply to or
removal of metabolites from the extra-organism "space"'
-- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or
'met_e <=> '
Exchange reactions differ from demand reactions in that the metabolites
are removed from or added to the extracellular environment only. With this
the uptake or secretion of a metabolite is modeled, respectively.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203 | [
"u",
"Return",
"a",
"list",
"of",
"exchange",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L416-L450 |
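One sketch covers the three boundary-type helpers above (demand, sink, exchange), since all of them delegate to find_boundary_types with the detected extracellular compartment; it reuses the genome-scale model from the find_biomass_reaction sketch.

print(len(find_exchange_rxns(model)), "exchange reactions")
print(len(find_demand_reactions(model)), "demand reactions")
print(len(find_sink_reactions(model)), "sink reactions")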
opencobra/memote | memote/support/helpers.py | find_interchange_biomass_reactions | def find_interchange_biomass_reactions(model, biomass=None):
"""
Return the set of all transport, boundary, and biomass reactions.
These reactions are either pseudo-reactions, or incorporated to allow
metabolites to pass between compartments. Some tests focus on purely
metabolic reactions and hence exclude this set.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
biomass : list or cobra.Reaction, optional
A list of cobrapy biomass reactions.
"""
boundary = set(model.boundary)
transporters = find_transport_reactions(model)
if biomass is None:
biomass = set(find_biomass_reaction(model))
return boundary | transporters | biomass | python | def find_interchange_biomass_reactions(model, biomass=None):
"""
Return the set of all transport, boundary, and biomass reactions.
These reactions are either pseudo-reactions, or incorporated to allow
metabolites to pass between compartments. Some tests focus on purely
metabolic reactions and hence exclude this set.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
biomass : list or cobra.Reaction, optional
A list of cobrapy biomass reactions.
"""
boundary = set(model.boundary)
transporters = find_transport_reactions(model)
if biomass is None:
biomass = set(find_biomass_reaction(model))
return boundary | transporters | biomass | [
"def",
"find_interchange_biomass_reactions",
"(",
"model",
",",
"biomass",
"=",
"None",
")",
":",
"boundary",
"=",
"set",
"(",
"model",
".",
"boundary",
")",
"transporters",
"=",
"find_transport_reactions",
"(",
"model",
")",
"if",
"biomass",
"is",
"None",
":",
"biomass",
"=",
"set",
"(",
"find_biomass_reaction",
"(",
"model",
")",
")",
"return",
"boundary",
"|",
"transporters",
"|",
"biomass"
] | Return the set of all transport, boundary, and biomass reactions.
These reactions are either pseudo-reactions, or incorporated to allow
metabolites to pass between compartments. Some tests focus on purely
metabolic reactions and hence exclude this set.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
biomass : list or cobra.Reaction, optional
A list of cobrapy biomass reactions. | [
"Return",
"the",
"set",
"of",
"all",
"transport",
"boundary",
"and",
"biomass",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L453-L473 |
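A typical use is subtracting the pseudo-reaction set before running purely metabolic tests:

core = set(model.reactions) - find_interchange_biomass_reactions(model)
print(len(core))  # only genuine metabolic conversions remain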
opencobra/memote | memote/support/helpers.py | run_fba | def run_fba(model, rxn_id, direction="max", single_value=True):
"""
Return the solution of an FBA to a set objective function.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
rxn_id : string
A string containing the reaction ID of the desired FBA objective.
direction: string
A string containing either "max" or "min" to specify the direction
of the desired FBA objective function.
single_value: boolean
Indicates whether the results for all reactions are gathered from the
solver, or only the result for the objective value.
Returns
-------
cobra.Solution
The cobra solution object for the corresponding FBA problem.
"""
model.objective = model.reactions.get_by_id(rxn_id)
model.objective_direction = direction
if single_value:
try:
return model.slim_optimize()
except Infeasible:
return np.nan
else:
try:
solution = model.optimize()
return solution
except Infeasible:
return np.nan | python | def run_fba(model, rxn_id, direction="max", single_value=True):
"""
Return the solution of an FBA to a set objective function.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
rxn_id : string
A string containing the reaction ID of the desired FBA objective.
direction: string
A string containing either "max" or "min" to specify the direction
of the desired FBA objective function.
single_value: boolean
Indicates whether the results for all reactions are gathered from the
solver, or only the result for the objective value.
Returns
-------
cobra.Solution
The cobra solution object for the corresponding FBA problem.
"""
model.objective = model.reactions.get_by_id(rxn_id)
model.objective_direction = direction
if single_value:
try:
return model.slim_optimize()
except Infeasible:
return np.nan
else:
try:
solution = model.optimize()
return solution
except Infeasible:
return np.nan | [
"def",
"run_fba",
"(",
"model",
",",
"rxn_id",
",",
"direction",
"=",
"\"max\"",
",",
"single_value",
"=",
"True",
")",
":",
"model",
".",
"objective",
"=",
"model",
".",
"reactions",
".",
"get_by_id",
"(",
"rxn_id",
")",
"model",
".",
"objective_direction",
"=",
"direction",
"if",
"single_value",
":",
"try",
":",
"return",
"model",
".",
"slim_optimize",
"(",
")",
"except",
"Infeasible",
":",
"return",
"np",
".",
"nan",
"else",
":",
"try",
":",
"solution",
"=",
"model",
".",
"optimize",
"(",
")",
"return",
"solution",
"except",
"Infeasible",
":",
"return",
"np",
".",
"nan"
] | Return the solution of an FBA to a set objective function.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
rxn_id : string
A string containing the reaction ID of the desired FBA objective.
direction: string
A string containing either "max" or "min" to specify the direction
of the desired FBA objective function.
single_value: boolean
Indicates whether the results for all reactions are gathered from the
solver, or only the result for the objective value.
Returns
-------
cobra.Solution
The cobra solution object for the corresponding FBA problem. | [
"Return",
"the",
"solution",
"of",
"an",
"FBA",
"to",
"a",
"set",
"objective",
"function",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L476-L511 |
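A minimal usage sketch for run_fba, assuming a cobra.Model loaded as `model` that contains a reaction "ATPM" (both names are illustrative):

# Hypothetical IDs; note that run_fba sets model.objective in place.
objective_value = run_fba(model, "ATPM", direction="max")
full_solution = run_fba(model, "ATPM", single_value=False)  # cobra.Solution

Because the objective is mutated in place, callers that need the original objective afterwards can wrap the call in cobrapy's `with model:` context manager.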
opencobra/memote | memote/support/helpers.py | close_boundaries_sensibly | def close_boundaries_sensibly(model):
"""
Return a cobra model with all boundaries closed and changed constraints.
In the returned model previously fixed reactions are no longer constrained
as such. Instead reactions are constrained according to their
reversibility. This is to prevent the FBA from becoming infeasible when
trying to solve a model with closed exchanges and one fixed reaction.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
cobra.Model
A cobra model with all boundary reactions closed and the constraints
of each reaction set according to their reversibility.
"""
for rxn in model.reactions:
if rxn.reversibility:
rxn.bounds = -1, 1
else:
rxn.bounds = 0, 1
for boundary in model.boundary:
boundary.bounds = (0, 0) | python | def close_boundaries_sensibly(model):
"""
Return a cobra model with all boundaries closed and changed constraints.
In the returned model previously fixed reactions are no longer constrained
as such. Instead reactions are constrained according to their
reversibility. This is to prevent the FBA from becoming infeasible when
trying to solve a model with closed exchanges and one fixed reaction.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
cobra.Model
A cobra model with all boundary reactions closed and the constraints
of each reaction set according to their reversibility.
"""
for rxn in model.reactions:
if rxn.reversibility:
rxn.bounds = -1, 1
else:
rxn.bounds = 0, 1
for boundary in model.boundary:
boundary.bounds = (0, 0) | [
"def",
"close_boundaries_sensibly",
"(",
"model",
")",
":",
"for",
"rxn",
"in",
"model",
".",
"reactions",
":",
"if",
"rxn",
".",
"reversibility",
":",
"rxn",
".",
"bounds",
"=",
"-",
"1",
",",
"1",
"else",
":",
"rxn",
".",
"bounds",
"=",
"0",
",",
"1",
"for",
"boundary",
"in",
"model",
".",
"boundary",
":",
"boundary",
".",
"bounds",
"=",
"(",
"0",
",",
"0",
")"
] | Return a cobra model with all boundaries closed and changed constraints.
In the returned model previously fixed reactions are no longer constrained
as such. Instead reactions are constrained according to their
reversibility. This is to prevent the FBA from becoming infeasible when
trying to solve a model with closed exchanges and one fixed reaction.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
cobra.Model
A cobra model with all boundary reactions closed and the constraints
of each reaction set according to their reversibility. | [
"Return",
"a",
"cobra",
"model",
"with",
"all",
"boundaries",
"closed",
"and",
"changed",
"constraints",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L514-L541 |
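A short sketch, assuming `model` is an existing cobra.Model; since close_boundaries_sensibly mutates bounds in place, cobrapy's context manager can be used to revert the changes on exit:

with model:
    close_boundaries_sensibly(model)
    # Boundary reactions are now fixed to (0, 0); reversible reactions
    # carry bounds (-1, 1) and irreversible ones (0, 1).
    value = model.slim_optimize()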
opencobra/memote | memote/support/helpers.py | metabolites_per_compartment | def metabolites_per_compartment(model, compartment_id):
"""
Identify all metabolites that belong to a given compartment.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Model specific compartment identifier.
Returns
-------
list
List of metabolites belonging to a given compartment.
"""
return [met for met in model.metabolites
if met.compartment == compartment_id] | python | def metabolites_per_compartment(model, compartment_id):
"""
Identify all metabolites that belong to a given compartment.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Model specific compartment identifier.
Returns
-------
list
List of metabolites belonging to a given compartment.
"""
return [met for met in model.metabolites
if met.compartment == compartment_id] | [
"def",
"metabolites_per_compartment",
"(",
"model",
",",
"compartment_id",
")",
":",
"return",
"[",
"met",
"for",
"met",
"in",
"model",
".",
"metabolites",
"if",
"met",
".",
"compartment",
"==",
"compartment_id",
"]"
] | Identify all metabolites that belong to a given compartment.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Model specific compartment identifier.
Returns
-------
list
List of metabolites belonging to a given compartment. | [
"Identify",
"all",
"metabolites",
"that",
"belong",
"to",
"a",
"given",
"compartment",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L572-L590 |
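An illustrative call, assuming "c" is a compartment identifier that actually occurs in `model`:

cytosolic = metabolites_per_compartment(model, "c")
print(len(cytosolic), "metabolites in compartment 'c'")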
opencobra/memote | memote/support/helpers.py | largest_compartment_id_met | def largest_compartment_id_met(model):
"""
Return the ID of the compartment with the most metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
string
Compartment ID of the compartment with the most metabolites.
"""
# Sort compartments by decreasing size and extract the largest two.
candidate, second = sorted(
((c, len(metabolites_per_compartment(model, c)))
for c in model.compartments), reverse=True, key=itemgetter(1))[:2]
# Compare the size of the compartments.
if candidate[1] == second[1]:
raise RuntimeError("There is a tie for the largest compartment. "
"Compartment {} and {} have equal amounts of "
"metabolites.".format(candidate[0], second[0]))
else:
return candidate[0] | python | def largest_compartment_id_met(model):
"""
Return the ID of the compartment with the most metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
string
Compartment ID of the compartment with the most metabolites.
"""
# Sort compartments by decreasing size and extract the largest two.
candidate, second = sorted(
((c, len(metabolites_per_compartment(model, c)))
for c in model.compartments), reverse=True, key=itemgetter(1))[:2]
# Compare the size of the compartments.
if candidate[1] == second[1]:
raise RuntimeError("There is a tie for the largest compartment. "
"Compartment {} and {} have equal amounts of "
"metabolites.".format(candidate[0], second[0]))
else:
return candidate[0] | [
"def",
"largest_compartment_id_met",
"(",
"model",
")",
":",
"# Sort compartments by decreasing size and extract the largest two.",
"candidate",
",",
"second",
"=",
"sorted",
"(",
"(",
"(",
"c",
",",
"len",
"(",
"metabolites_per_compartment",
"(",
"model",
",",
"c",
")",
")",
")",
"for",
"c",
"in",
"model",
".",
"compartments",
")",
",",
"reverse",
"=",
"True",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
")",
"[",
":",
"2",
"]",
"# Compare the size of the compartments.",
"if",
"candidate",
"[",
"1",
"]",
"==",
"second",
"[",
"1",
"]",
":",
"raise",
"RuntimeError",
"(",
"\"There is a tie for the largest compartment. \"",
"\"Compartment {} and {} have equal amounts of \"",
"\"metabolites.\"",
".",
"format",
"(",
"candidate",
"[",
"0",
"]",
",",
"second",
"[",
"0",
"]",
")",
")",
"else",
":",
"return",
"candidate",
"[",
"0",
"]"
] | Return the ID of the compartment with the most metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
string
Compartment ID of the compartment with the most metabolites. | [
"Return",
"the",
"ID",
"of",
"the",
"compartment",
"with",
"the",
"most",
"metabolites",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L593-L618 |
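A hedged usage sketch; the RuntimeError branch fires when two compartments tie for the largest number of metabolites:

try:
    largest = largest_compartment_id_met(model)
except RuntimeError:
    largest = None  # tie between the two biggest compartments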
opencobra/memote | memote/support/helpers.py | find_compartment_id_in_model | def find_compartment_id_in_model(model, compartment_id):
"""
Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Memote internal compartment identifier used to access compartment name
shortlist to look up potential compartment names.
Returns
-------
string
Compartment identifier in the model corresponding to compartment_id.
"""
if compartment_id not in COMPARTMENT_SHORTLIST.keys():
raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make sure "
"you typed the ID correctly, if yes, update the "
"shortlist manually.".format(compartment_id))
if len(model.compartments) == 0:
raise KeyError(
"It was not possible to identify the "
"compartment {}, since the "
"model has no compartments at "
"all.".format(COMPARTMENT_SHORTLIST[compartment_id][0])
)
if compartment_id in model.compartments.keys():
return compartment_id
for name in COMPARTMENT_SHORTLIST[compartment_id]:
for c_id, c_name in model.compartments.items():
if c_name.lower() == name:
return c_id
if compartment_id == 'c':
return largest_compartment_id_met(model) | python | def find_compartment_id_in_model(model, compartment_id):
"""
Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Memote internal compartment identifier used to access compartment name
shortlist to look up potential compartment names.
Returns
-------
string
Compartment identifier in the model corresponding to compartment_id.
"""
if compartment_id not in COMPARTMENT_SHORTLIST.keys():
raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make sure "
"you typed the ID correctly, if yes, update the "
"shortlist manually.".format(compartment_id))
if len(model.compartments) == 0:
raise KeyError(
"It was not possible to identify the "
"compartment {}, since the "
"model has no compartments at "
"all.".format(COMPARTMENT_SHORTLIST[compartment_id][0])
)
if compartment_id in model.compartments.keys():
return compartment_id
for name in COMPARTMENT_SHORTLIST[compartment_id]:
for c_id, c_name in model.compartments.items():
if c_name.lower() == name:
return c_id
if compartment_id == 'c':
return largest_compartment_id_met(model) | [
"def",
"find_compartment_id_in_model",
"(",
"model",
",",
"compartment_id",
")",
":",
"if",
"compartment_id",
"not",
"in",
"COMPARTMENT_SHORTLIST",
".",
"keys",
"(",
")",
":",
"raise",
"KeyError",
"(",
"\"{} is not in the COMPARTMENT_SHORTLIST! Make sure \"",
"\"you typed the ID correctly, if yes, update the \"",
"\"shortlist manually.\"",
".",
"format",
"(",
"compartment_id",
")",
")",
"if",
"len",
"(",
"model",
".",
"compartments",
")",
"==",
"0",
":",
"raise",
"KeyError",
"(",
"\"It was not possible to identify the \"",
"\"compartment {}, since the \"",
"\"model has no compartments at \"",
"\"all.\"",
".",
"format",
"(",
"COMPARTMENT_SHORTLIST",
"[",
"compartment_id",
"]",
"[",
"0",
"]",
")",
")",
"if",
"compartment_id",
"in",
"model",
".",
"compartments",
".",
"keys",
"(",
")",
":",
"return",
"compartment_id",
"for",
"name",
"in",
"COMPARTMENT_SHORTLIST",
"[",
"compartment_id",
"]",
":",
"for",
"c_id",
",",
"c_name",
"in",
"model",
".",
"compartments",
".",
"items",
"(",
")",
":",
"if",
"c_name",
".",
"lower",
"(",
")",
"==",
"name",
":",
"return",
"c_id",
"if",
"compartment_id",
"==",
"'c'",
":",
"return",
"largest_compartment_id_met",
"(",
"model",
")"
] | Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Memote internal compartment identifier used to access compartment name
shortlist to look up potential compartment names.
Returns
-------
string
Compartment identifier in the model corresponding to compartment_id. | [
"Identify",
"a",
"model",
"compartment",
"by",
"looking",
"up",
"names",
"in",
"COMPARTMENT_SHORTLIST",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L621-L661 |
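An example call, assuming `model` is a cobra.Model; "c" is the internal shortlist key for the cytosol:

cytosol_id = find_compartment_id_in_model(model, "c")
# For "c" the function falls back to the largest compartment when no
# identifier or name in the model matches the shortlist.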
opencobra/memote | memote/support/helpers.py | find_met_in_model | def find_met_in_model(model, mnx_id, compartment_id=None):
"""
Return specific metabolites by looking up IDs in METANETX_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
mnx_id : string
Memote internal MetaNetX metabolite identifier used to map between
cross-references in the METANETX_SHORTLIST.
compartment_id : string, optional
ID of the specific compartment where the metabolites should be found.
Defaults to returning matching metabolites from all compartments.
Returns
-------
list
cobra.Metabolite(s) matching the mnx_id.
"""
def compare_annotation(annotation):
"""
Return annotation IDs that match to METANETX_SHORTLIST references.
Compares the set of METANETX_SHORTLIST references for a given mnx_id
and the annotation IDs stored in a given annotation dictionary.
"""
query_values = set(utils.flatten(annotation.values()))
ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id]))
return query_values & ref_values
# Make sure that the MNX ID we're looking up exists in the metabolite
# shortlist.
if mnx_id not in METANETX_SHORTLIST.columns:
raise ValueError(
"{} is not in the MetaNetX Shortlist! Make sure "
"you typed the ID correctly, if yes, update the "
"shortlist by updating and re-running the script "
"generate_mnx_shortlists.py.".format(mnx_id)
)
candidates = []
# The MNX ID used in the model may or may not be tagged with a compartment
# tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is tested with the
# following regex.
# If the MNX ID itself cannot be found as an ID, we try all other
# identifiers that are provided by our shortlist of MetaNetX' mapping
# table.
regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(mnx_id))
if model.metabolites.query(regex):
candidates = model.metabolites.query(regex)
elif model.metabolites.query(compare_annotation, attribute='annotation'):
candidates = model.metabolites.query(
compare_annotation, attribute='annotation'
)
else:
for value in METANETX_SHORTLIST[mnx_id]:
if value:
for ident in value:
regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(ident))
if model.metabolites.query(regex, attribute='id'):
candidates.extend(
model.metabolites.query(regex, attribute='id'))
# Return a list of all possible candidates if no specific compartment ID
# is provided.
# Otherwise, just return the candidate in one specific compartment. Raise
# an exception if there are more than one possible candidates for a given
# compartment.
if compartment_id is None:
print("compartment_id = None?")
return candidates
else:
candidates_in_compartment = \
[cand for cand in candidates if cand.compartment == compartment_id]
if len(candidates_in_compartment) == 0:
raise RuntimeError("It was not possible to identify "
"any metabolite in compartment {} corresponding to "
"the following MetaNetX identifier: {}."
"Make sure that a cross-reference to this ID in "
"the MetaNetX Database exists for your "
"identifier "
"namespace.".format(compartment_id, mnx_id))
elif len(candidates_in_compartment) > 1:
raise RuntimeError("It was not possible to uniquely identify "
"a single metabolite in compartment {} that "
"corresponds to the following MetaNetX "
"identifier: {}."
"Instead these candidates were found: {}."
"Check that metabolite compartment tags are "
"correct. Consider switching to a namespace scheme "
"where identifiers are truly "
"unique.".format(compartment_id,
mnx_id,
utils.get_ids(
candidates_in_compartment
))
)
else:
return candidates_in_compartment | python | def find_met_in_model(model, mnx_id, compartment_id=None):
"""
Return specific metabolites by looking up IDs in METANETX_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
mnx_id : string
Memote internal MetaNetX metabolite identifier used to map between
cross-references in the METANETX_SHORTLIST.
compartment_id : string, optional
ID of the specific compartment where the metabolites should be found.
Defaults to returning matching metabolites from all compartments.
Returns
-------
list
cobra.Metabolite(s) matching the mnx_id.
"""
def compare_annotation(annotation):
"""
Return annotation IDs that match to METANETX_SHORTLIST references.
Compares the set of METANETX_SHORTLIST references for a given mnx_id
and the annotation IDs stored in a given annotation dictionary.
"""
query_values = set(utils.flatten(annotation.values()))
ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id]))
return query_values & ref_values
# Make sure that the MNX ID we're looking up exists in the metabolite
# shortlist.
if mnx_id not in METANETX_SHORTLIST.columns:
raise ValueError(
"{} is not in the MetaNetX Shortlist! Make sure "
"you typed the ID correctly, if yes, update the "
"shortlist by updating and re-running the script "
"generate_mnx_shortlists.py.".format(mnx_id)
)
candidates = []
# The MNX ID used in the model may or may not be tagged with a compartment
# tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is tested with the
# following regex.
# If the MNX ID itself cannot be found as an ID, we try all other
# identifiers that are provided by our shortlist of MetaNetX' mapping
# table.
regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(mnx_id))
if model.metabolites.query(regex):
candidates = model.metabolites.query(regex)
elif model.metabolites.query(compare_annotation, attribute='annotation'):
candidates = model.metabolites.query(
compare_annotation, attribute='annotation'
)
else:
for value in METANETX_SHORTLIST[mnx_id]:
if value:
for ident in value:
regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(ident))
if model.metabolites.query(regex, attribute='id'):
candidates.extend(
model.metabolites.query(regex, attribute='id'))
# Return a list of all possible candidates if no specific compartment ID
# is provided.
# Otherwise, just return the candidate in one specific compartment. Raise
# an exception if there are more than one possible candidates for a given
# compartment.
if compartment_id is None:
print("compartment_id = None?")
return candidates
else:
candidates_in_compartment = \
[cand for cand in candidates if cand.compartment == compartment_id]
if len(candidates_in_compartment) == 0:
raise RuntimeError("It was not possible to identify "
"any metabolite in compartment {} corresponding to "
"the following MetaNetX identifier: {}."
"Make sure that a cross-reference to this ID in "
"the MetaNetX Database exists for your "
"identifier "
"namespace.".format(compartment_id, mnx_id))
elif len(candidates_in_compartment) > 1:
raise RuntimeError("It was not possible to uniquely identify "
"a single metabolite in compartment {} that "
"corresponds to the following MetaNetX "
"identifier: {}."
"Instead these candidates were found: {}."
"Check that metabolite compartment tags are "
"correct. Consider switching to a namespace scheme "
"where identifiers are truly "
"unique.".format(compartment_id,
mnx_id,
utils.get_ids(
candidates_in_compartment
))
)
else:
return candidates_in_compartment | [
"def",
"find_met_in_model",
"(",
"model",
",",
"mnx_id",
",",
"compartment_id",
"=",
"None",
")",
":",
"def",
"compare_annotation",
"(",
"annotation",
")",
":",
"\"\"\"\n Return annotation IDs that match to METANETX_SHORTLIST references.\n\n Compares the set of METANETX_SHORTLIST references for a given mnx_id\n and the annotation IDs stored in a given annotation dictionary.\n \"\"\"",
"query_values",
"=",
"set",
"(",
"utils",
".",
"flatten",
"(",
"annotation",
".",
"values",
"(",
")",
")",
")",
"ref_values",
"=",
"set",
"(",
"utils",
".",
"flatten",
"(",
"METANETX_SHORTLIST",
"[",
"mnx_id",
"]",
")",
")",
"return",
"query_values",
"&",
"ref_values",
"# Make sure that the MNX ID we're looking up exists in the metabolite",
"# shortlist.",
"if",
"mnx_id",
"not",
"in",
"METANETX_SHORTLIST",
".",
"columns",
":",
"raise",
"ValueError",
"(",
"\"{} is not in the MetaNetX Shortlist! Make sure \"",
"\"you typed the ID correctly, if yes, update the \"",
"\"shortlist by updating and re-running the script \"",
"\"generate_mnx_shortlists.py.\"",
".",
"format",
"(",
"mnx_id",
")",
")",
"candidates",
"=",
"[",
"]",
"# The MNX ID used in the model may or may not be tagged with a compartment",
"# tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is tested with the",
"# following regex.",
"# If the MNX ID itself cannot be found as an ID, we try all other",
"# identifiers that are provided by our shortlist of MetaNetX' mapping",
"# table.",
"regex",
"=",
"re",
".",
"compile",
"(",
"'^{}(_[a-zA-Z0-9]+)?$'",
".",
"format",
"(",
"mnx_id",
")",
")",
"if",
"model",
".",
"metabolites",
".",
"query",
"(",
"regex",
")",
":",
"candidates",
"=",
"model",
".",
"metabolites",
".",
"query",
"(",
"regex",
")",
"elif",
"model",
".",
"metabolites",
".",
"query",
"(",
"compare_annotation",
",",
"attribute",
"=",
"'annotation'",
")",
":",
"candidates",
"=",
"model",
".",
"metabolites",
".",
"query",
"(",
"compare_annotation",
",",
"attribute",
"=",
"'annotation'",
")",
"else",
":",
"for",
"value",
"in",
"METANETX_SHORTLIST",
"[",
"mnx_id",
"]",
":",
"if",
"value",
":",
"for",
"ident",
"in",
"value",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"'^{}(_[a-zA-Z0-9]+)?$'",
".",
"format",
"(",
"ident",
")",
")",
"if",
"model",
".",
"metabolites",
".",
"query",
"(",
"regex",
",",
"attribute",
"=",
"'id'",
")",
":",
"candidates",
".",
"extend",
"(",
"model",
".",
"metabolites",
".",
"query",
"(",
"regex",
",",
"attribute",
"=",
"'id'",
")",
")",
"# Return a list of all possible candidates if no specific compartment ID",
"# is provided.",
"# Otherwise, just return the candidate in one specific compartment. Raise",
"# an exception if there are more than one possible candidates for a given",
"# compartment.",
"if",
"compartment_id",
"is",
"None",
":",
"print",
"(",
"\"compartment_id = None?\"",
")",
"return",
"candidates",
"else",
":",
"candidates_in_compartment",
"=",
"[",
"cand",
"for",
"cand",
"in",
"candidates",
"if",
"cand",
".",
"compartment",
"==",
"compartment_id",
"]",
"if",
"len",
"(",
"candidates_in_compartment",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"It was not possible to identify \"",
"\"any metabolite in compartment {} corresponding to \"",
"\"the following MetaNetX identifier: {}.\"",
"\"Make sure that a cross-reference to this ID in \"",
"\"the MetaNetX Database exists for your \"",
"\"identifier \"",
"\"namespace.\"",
".",
"format",
"(",
"compartment_id",
",",
"mnx_id",
")",
")",
"elif",
"len",
"(",
"candidates_in_compartment",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"It was not possible to uniquely identify \"",
"\"a single metabolite in compartment {} that \"",
"\"corresponds to the following MetaNetX \"",
"\"identifier: {}.\"",
"\"Instead these candidates were found: {}.\"",
"\"Check that metabolite compartment tags are \"",
"\"correct. Consider switching to a namespace scheme \"",
"\"where identifiers are truly \"",
"\"unique.\"",
".",
"format",
"(",
"compartment_id",
",",
"mnx_id",
",",
"utils",
".",
"get_ids",
"(",
"candidates_in_compartment",
")",
")",
")",
"else",
":",
"return",
"candidates_in_compartment"
] | Return specific metabolites by looking up IDs in METANETX_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
mnx_id : string
Memote internal MetaNetX metabolite identifier used to map between
cross-references in the METANETX_SHORTLIST.
compartment_id : string, optional
ID of the specific compartment where the metabolites should be found.
Defaults to returning matching metabolites from all compartments.
Returns
-------
list
cobra.Metabolite(s) matching the mnx_id. | [
"Return",
"specific",
"metabolites",
"by",
"looking",
"up",
"IDs",
"in",
"METANETX_SHORTLIST",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L664-L764 |
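An illustrative lookup; "MNXM3" (commonly cross-referenced to ATP) and the compartment "c" are assumptions, not values taken from the record:

atp_matches = find_met_in_model(model, "MNXM3", compartment_id="c")
# Raises RuntimeError if zero or several candidates exist in compartment "c".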
opencobra/memote | memote/support/helpers.py | find_bounds | def find_bounds(model):
"""
Return the median upper and lower bound of the metabolic model.
Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but
this may not be the case for merged or autogenerated models. In these
cases, this function is used to iterate over all the bounds of all the
reactions and find the median bound values in the model, which are
then used as the 'most common' bounds.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions],
dtype=float)
upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions],
dtype=float)
lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0])
upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0])
if np.isnan(lower_bound):
LOGGER.warning("Could not identify a median lower bound.")
lower_bound = -1000.0
if np.isnan(upper_bound):
LOGGER.warning("Could not identify a median upper bound.")
upper_bound = 1000.0
return lower_bound, upper_bound | python | def find_bounds(model):
"""
Return the median upper and lower bound of the metabolic model.
Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but
this may not be the case for merged or autogenerated models. In these
cases, this function is used to iterate over all the bounds of all the
reactions and find the median bound values in the model, which are
then used as the 'most common' bounds.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions],
dtype=float)
upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions],
dtype=float)
lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0])
upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0])
if np.isnan(lower_bound):
LOGGER.warning("Could not identify a median lower bound.")
lower_bound = -1000.0
if np.isnan(upper_bound):
LOGGER.warning("Could not identify a median upper bound.")
upper_bound = 1000.0
return lower_bound, upper_bound | [
"def",
"find_bounds",
"(",
"model",
")",
":",
"lower_bounds",
"=",
"np",
".",
"asarray",
"(",
"[",
"rxn",
".",
"lower_bound",
"for",
"rxn",
"in",
"model",
".",
"reactions",
"]",
",",
"dtype",
"=",
"float",
")",
"upper_bounds",
"=",
"np",
".",
"asarray",
"(",
"[",
"rxn",
".",
"upper_bound",
"for",
"rxn",
"in",
"model",
".",
"reactions",
"]",
",",
"dtype",
"=",
"float",
")",
"lower_bound",
"=",
"np",
".",
"nanmedian",
"(",
"lower_bounds",
"[",
"lower_bounds",
"!=",
"0.0",
"]",
")",
"upper_bound",
"=",
"np",
".",
"nanmedian",
"(",
"upper_bounds",
"[",
"upper_bounds",
"!=",
"0.0",
"]",
")",
"if",
"np",
".",
"isnan",
"(",
"lower_bound",
")",
":",
"LOGGER",
".",
"warning",
"(",
"\"Could not identify a median lower bound.\"",
")",
"lower_bound",
"=",
"-",
"1000.0",
"if",
"np",
".",
"isnan",
"(",
"upper_bound",
")",
":",
"LOGGER",
".",
"warning",
"(",
"\"Could not identify a median upper bound.\"",
")",
"upper_bound",
"=",
"1000.0",
"return",
"lower_bound",
",",
"upper_bound"
] | Return the median upper and lower bound of the metabolic model.
Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but
this may not be the case for merged or autogenerated models. In these
cases, this function is used to iterate over all the bounds of all the
reactions and find the median bound values in the model, which are
then used as the 'most common' bounds.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | [
"Return",
"the",
"median",
"upper",
"and",
"lower",
"bound",
"of",
"the",
"metabolic",
"model",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L781-L809 |
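A usage sketch, assuming `model` is loaded:

lower, upper = find_bounds(model)
# Models built with cobrapy defaults typically yield (-1000.0, 1000.0).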
opencobra/memote | memote/suite/reporting/report.py | Report.render_html | def render_html(self):
"""Render an HTML report."""
return self._template.safe_substitute(
report_type=self._report_type,
results=self.render_json()
) | python | def render_html(self):
"""Render an HTML report."""
return self._template.safe_substitute(
report_type=self._report_type,
results=self.render_json()
) | [
"def",
"render_html",
"(",
"self",
")",
":",
"return",
"self",
".",
"_template",
".",
"safe_substitute",
"(",
"report_type",
"=",
"self",
".",
"_report_type",
",",
"results",
"=",
"self",
".",
"render_json",
"(",
")",
")"
] | Render an HTML report. | [
"Render",
"an",
"HTML",
"report",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/report.py#L80-L85 |
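A minimal sketch for writing the rendered report to disk; `report` stands for any instantiated Report subclass and is an assumption, not shown in the record:

with open("report.html", "w", encoding="utf-8") as handle:
    handle.write(report.render_html())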
opencobra/memote | memote/suite/reporting/report.py | Report.compute_score | def compute_score(self):
"""Calculate the overall test score using the configuration."""
# LOGGER.info("Begin scoring")
cases = self.get_configured_tests() | set(self.result.cases)
scores = DataFrame({"score": 0.0, "max": 1.0},
index=sorted(cases))
self.result.setdefault("score", dict())
self.result["score"]["sections"] = list()
# Calculate the scores for each test individually.
for test, result in iteritems(self.result.cases):
# LOGGER.info("Calculate score for test: '%s'.", test)
# Test metric may be a dictionary for a parametrized test.
metric = result["metric"]
if hasattr(metric, "items"):
result["score"] = test_score = dict()
total = 0.0
for key, value in iteritems(metric):
value = 1.0 - value
total += value
test_score[key] = value
# For some reason there are parametrized tests without cases.
if len(metric) == 0:
metric = 0.0
else:
metric = total / len(metric)
else:
metric = 1.0 - metric
scores.at[test, "score"] = metric
scores.loc[test, :] *= self.config["weights"].get(test, 1.0)
score = 0.0
maximum = 0.0
# Calculate the scores for each section considering the individual test
# case scores.
for section_id, card in iteritems(
self.config['cards']['scored']['sections']
):
# LOGGER.info("Calculate score for section: '%s'.", section_id)
cases = card.get("cases", None)
if cases is None:
continue
card_score = scores.loc[cases, "score"].sum()
card_total = scores.loc[cases, "max"].sum()
# Format results nicely to work immediately with Vega Bar Chart.
section_score = {"section": section_id,
"score": card_score / card_total}
self.result["score"]["sections"].append(section_score)
# Calculate the final score for the entire model.
weight = card.get("weight", 1.0)
score += card_score * weight
maximum += card_total * weight
self.result["score"]["total_score"] = score / maximum | python | def compute_score(self):
"""Calculate the overall test score using the configuration."""
# LOGGER.info("Begin scoring")
cases = self.get_configured_tests() | set(self.result.cases)
scores = DataFrame({"score": 0.0, "max": 1.0},
index=sorted(cases))
self.result.setdefault("score", dict())
self.result["score"]["sections"] = list()
# Calculate the scores for each test individually.
for test, result in iteritems(self.result.cases):
# LOGGER.info("Calculate score for test: '%s'.", test)
# Test metric may be a dictionary for a parametrized test.
metric = result["metric"]
if hasattr(metric, "items"):
result["score"] = test_score = dict()
total = 0.0
for key, value in iteritems(metric):
value = 1.0 - value
total += value
test_score[key] = value
# For some reason there are parametrized tests without cases.
if len(metric) == 0:
metric = 0.0
else:
metric = total / len(metric)
else:
metric = 1.0 - metric
scores.at[test, "score"] = metric
scores.loc[test, :] *= self.config["weights"].get(test, 1.0)
score = 0.0
maximum = 0.0
# Calculate the scores for each section considering the individual test
# case scores.
for section_id, card in iteritems(
self.config['cards']['scored']['sections']
):
# LOGGER.info("Calculate score for section: '%s'.", section_id)
cases = card.get("cases", None)
if cases is None:
continue
card_score = scores.loc[cases, "score"].sum()
card_total = scores.loc[cases, "max"].sum()
# Format results nicely to work immediately with Vega Bar Chart.
section_score = {"section": section_id,
"score": card_score / card_total}
self.result["score"]["sections"].append(section_score)
# Calculate the final score for the entire model.
weight = card.get("weight", 1.0)
score += card_score * weight
maximum += card_total * weight
self.result["score"]["total_score"] = score / maximum | [
"def",
"compute_score",
"(",
"self",
")",
":",
"# LOGGER.info(\"Begin scoring\")",
"cases",
"=",
"self",
".",
"get_configured_tests",
"(",
")",
"|",
"set",
"(",
"self",
".",
"result",
".",
"cases",
")",
"scores",
"=",
"DataFrame",
"(",
"{",
"\"score\"",
":",
"0.0",
",",
"\"max\"",
":",
"1.0",
"}",
",",
"index",
"=",
"sorted",
"(",
"cases",
")",
")",
"self",
".",
"result",
".",
"setdefault",
"(",
"\"score\"",
",",
"dict",
"(",
")",
")",
"self",
".",
"result",
"[",
"\"score\"",
"]",
"[",
"\"sections\"",
"]",
"=",
"list",
"(",
")",
"# Calculate the scores for each test individually.",
"for",
"test",
",",
"result",
"in",
"iteritems",
"(",
"self",
".",
"result",
".",
"cases",
")",
":",
"# LOGGER.info(\"Calculate score for test: '%s'.\", test)",
"# Test metric may be a dictionary for a parametrized test.",
"metric",
"=",
"result",
"[",
"\"metric\"",
"]",
"if",
"hasattr",
"(",
"metric",
",",
"\"items\"",
")",
":",
"result",
"[",
"\"score\"",
"]",
"=",
"test_score",
"=",
"dict",
"(",
")",
"total",
"=",
"0.0",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"metric",
")",
":",
"value",
"=",
"1.0",
"-",
"value",
"total",
"+=",
"value",
"test_score",
"[",
"key",
"]",
"=",
"value",
"# For some reason there are parametrized tests without cases.",
"if",
"len",
"(",
"metric",
")",
"==",
"0",
":",
"metric",
"=",
"0.0",
"else",
":",
"metric",
"=",
"total",
"/",
"len",
"(",
"metric",
")",
"else",
":",
"metric",
"=",
"1.0",
"-",
"metric",
"scores",
".",
"at",
"[",
"test",
",",
"\"score\"",
"]",
"=",
"metric",
"scores",
".",
"loc",
"[",
"test",
",",
":",
"]",
"*=",
"self",
".",
"config",
"[",
"\"weights\"",
"]",
".",
"get",
"(",
"test",
",",
"1.0",
")",
"score",
"=",
"0.0",
"maximum",
"=",
"0.0",
"# Calculate the scores for each section considering the individual test",
"# case scores.",
"for",
"section_id",
",",
"card",
"in",
"iteritems",
"(",
"self",
".",
"config",
"[",
"'cards'",
"]",
"[",
"'scored'",
"]",
"[",
"'sections'",
"]",
")",
":",
"# LOGGER.info(\"Calculate score for section: '%s'.\", section_id)",
"cases",
"=",
"card",
".",
"get",
"(",
"\"cases\"",
",",
"None",
")",
"if",
"cases",
"is",
"None",
":",
"continue",
"card_score",
"=",
"scores",
".",
"loc",
"[",
"cases",
",",
"\"score\"",
"]",
".",
"sum",
"(",
")",
"card_total",
"=",
"scores",
".",
"loc",
"[",
"cases",
",",
"\"max\"",
"]",
".",
"sum",
"(",
")",
"# Format results nicely to work immediately with Vega Bar Chart.",
"section_score",
"=",
"{",
"\"section\"",
":",
"section_id",
",",
"\"score\"",
":",
"card_score",
"/",
"card_total",
"}",
"self",
".",
"result",
"[",
"\"score\"",
"]",
"[",
"\"sections\"",
"]",
".",
"append",
"(",
"section_score",
")",
"# Calculate the final score for the entire model.",
"weight",
"=",
"card",
".",
"get",
"(",
"\"weight\"",
",",
"1.0",
")",
"score",
"+=",
"card_score",
"*",
"weight",
"maximum",
"+=",
"card_total",
"*",
"weight",
"self",
".",
"result",
"[",
"\"score\"",
"]",
"[",
"\"total_score\"",
"]",
"=",
"score",
"/",
"maximum"
] | Calculate the overall test score using the configuration. | [
"Calculate",
"the",
"overall",
"test",
"score",
"using",
"the",
"configuration",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/report.py#L114-L164 |
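The method boils down to a weighted average of (1 - metric) values per test and per section. A self-contained sketch of the same arithmetic with made-up test names and weights:

metrics = {"test_a": 0.25, "test_b": 1.0}  # fraction failed per test
weights = {"test_a": 1.0, "test_b": 2.0}   # assumed configuration weights
score = sum((1.0 - m) * weights[t] for t, m in metrics.items())
maximum = sum(weights.values())
print(score / maximum)  # 0.25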
opencobra/memote | memote/support/sbo.py | find_components_without_sbo_terms | def find_components_without_sbo_terms(model, components):
"""
Find model components that are not annotated with any SBO terms.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any SBO term annotation.
"""
return [elem for elem in getattr(model, components) if
elem.annotation is None or 'sbo' not in elem.annotation] | python | def find_components_without_sbo_terms(model, components):
"""
Find model components that are not annotated with any SBO terms.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any SBO term annotation.
"""
return [elem for elem in getattr(model, components) if
elem.annotation is None or 'sbo' not in elem.annotation] | [
"def",
"find_components_without_sbo_terms",
"(",
"model",
",",
"components",
")",
":",
"return",
"[",
"elem",
"for",
"elem",
"in",
"getattr",
"(",
"model",
",",
"components",
")",
"if",
"elem",
".",
"annotation",
"is",
"None",
"or",
"'sbo'",
"not",
"in",
"elem",
".",
"annotation",
"]"
] | Find model components that are not annotated with any SBO terms.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any SBO term annotation. | [
"Find",
"model",
"components",
"that",
"are",
"not",
"annotated",
"with",
"any",
"SBO",
"terms",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/sbo.py#L27-L45 |
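An illustrative call; the second argument names the cobra.Model attribute to scan:

unannotated_genes = find_components_without_sbo_terms(model, "genes")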
opencobra/memote | memote/support/sbo.py | check_component_for_specific_sbo_term | def check_component_for_specific_sbo_term(items, term):
r"""
Identify model components that lack a specific SBO term(s).
Parameters
----------
items : list
A list of model components i.e. reactions to be checked for a specific
SBO term.
term : str or list of str
A string denoting a valid SBO term matching the regex '^SBO:\d{7}$'
or a list containing such string elements.
Returns
-------
list
The components without any or that specific SBO term annotation.
"""
# check for multiple allowable SBO terms
if isinstance(term, list):
return [elem for elem in items if
elem.annotation is None or
'sbo' not in elem.annotation or
not any(i in elem.annotation['sbo'] for i in term)]
else:
return [elem for elem in items if
elem.annotation is None or
'sbo' not in elem.annotation or
term not in elem.annotation['sbo']] | python | def check_component_for_specific_sbo_term(items, term):
r"""
Identify model components that lack a specific SBO term(s).
Parameters
----------
items : list
A list of model components i.e. reactions to be checked for a specific
SBO term.
term : str or list of str
A string denoting a valid SBO term matching the regex '^SBO:\d{7}$'
or a list containing such string elements.
Returns
-------
list
The components without any or that specific SBO term annotation.
"""
# check for multiple allowable SBO terms
if isinstance(term, list):
return [elem for elem in items if
elem.annotation is None or
'sbo' not in elem.annotation or
not any(i in elem.annotation['sbo'] for i in term)]
else:
return [elem for elem in items if
elem.annotation is None or
'sbo' not in elem.annotation or
term not in elem.annotation['sbo']] | [
"def",
"check_component_for_specific_sbo_term",
"(",
"items",
",",
"term",
")",
":",
"# check for multiple allowable SBO terms",
"if",
"isinstance",
"(",
"term",
",",
"list",
")",
":",
"return",
"[",
"elem",
"for",
"elem",
"in",
"items",
"if",
"elem",
".",
"annotation",
"is",
"None",
"or",
"'sbo'",
"not",
"in",
"elem",
".",
"annotation",
"or",
"not",
"any",
"(",
"i",
"in",
"elem",
".",
"annotation",
"[",
"'sbo'",
"]",
"for",
"i",
"in",
"term",
")",
"]",
"else",
":",
"return",
"[",
"elem",
"for",
"elem",
"in",
"items",
"if",
"elem",
".",
"annotation",
"is",
"None",
"or",
"'sbo'",
"not",
"in",
"elem",
".",
"annotation",
"or",
"term",
"not",
"in",
"elem",
".",
"annotation",
"[",
"'sbo'",
"]",
"]"
] | r"""
Identify model components that lack a specific SBO term(s).
Parameters
----------
items : list
A list of model components i.e. reactions to be checked for a specific
SBO term.
term : str or list of str
A string denoting a valid SBO term matching the regex '^SBO:\d{7}$'
or a list containing such string elements.
Returns
-------
list
The components without any or that specific SBO term annotation. | [
"r",
"Identify",
"model",
"components",
"that",
"lack",
"a",
"specific",
"SBO",
"term",
"(",
"s",
")",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/sbo.py#L48-L77 |
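An example, assuming `model` is loaded; SBO:0000176 ("biochemical reaction") is used purely as an illustration:

missing = check_component_for_specific_sbo_term(model.reactions, "SBO:0000176")
# A list of terms is also accepted, in which case any single match suffices.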
opencobra/memote | memote/support/thermodynamics.py | get_smallest_compound_id | def get_smallest_compound_id(compounds_identifiers):
"""
Return the smallest KEGG compound identifier from a list.
KEGG identifiers may map to compounds, drugs or glycans prefixed
respectively with "C", "D", and "G" followed by at least 5 digits. We
choose the lowest KEGG identifier with the assumption that several
identifiers are due to chirality and that the lower one represents the
more common form.
Parameters
----------
compounds_identifiers : list
A list of mixed KEGG identifiers.
Returns
-------
str
The KEGG compound identifier with the smallest number.
Raises
------
ValueError
When compound_identifiers contains no KEGG compound identifiers.
"""
return min((c for c in compounds_identifiers if c.startswith("C")),
key=lambda c: int(c[1:])) | python | def get_smallest_compound_id(compounds_identifiers):
"""
Return the smallest KEGG compound identifier from a list.
KEGG identifiers may map to compounds, drugs or glycans prefixed
respectively with "C", "D", and "G" followed by at least 5 digits. We
choose the lowest KEGG identifier with the assumption that several
identifiers are due to chirality and that the lower one represents the
more common form.
Parameters
----------
compounds_identifiers : list
A list of mixed KEGG identifiers.
Returns
-------
str
The KEGG compound identifier with the smallest number.
Raises
------
ValueError
When compound_identifiers contains no KEGG compound identifiers.
"""
return min((c for c in compounds_identifiers if c.startswith("C")),
key=lambda c: int(c[1:])) | [
"def",
"get_smallest_compound_id",
"(",
"compounds_identifiers",
")",
":",
"return",
"min",
"(",
"(",
"c",
"for",
"c",
"in",
"compounds_identifiers",
"if",
"c",
".",
"startswith",
"(",
"\"C\"",
")",
")",
",",
"key",
"=",
"lambda",
"c",
":",
"int",
"(",
"c",
"[",
"1",
":",
"]",
")",
")"
] | Return the smallest KEGG compound identifier from a list.
KEGG identifiers may map to compounds, drugs or glycans prefixed
respectively with "C", "D", and "G" followed by at least 5 digits. We
choose the lowest KEGG identifier with the assumption that several
identifiers are due to chirality and that the lower one represents the
more common form.
Parameters
----------
compounds_identifiers : list
A list of mixed KEGG identifiers.
Returns
-------
str
The KEGG compound identifier with the smallest number.
Raises
------
ValueError
When compound_identifiers contains no KEGG compound identifiers. | [
"Return",
"the",
"smallest",
"KEGG",
"compound",
"identifier",
"from",
"a",
"list",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/thermodynamics.py#L37-L64 |
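A quick check of the selection rule with made-up KEGG identifiers; drug ("D") and glycan ("G") entries are skipped and the lowest compound number wins:

get_smallest_compound_id(["G10609", "D00018", "C00221", "C00031"])  # -> 'C00031'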
opencobra/memote | memote/support/thermodynamics.py | map_metabolite2kegg | def map_metabolite2kegg(metabolite):
"""
Return a KEGG compound identifier for the metabolite if it exists.
First see if there is an unambiguous mapping to a single KEGG compound ID
provided with the model. If not, check if there is any KEGG compound ID in
a list of mappings. KEGG IDs may map to compounds, drugs and glycans. KEGG
compound IDs are sorted so we keep the lowest that is there. If none of
this works try mapping to KEGG via the CompoundMatcher by the name of the
metabolite. If the metabolite cannot be mapped at all, None is returned.
Parameters
----------
metabolite : cobra.Metabolite
The metabolite to be mapped to its KEGG compound identifier.
Returns
-------
None
If the metabolite could not be mapped.
str
The smallest KEGG compound identifier that was found.
"""
logger.debug("Looking for KEGG compound identifier for %s.", metabolite.id)
kegg_annotation = metabolite.annotation.get("kegg.compound")
if kegg_annotation is None:
# TODO (Moritz Beber): Currently name matching is very slow and
# inaccurate. We disable it until there is a better solution.
# if metabolite.name:
# # The compound matcher uses regular expression and chokes
# # with a low level error on `[` in the name, for example.
# df = compound_matcher.match(metabolite.name)
# try:
# return df.loc[df["score"] > threshold, "CID"].iat[0]
# except (IndexError, AttributeError):
# logger.warning(
# "Could not match the name %r to any kegg.compound "
# "annotation for metabolite %s.",
# metabolite.name, metabolite.id
# )
# return
# else:
logger.warning("No kegg.compound annotation for metabolite %s.",
metabolite.id)
return
if isinstance(kegg_annotation, string_types) and \
kegg_annotation.startswith("C"):
return kegg_annotation
elif isinstance(kegg_annotation, Iterable):
try:
return get_smallest_compound_id(kegg_annotation)
except ValueError:
return
logger.warning(
"No matching kegg.compound annotation for metabolite %s.",
metabolite.id
)
return | python | def map_metabolite2kegg(metabolite):
"""
Return a KEGG compound identifier for the metabolite if it exists.
First see if there is an unambiguous mapping to a single KEGG compound ID
provided with the model. If not, check if there is any KEGG compound ID in
a list of mappings. KEGG IDs may map to compounds, drugs and glycans. KEGG
compound IDs are sorted so we keep the lowest that is there. If none of
this works try mapping to KEGG via the CompoundMatcher by the name of the
metabolite. If the metabolite cannot be mapped at all, None is returned.
Parameters
----------
metabolite : cobra.Metabolite
The metabolite to be mapped to its KEGG compound identifier.
Returns
-------
None
If the metabolite could not be mapped.
str
The smallest KEGG compound identifier that was found.
"""
logger.debug("Looking for KEGG compound identifier for %s.", metabolite.id)
kegg_annotation = metabolite.annotation.get("kegg.compound")
if kegg_annotation is None:
# TODO (Moritz Beber): Currently name matching is very slow and
# inaccurate. We disable it until there is a better solution.
# if metabolite.name:
# # The compound matcher uses regular expression and chokes
# # with a low level error on `[` in the name, for example.
# df = compound_matcher.match(metabolite.name)
# try:
# return df.loc[df["score"] > threshold, "CID"].iat[0]
# except (IndexError, AttributeError):
# logger.warning(
# "Could not match the name %r to any kegg.compound "
# "annotation for metabolite %s.",
# metabolite.name, metabolite.id
# )
# return
# else:
logger.warning("No kegg.compound annotation for metabolite %s.",
metabolite.id)
return
if isinstance(kegg_annotation, string_types) and \
kegg_annotation.startswith("C"):
return kegg_annotation
elif isinstance(kegg_annotation, Iterable):
try:
return get_smallest_compound_id(kegg_annotation)
except ValueError:
return
logger.warning(
"No matching kegg.compound annotation for metabolite %s.",
metabolite.id
)
return | [
"def",
"map_metabolite2kegg",
"(",
"metabolite",
")",
":",
"logger",
".",
"debug",
"(",
"\"Looking for KEGG compound identifier for %s.\"",
",",
"metabolite",
".",
"id",
")",
"kegg_annotation",
"=",
"metabolite",
".",
"annotation",
".",
"get",
"(",
"\"kegg.compound\"",
")",
"if",
"kegg_annotation",
"is",
"None",
":",
"# TODO (Moritz Beber): Currently name matching is very slow and",
"# inaccurate. We disable it until there is a better solution.",
"# if metabolite.name:",
"# # The compound matcher uses regular expression and chokes",
"# # with a low level error on `[` in the name, for example.",
"# df = compound_matcher.match(metabolite.name)",
"# try:",
"# return df.loc[df[\"score\"] > threshold, \"CID\"].iat[0]",
"# except (IndexError, AttributeError):",
"# logger.warning(",
"# \"Could not match the name %r to any kegg.compound \"",
"# \"annotation for metabolite %s.\",",
"# metabolite.name, metabolite.id",
"# )",
"# return",
"# else:",
"logger",
".",
"warning",
"(",
"\"No kegg.compound annotation for metabolite %s.\"",
",",
"metabolite",
".",
"id",
")",
"return",
"if",
"isinstance",
"(",
"kegg_annotation",
",",
"string_types",
")",
"and",
"kegg_annotation",
".",
"startswith",
"(",
"\"C\"",
")",
":",
"return",
"kegg_annotation",
"elif",
"isinstance",
"(",
"kegg_annotation",
",",
"Iterable",
")",
":",
"try",
":",
"return",
"get_smallest_compound_id",
"(",
"kegg_annotation",
")",
"except",
"ValueError",
":",
"return",
"logger",
".",
"warning",
"(",
"\"No matching kegg.compound annotation for metabolite %s.\"",
",",
"metabolite",
".",
"id",
")",
"return"
] | Return a KEGG compound identifier for the metabolite if it exists.
First see if there is an unambiguous mapping to a single KEGG compound ID
provided with the model. If not, check if there is any KEGG compound ID in
a list of mappings. KEGG IDs may map to compounds, drugs and glycans. KEGG
compound IDs are sorted so we keep the lowest that is there. If none of
this works try mapping to KEGG via the CompoundMatcher by the name of the
metabolite. If the metabolite cannot be mapped at all, None is returned.
Parameters
----------
metabolite : cobra.Metabolite
The metabolite to be mapped to its KEGG compound identifier.
Returns
-------
None
If the metabolite could not be mapped.
str
The smallest KEGG compound identifier that was found. | [
"Return",
"a",
"KEGG",
"compound",
"identifier",
"for",
"the",
"metabolite",
"if",
"it",
"exists",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/thermodynamics.py#L67-L126 |
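A hedged example; "atp_c" is an assumed metabolite ID whose kegg.compound annotation resolves to ATP (KEGG C00002):

kegg_id = map_metabolite2kegg(model.metabolites.get_by_id("atp_c"))
# Returns None (with a logged warning) when no KEGG compound can be resolved.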
opencobra/memote | memote/support/thermodynamics.py | translate_reaction | def translate_reaction(reaction, metabolite_mapping):
"""
Return a mapping from KEGG compound identifiers to coefficients.
Parameters
----------
reaction : cobra.Reaction
The reaction whose metabolites are to be translated.
metabolite_mapping : dict
An existing mapping from cobra.Metabolite to KEGG compound identifier
that may already contain the metabolites in question or will have to be
extended.
Returns
-------
dict
The stoichiometry of the reaction given as a mapping from metabolite
KEGG identifier to coefficient.
"""
# Transport reactions where the same metabolite occurs in different
# compartments should have been filtered out but just to be sure, we add
# coefficients in the mapping.
stoichiometry = defaultdict(float)
for met, coef in iteritems(reaction.metabolites):
kegg_id = metabolite_mapping.setdefault(met, map_metabolite2kegg(met))
if kegg_id is None:
continue
stoichiometry[kegg_id] += coef
return dict(stoichiometry) | python | def translate_reaction(reaction, metabolite_mapping):
"""
Return a mapping from KEGG compound identifiers to coefficients.
Parameters
----------
reaction : cobra.Reaction
The reaction whose metabolites are to be translated.
metabolite_mapping : dict
An existing mapping from cobra.Metabolite to KEGG compound identifier
that may already contain the metabolites in question or will have to be
extended.
Returns
-------
dict
The stoichiometry of the reaction given as a mapping from metabolite
KEGG identifier to coefficient.
"""
# Transport reactions where the same metabolite occurs in different
# compartments should have been filtered out but just to be sure, we add
# coefficients in the mapping.
stoichiometry = defaultdict(float)
for met, coef in iteritems(reaction.metabolites):
kegg_id = metabolite_mapping.setdefault(met, map_metabolite2kegg(met))
if kegg_id is None:
continue
stoichiometry[kegg_id] += coef
return dict(stoichiometry) | [
"def",
"translate_reaction",
"(",
"reaction",
",",
"metabolite_mapping",
")",
":",
"# Transport reactions where the same metabolite occurs in different",
"# compartments should have been filtered out but just to be sure, we add",
"# coefficients in the mapping.",
"stoichiometry",
"=",
"defaultdict",
"(",
"float",
")",
"for",
"met",
",",
"coef",
"in",
"iteritems",
"(",
"reaction",
".",
"metabolites",
")",
":",
"kegg_id",
"=",
"metabolite_mapping",
".",
"setdefault",
"(",
"met",
",",
"map_metabolite2kegg",
"(",
"met",
")",
")",
"if",
"kegg_id",
"is",
"None",
":",
"continue",
"stoichiometry",
"[",
"kegg_id",
"]",
"+=",
"coef",
"return",
"dict",
"(",
"stoichiometry",
")"
] | Return a mapping from KEGG compound identifiers to coefficients.
Parameters
----------
reaction : cobra.Reaction
The reaction whose metabolites are to be translated.
metabolite_mapping : dict
An existing mapping from cobra.Metabolite to KEGG compound identifier
that may already contain the metabolites in question or will have to be
extended.
Returns
-------
dict
The stoichiometry of the reaction given as a mapping from metabolite
KEGG identifier to coefficient. | [
"Return",
"a",
"mapping",
"from",
"KEGG",
"compound",
"identifiers",
"to",
"coefficients",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/thermodynamics.py#L129-L158 |
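A sketch of a typical call; the mapping dict acts as a cache shared across calls, and "PGI" is an assumed reaction ID:

cache = {}
stoichiometry = translate_reaction(model.reactions.get_by_id("PGI"), cache)
# e.g. {"C00668": -1.0, "C05345": 1.0} for glucose 6-phosphate <=> fructose 6-phosphate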
opencobra/memote | memote/support/thermodynamics.py | find_thermodynamic_reversibility_index | def find_thermodynamic_reversibility_index(reactions):
u"""
Return the reversibility index of the given reactions.
To determine the reversibility index, we calculate
the reversibility index ln_gamma (see [1]_ section 3.5) of each reaction
using the eQuilibrator API [2]_.
Parameters
----------
reactions: list of cobra.Reaction
A list of reactions for which to calculate the reversibility index.
Returns
-------
tuple
list of cobra.Reaction, index pairs
A list of pairs of reactions and their reversibility indexes.
list of cobra.Reaction
A list of reactions which contain at least one metabolite that
could not be mapped to KEGG on the basis of its annotation.
list of cobra.Reaction
A list of reactions for which it is not possible to calculate the
standard change in Gibbs free energy potential. Reasons of failure
include that participating metabolites cannot be broken down with
the group contribution method.
list of cobra.Reaction
A list of reactions that are not chemically or redox balanced.
References
----------
.. [1] Elad Noor, Arren Bar-Even, Avi Flamholz, Yaniv Lubling, Dan Davidi,
Ron Milo; An integrated open framework for thermodynamics of
reactions that combines accuracy and coverage, Bioinformatics,
Volume 28, Issue 15, 1 August 2012, Pages 2037–2044,
https://doi.org/10.1093/bioinformatics/bts317
.. [2] https://pypi.org/project/equilibrator-api/
"""
incomplete_mapping = []
problematic_calculation = []
reversibility_indexes = []
unbalanced = []
metabolite_mapping = {}
for rxn in reactions:
stoich = translate_reaction(rxn, metabolite_mapping)
if len(stoich) < len(rxn.metabolites):
incomplete_mapping.append(rxn)
continue
try:
# Remove protons from stoichiometry.
if "C00080" in stoich:
del stoich["C00080"]
eq_rxn = Reaction(stoich, rxn.id)
except KeyError:
incomplete_mapping.append(rxn)
continue
if eq_rxn.check_full_reaction_balancing():
try:
ln_rev_index = eq_rxn.reversibility_index()
# TODO (Moritz Beber): Which exceptions can we expect here?
except Exception:
problematic_calculation.append(rxn)
continue
reversibility_indexes.append((rxn, ln_rev_index))
else:
unbalanced.append(rxn)
reversibility_indexes.sort(key=lambda p: abs(p[1]), reverse=True)
return (
reversibility_indexes, incomplete_mapping, problematic_calculation,
unbalanced
) | python | def find_thermodynamic_reversibility_index(reactions):
u"""
Return the reversibility index of the given reactions.
To determine the reversibility index, we calculate
the reversibility index ln_gamma (see [1]_ section 3.5) of each reaction
using the eQuilibrator API [2]_.
Parameters
----------
reactions: list of cobra.Reaction
A list of reactions for which to calculate the reversibility index.
Returns
-------
tuple
list of cobra.Reaction, index pairs
A list of pairs of reactions and their reversibility indexes.
list of cobra.Reaction
A list of reactions which contain at least one metabolite that
could not be mapped to KEGG on the basis of its annotation.
list of cobra.Reaction
A list of reactions for which it is not possible to calculate the
standard change in Gibbs free energy potential. Reasons of failure
include that participating metabolites cannot be broken down with
the group contribution method.
list of cobra.Reaction
A list of reactions that are not chemically or redox balanced.
References
----------
.. [1] Elad Noor, Arren Bar-Even, Avi Flamholz, Yaniv Lubling, Dan Davidi,
Ron Milo; An integrated open framework for thermodynamics of
reactions that combines accuracy and coverage, Bioinformatics,
Volume 28, Issue 15, 1 August 2012, Pages 2037–2044,
https://doi.org/10.1093/bioinformatics/bts317
.. [2] https://pypi.org/project/equilibrator-api/
"""
incomplete_mapping = []
problematic_calculation = []
reversibility_indexes = []
unbalanced = []
metabolite_mapping = {}
for rxn in reactions:
stoich = translate_reaction(rxn, metabolite_mapping)
if len(stoich) < len(rxn.metabolites):
incomplete_mapping.append(rxn)
continue
try:
# Remove protons from stoichiometry.
if "C00080" in stoich:
del stoich["C00080"]
eq_rxn = Reaction(stoich, rxn.id)
except KeyError:
incomplete_mapping.append(rxn)
continue
if eq_rxn.check_full_reaction_balancing():
try:
ln_rev_index = eq_rxn.reversibility_index()
# TODO (Moritz Beber): Which exceptions can we expect here?
except Exception:
problematic_calculation.append(rxn)
continue
reversibility_indexes.append((rxn, ln_rev_index))
else:
unbalanced.append(rxn)
reversibility_indexes.sort(key=lambda p: abs(p[1]), reverse=True)
return (
reversibility_indexes, incomplete_mapping, problematic_calculation,
unbalanced
) | [
"def",
"find_thermodynamic_reversibility_index",
"(",
"reactions",
")",
":",
"incomplete_mapping",
"=",
"[",
"]",
"problematic_calculation",
"=",
"[",
"]",
"reversibility_indexes",
"=",
"[",
"]",
"unbalanced",
"=",
"[",
"]",
"metabolite_mapping",
"=",
"{",
"}",
"for",
"rxn",
"in",
"reactions",
":",
"stoich",
"=",
"translate_reaction",
"(",
"rxn",
",",
"metabolite_mapping",
")",
"if",
"len",
"(",
"stoich",
")",
"<",
"len",
"(",
"rxn",
".",
"metabolites",
")",
":",
"incomplete_mapping",
".",
"append",
"(",
"rxn",
")",
"continue",
"try",
":",
"# Remove protons from stoichiometry.",
"if",
"\"C00080\"",
"in",
"stoich",
":",
"del",
"stoich",
"[",
"\"C00080\"",
"]",
"eq_rxn",
"=",
"Reaction",
"(",
"stoich",
",",
"rxn",
".",
"id",
")",
"except",
"KeyError",
":",
"incomplete_mapping",
".",
"append",
"(",
"rxn",
")",
"continue",
"if",
"eq_rxn",
".",
"check_full_reaction_balancing",
"(",
")",
":",
"try",
":",
"ln_rev_index",
"=",
"eq_rxn",
".",
"reversibility_index",
"(",
")",
"# TODO (Moritz Beber): Which exceptions can we expect here?",
"except",
"Exception",
":",
"problematic_calculation",
".",
"append",
"(",
"rxn",
")",
"continue",
"reversibility_indexes",
".",
"append",
"(",
"(",
"rxn",
",",
"ln_rev_index",
")",
")",
"else",
":",
"unbalanced",
".",
"append",
"(",
"rxn",
")",
"reversibility_indexes",
".",
"sort",
"(",
"key",
"=",
"lambda",
"p",
":",
"abs",
"(",
"p",
"[",
"1",
"]",
")",
",",
"reverse",
"=",
"True",
")",
"return",
"(",
"reversibility_indexes",
",",
"incomplete_mapping",
",",
"problematic_calculation",
",",
"unbalanced",
")"
] | u"""
Return the reversibility index of the given reactions.
To determine the reversibility index, we calculate
the reversibility index ln_gamma (see [1]_ section 3.5) of each reaction
using the eQuilibrator API [2]_.
Parameters
----------
reactions: list of cobra.Reaction
A list of reactions for which to calculate the reversibility index.
Returns
-------
tuple
list of cobra.Reaction, index pairs
A list of pairs of reactions and their reversibility indexes.
list of cobra.Reaction
A list of reactions which contain at least one metabolite that
could not be mapped to KEGG on the basis of its annotation.
list of cobra.Reaction
A list of reactions for which it is not possible to calculate the
standard change in Gibbs free energy potential. Reasons for failure
include participating metabolites that cannot be broken down with
the group contribution method.
list of cobra.Reaction
A list of reactions that are not chemically or redox balanced.
References
----------
.. [1] Elad Noor, Arren Bar-Even, Avi Flamholz, Yaniv Lubling, Dan Davidi,
Ron Milo; An integrated open framework for thermodynamics of
reactions that combines accuracy and coverage, Bioinformatics,
Volume 28, Issue 15, 1 August 2012, Pages 2037–2044,
https://doi.org/10.1093/bioinformatics/bts317
.. [2] https://pypi.org/project/equilibrator-api/ | [
"u",
"Return",
"the",
"reversibility",
"index",
"of",
"the",
"given",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/thermodynamics.py#L161-L234 |
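A minimal usage sketch for the record above, assuming a cobrapy model loaded from a hypothetical "model.xml" file; the import path follows the file location shown in the record (memote/support/thermodynamics.py):

from cobra.io import read_sbml_model
from memote.support.thermodynamics import find_thermodynamic_reversibility_index

model = read_sbml_model("model.xml")  # hypothetical file name
indexes, unmapped, failed, unbalanced = find_thermodynamic_reversibility_index(
    model.reactions
)
# Pairs are sorted by decreasing |ln_gamma|; show the five most extreme.
for rxn, ln_gamma in indexes[:5]:
    print(rxn.id, ln_gamma)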
opencobra/memote | memote/support/consistency.py | check_stoichiometric_consistency | def check_stoichiometric_consistency(model):
"""
Verify the consistency of the model's stoichiometry.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.1 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
problem = model.problem
# The transpose of the stoichiometric matrix N.T in the paper.
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
LOGGER.info("model '%s' has %d internal reactions", model.id,
len(internal_rxns))
LOGGER.info("model '%s' has %d internal metabolites", model.id,
len(metabolites))
stoich_trans.add([problem.Variable(m.id, lb=1) for m in metabolites])
stoich_trans.update()
con_helpers.add_reaction_constraints(
stoich_trans, internal_rxns, problem.Constraint)
# The objective is to minimize the metabolite mass vector.
stoich_trans.objective = problem.Objective(
Zero, direction="min", sloppy=True)
stoich_trans.objective.set_linear_coefficients(
{var: 1. for var in stoich_trans.variables})
status = stoich_trans.optimize()
if status == OPTIMAL:
return True
elif status == INFEASIBLE:
return False
else:
raise RuntimeError(
"Could not determine stoichiometric consistencty."
" Solver status is '{}'"
" (only optimal or infeasible expected).".format(status)) | python | def check_stoichiometric_consistency(model):
"""
Verify the consistency of the model's stoichiometry.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.1 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
problem = model.problem
# The transpose of the stoichiometric matrix N.T in the paper.
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
LOGGER.info("model '%s' has %d internal reactions", model.id,
len(internal_rxns))
LOGGER.info("model '%s' has %d internal metabolites", model.id,
len(metabolites))
stoich_trans.add([problem.Variable(m.id, lb=1) for m in metabolites])
stoich_trans.update()
con_helpers.add_reaction_constraints(
stoich_trans, internal_rxns, problem.Constraint)
# The objective is to minimize the metabolite mass vector.
stoich_trans.objective = problem.Objective(
Zero, direction="min", sloppy=True)
stoich_trans.objective.set_linear_coefficients(
{var: 1. for var in stoich_trans.variables})
status = stoich_trans.optimize()
if status == OPTIMAL:
return True
elif status == INFEASIBLE:
return False
else:
raise RuntimeError(
"Could not determine stoichiometric consistencty."
" Solver status is '{}'"
" (only optimal or infeasible expected).".format(status)) | [
"def",
"check_stoichiometric_consistency",
"(",
"model",
")",
":",
"problem",
"=",
"model",
".",
"problem",
"# The transpose of the stoichiometric matrix N.T in the paper.",
"stoich_trans",
"=",
"problem",
".",
"Model",
"(",
")",
"internal_rxns",
"=",
"con_helpers",
".",
"get_internals",
"(",
"model",
")",
"metabolites",
"=",
"set",
"(",
"met",
"for",
"rxn",
"in",
"internal_rxns",
"for",
"met",
"in",
"rxn",
".",
"metabolites",
")",
"LOGGER",
".",
"info",
"(",
"\"model '%s' has %d internal reactions\"",
",",
"model",
".",
"id",
",",
"len",
"(",
"internal_rxns",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"model '%s' has %d internal metabolites\"",
",",
"model",
".",
"id",
",",
"len",
"(",
"metabolites",
")",
")",
"stoich_trans",
".",
"add",
"(",
"[",
"problem",
".",
"Variable",
"(",
"m",
".",
"id",
",",
"lb",
"=",
"1",
")",
"for",
"m",
"in",
"metabolites",
"]",
")",
"stoich_trans",
".",
"update",
"(",
")",
"con_helpers",
".",
"add_reaction_constraints",
"(",
"stoich_trans",
",",
"internal_rxns",
",",
"problem",
".",
"Constraint",
")",
"# The objective is to minimize the metabolite mass vector.",
"stoich_trans",
".",
"objective",
"=",
"problem",
".",
"Objective",
"(",
"Zero",
",",
"direction",
"=",
"\"min\"",
",",
"sloppy",
"=",
"True",
")",
"stoich_trans",
".",
"objective",
".",
"set_linear_coefficients",
"(",
"{",
"var",
":",
"1.",
"for",
"var",
"in",
"stoich_trans",
".",
"variables",
"}",
")",
"status",
"=",
"stoich_trans",
".",
"optimize",
"(",
")",
"if",
"status",
"==",
"OPTIMAL",
":",
"return",
"True",
"elif",
"status",
"==",
"INFEASIBLE",
":",
"return",
"False",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Could not determine stoichiometric consistencty.\"",
"\" Solver status is '{}'\"",
"\" (only optimal or infeasible expected).\"",
".",
"format",
"(",
"status",
")",
")"
] | Verify the consistency of the model's stoichiometry.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.1 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245. | [
"Verify",
"the",
"consistency",
"of",
"the",
"model",
"s",
"stoichiometry",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L63-L110 |
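A minimal usage sketch, assuming a cobrapy model from a hypothetical "model.xml"; the import follows the module path in the record (memote/support/consistency.py):

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
# True when every internal metabolite can be assigned a strictly positive
# mass, i.e. the internal stoichiometry is consistent.
print(consistency.check_stoichiometric_consistency(model))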
opencobra/memote | memote/support/consistency.py | find_unconserved_metabolites | def find_unconserved_metabolites(model):
"""
Detect unconserved metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.2 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
problem = model.problem
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
# The binary variables k[i] in the paper.
k_vars = list()
for met in metabolites:
# The element m[i] of the mass vector.
m_var = problem.Variable(met.id)
k_var = problem.Variable("k_{}".format(met.id), type="binary")
k_vars.append(k_var)
stoich_trans.add([m_var, k_var])
# This constraint is equivalent to 0 <= k[i] <= m[i].
stoich_trans.add(problem.Constraint(
k_var - m_var, ub=0, name="switch_{}".format(met.id)))
stoich_trans.update()
con_helpers.add_reaction_constraints(
stoich_trans, internal_rxns, problem.Constraint)
# The objective is to maximize the binary indicators k[i], subject to the
# above inequality constraints.
stoich_trans.objective = problem.Objective(
Zero, sloppy=True, direction="max")
stoich_trans.objective.set_linear_coefficients(
{var: 1. for var in k_vars})
status = stoich_trans.optimize()
if status == OPTIMAL:
# TODO: See if that could be a Boolean test `bool(var.primal)`.
return set([model.metabolites.get_by_id(var.name[2:])
for var in k_vars if var.primal < 0.8])
else:
raise RuntimeError(
"Could not compute list of unconserved metabolites."
" Solver status is '{}' (only optimal expected).".format(status)) | python | def find_unconserved_metabolites(model):
"""
Detect unconserved metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.2 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
problem = model.problem
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
# The binary variables k[i] in the paper.
k_vars = list()
for met in metabolites:
# The element m[i] of the mass vector.
m_var = problem.Variable(met.id)
k_var = problem.Variable("k_{}".format(met.id), type="binary")
k_vars.append(k_var)
stoich_trans.add([m_var, k_var])
# This constraint is equivalent to 0 <= k[i] <= m[i].
stoich_trans.add(problem.Constraint(
k_var - m_var, ub=0, name="switch_{}".format(met.id)))
stoich_trans.update()
con_helpers.add_reaction_constraints(
stoich_trans, internal_rxns, problem.Constraint)
# The objective is to maximize the binary indicators k[i], subject to the
# above inequality constraints.
stoich_trans.objective = problem.Objective(
Zero, sloppy=True, direction="max")
stoich_trans.objective.set_linear_coefficients(
{var: 1. for var in k_vars})
status = stoich_trans.optimize()
if status == OPTIMAL:
# TODO: See if that could be a Boolean test `bool(var.primal)`.
return set([model.metabolites.get_by_id(var.name[2:])
for var in k_vars if var.primal < 0.8])
else:
raise RuntimeError(
"Could not compute list of unconserved metabolites."
" Solver status is '{}' (only optimal expected).".format(status)) | [
"def",
"find_unconserved_metabolites",
"(",
"model",
")",
":",
"problem",
"=",
"model",
".",
"problem",
"stoich_trans",
"=",
"problem",
".",
"Model",
"(",
")",
"internal_rxns",
"=",
"con_helpers",
".",
"get_internals",
"(",
"model",
")",
"metabolites",
"=",
"set",
"(",
"met",
"for",
"rxn",
"in",
"internal_rxns",
"for",
"met",
"in",
"rxn",
".",
"metabolites",
")",
"# The binary variables k[i] in the paper.",
"k_vars",
"=",
"list",
"(",
")",
"for",
"met",
"in",
"metabolites",
":",
"# The element m[i] of the mass vector.",
"m_var",
"=",
"problem",
".",
"Variable",
"(",
"met",
".",
"id",
")",
"k_var",
"=",
"problem",
".",
"Variable",
"(",
"\"k_{}\"",
".",
"format",
"(",
"met",
".",
"id",
")",
",",
"type",
"=",
"\"binary\"",
")",
"k_vars",
".",
"append",
"(",
"k_var",
")",
"stoich_trans",
".",
"add",
"(",
"[",
"m_var",
",",
"k_var",
"]",
")",
"# This constraint is equivalent to 0 <= k[i] <= m[i].",
"stoich_trans",
".",
"add",
"(",
"problem",
".",
"Constraint",
"(",
"k_var",
"-",
"m_var",
",",
"ub",
"=",
"0",
",",
"name",
"=",
"\"switch_{}\"",
".",
"format",
"(",
"met",
".",
"id",
")",
")",
")",
"stoich_trans",
".",
"update",
"(",
")",
"con_helpers",
".",
"add_reaction_constraints",
"(",
"stoich_trans",
",",
"internal_rxns",
",",
"problem",
".",
"Constraint",
")",
"# The objective is to maximize the binary indicators k[i], subject to the",
"# above inequality constraints.",
"stoich_trans",
".",
"objective",
"=",
"problem",
".",
"Objective",
"(",
"Zero",
",",
"sloppy",
"=",
"True",
",",
"direction",
"=",
"\"max\"",
")",
"stoich_trans",
".",
"objective",
".",
"set_linear_coefficients",
"(",
"{",
"var",
":",
"1.",
"for",
"var",
"in",
"k_vars",
"}",
")",
"status",
"=",
"stoich_trans",
".",
"optimize",
"(",
")",
"if",
"status",
"==",
"OPTIMAL",
":",
"# TODO: See if that could be a Boolean test `bool(var.primal)`.",
"return",
"set",
"(",
"[",
"model",
".",
"metabolites",
".",
"get_by_id",
"(",
"var",
".",
"name",
"[",
"2",
":",
"]",
")",
"for",
"var",
"in",
"k_vars",
"if",
"var",
".",
"primal",
"<",
"0.8",
"]",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Could not compute list of unconserved metabolites.\"",
"\" Solver status is '{}' (only optimal expected).\"",
".",
"format",
"(",
"status",
")",
")"
] | Detect unconserved metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.2 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245. | [
"Detect",
"unconserved",
"metabolites",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L113-L165 |
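A usage sketch under the same assumptions as above (hypothetical "model.xml", module path taken from the record); the returned set is empty for stoichiometrically consistent models:

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
unconserved = consistency.find_unconserved_metabolites(model)
print(sorted(met.id for met in unconserved))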
opencobra/memote | memote/support/consistency.py | find_inconsistent_min_stoichiometry | def find_inconsistent_min_stoichiometry(model, atol=1e-13):
"""
Detect inconsistent minimal net stoichiometries.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
atol : float, optional
Values below the absolute tolerance are treated as zero. Expected to be
very small but larger than zero.
Notes
-----
See [1]_ section 3.3 for a complete description of the algorithm.
References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
if check_stoichiometric_consistency(model):
return set()
Model, Constraint, Variable, Objective = con_helpers.get_interface(model)
unconserved_mets = find_unconserved_metabolites(model)
LOGGER.info("model has %d unconserved metabolites", len(unconserved_mets))
internal_rxns = con_helpers.get_internals(model)
internal_mets = set(
met for rxn in internal_rxns for met in rxn.metabolites)
get_id = attrgetter("id")
reactions = sorted(internal_rxns, key=get_id)
metabolites = sorted(internal_mets, key=get_id)
stoich, met_index, rxn_index = con_helpers.stoichiometry_matrix(
metabolites, reactions)
left_ns = con_helpers.nullspace(stoich.T)
# deal with numerical instabilities
left_ns[np.abs(left_ns) < atol] = 0.0
LOGGER.info("nullspace has dimension %d", left_ns.shape[1])
inc_minimal = set()
(problem, indicators) = con_helpers.create_milp_problem(
left_ns, metabolites, Model, Variable, Constraint, Objective)
LOGGER.debug(str(problem))
cuts = list()
for met in unconserved_mets:
row = met_index[met]
if (left_ns[row] == 0.0).all():
LOGGER.debug("%s: singleton minimal unconservable set", met.id)
# singleton set!
inc_minimal.add((met,))
continue
# expect a positive mass for the unconserved metabolite
problem.variables[met.id].lb = 1e-3
status = problem.optimize()
while status == "optimal":
LOGGER.debug("%s: status %s", met.id, status)
LOGGER.debug("sum of all primal values: %f",
sum(problem.primal_values.values()))
LOGGER.debug("sum of binary indicators: %f",
sum(var.primal for var in indicators))
solution = [model.metabolites.get_by_id(var.name[2:])
for var in indicators if var.primal > 0.2]
LOGGER.debug("%s: set size %d", met.id, len(solution))
inc_minimal.add(tuple(solution))
if len(solution) == 1:
break
cuts.append(con_helpers.add_cut(
problem, indicators, len(solution) - 1, Constraint))
status = problem.optimize()
LOGGER.debug("%s: last status %s", met.id, status)
# reset
problem.variables[met.id].lb = 0.0
problem.remove(cuts)
cuts.clear()
return inc_minimal | python | def find_inconsistent_min_stoichiometry(model, atol=1e-13):
"""
Detect inconsistent minimal net stoichiometries.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
atol : float, optional
Values below the absolute tolerance are treated as zero. Expected to be
very small but larger than zero.
Notes
-----
See [1]_ section 3.3 for a complete description of the algorithm.
References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
if check_stoichiometric_consistency(model):
return set()
Model, Constraint, Variable, Objective = con_helpers.get_interface(model)
unconserved_mets = find_unconserved_metabolites(model)
LOGGER.info("model has %d unconserved metabolites", len(unconserved_mets))
internal_rxns = con_helpers.get_internals(model)
internal_mets = set(
met for rxn in internal_rxns for met in rxn.metabolites)
get_id = attrgetter("id")
reactions = sorted(internal_rxns, key=get_id)
metabolites = sorted(internal_mets, key=get_id)
stoich, met_index, rxn_index = con_helpers.stoichiometry_matrix(
metabolites, reactions)
left_ns = con_helpers.nullspace(stoich.T)
# deal with numerical instabilities
left_ns[np.abs(left_ns) < atol] = 0.0
LOGGER.info("nullspace has dimension %d", left_ns.shape[1])
inc_minimal = set()
(problem, indicators) = con_helpers.create_milp_problem(
left_ns, metabolites, Model, Variable, Constraint, Objective)
LOGGER.debug(str(problem))
cuts = list()
for met in unconserved_mets:
row = met_index[met]
if (left_ns[row] == 0.0).all():
LOGGER.debug("%s: singleton minimal unconservable set", met.id)
# singleton set!
inc_minimal.add((met,))
continue
# expect a positive mass for the unconserved metabolite
problem.variables[met.id].lb = 1e-3
status = problem.optimize()
while status == "optimal":
LOGGER.debug("%s: status %s", met.id, status)
LOGGER.debug("sum of all primal values: %f",
sum(problem.primal_values.values()))
LOGGER.debug("sum of binary indicators: %f",
sum(var.primal for var in indicators))
solution = [model.metabolites.get_by_id(var.name[2:])
for var in indicators if var.primal > 0.2]
LOGGER.debug("%s: set size %d", met.id, len(solution))
inc_minimal.add(tuple(solution))
if len(solution) == 1:
break
cuts.append(con_helpers.add_cut(
problem, indicators, len(solution) - 1, Constraint))
status = problem.optimize()
LOGGER.debug("%s: last status %s", met.id, status)
# reset
problem.variables[met.id].lb = 0.0
problem.remove(cuts)
cuts.clear()
return inc_minimal | [
"def",
"find_inconsistent_min_stoichiometry",
"(",
"model",
",",
"atol",
"=",
"1e-13",
")",
":",
"if",
"check_stoichiometric_consistency",
"(",
"model",
")",
":",
"return",
"set",
"(",
")",
"Model",
",",
"Constraint",
",",
"Variable",
",",
"Objective",
"=",
"con_helpers",
".",
"get_interface",
"(",
"model",
")",
"unconserved_mets",
"=",
"find_unconserved_metabolites",
"(",
"model",
")",
"LOGGER",
".",
"info",
"(",
"\"model has %d unconserved metabolites\"",
",",
"len",
"(",
"unconserved_mets",
")",
")",
"internal_rxns",
"=",
"con_helpers",
".",
"get_internals",
"(",
"model",
")",
"internal_mets",
"=",
"set",
"(",
"met",
"for",
"rxn",
"in",
"internal_rxns",
"for",
"met",
"in",
"rxn",
".",
"metabolites",
")",
"get_id",
"=",
"attrgetter",
"(",
"\"id\"",
")",
"reactions",
"=",
"sorted",
"(",
"internal_rxns",
",",
"key",
"=",
"get_id",
")",
"metabolites",
"=",
"sorted",
"(",
"internal_mets",
",",
"key",
"=",
"get_id",
")",
"stoich",
",",
"met_index",
",",
"rxn_index",
"=",
"con_helpers",
".",
"stoichiometry_matrix",
"(",
"metabolites",
",",
"reactions",
")",
"left_ns",
"=",
"con_helpers",
".",
"nullspace",
"(",
"stoich",
".",
"T",
")",
"# deal with numerical instabilities",
"left_ns",
"[",
"np",
".",
"abs",
"(",
"left_ns",
")",
"<",
"atol",
"]",
"=",
"0.0",
"LOGGER",
".",
"info",
"(",
"\"nullspace has dimension %d\"",
",",
"left_ns",
".",
"shape",
"[",
"1",
"]",
")",
"inc_minimal",
"=",
"set",
"(",
")",
"(",
"problem",
",",
"indicators",
")",
"=",
"con_helpers",
".",
"create_milp_problem",
"(",
"left_ns",
",",
"metabolites",
",",
"Model",
",",
"Variable",
",",
"Constraint",
",",
"Objective",
")",
"LOGGER",
".",
"debug",
"(",
"str",
"(",
"problem",
")",
")",
"cuts",
"=",
"list",
"(",
")",
"for",
"met",
"in",
"unconserved_mets",
":",
"row",
"=",
"met_index",
"[",
"met",
"]",
"if",
"(",
"left_ns",
"[",
"row",
"]",
"==",
"0.0",
")",
".",
"all",
"(",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"%s: singleton minimal unconservable set\"",
",",
"met",
".",
"id",
")",
"# singleton set!",
"inc_minimal",
".",
"add",
"(",
"(",
"met",
",",
")",
")",
"continue",
"# expect a positive mass for the unconserved metabolite",
"problem",
".",
"variables",
"[",
"met",
".",
"id",
"]",
".",
"lb",
"=",
"1e-3",
"status",
"=",
"problem",
".",
"optimize",
"(",
")",
"while",
"status",
"==",
"\"optimal\"",
":",
"LOGGER",
".",
"debug",
"(",
"\"%s: status %s\"",
",",
"met",
".",
"id",
",",
"status",
")",
"LOGGER",
".",
"debug",
"(",
"\"sum of all primal values: %f\"",
",",
"sum",
"(",
"problem",
".",
"primal_values",
".",
"values",
"(",
")",
")",
")",
"LOGGER",
".",
"debug",
"(",
"\"sum of binary indicators: %f\"",
",",
"sum",
"(",
"var",
".",
"primal",
"for",
"var",
"in",
"indicators",
")",
")",
"solution",
"=",
"[",
"model",
".",
"metabolites",
".",
"get_by_id",
"(",
"var",
".",
"name",
"[",
"2",
":",
"]",
")",
"for",
"var",
"in",
"indicators",
"if",
"var",
".",
"primal",
">",
"0.2",
"]",
"LOGGER",
".",
"debug",
"(",
"\"%s: set size %d\"",
",",
"met",
".",
"id",
",",
"len",
"(",
"solution",
")",
")",
"inc_minimal",
".",
"add",
"(",
"tuple",
"(",
"solution",
")",
")",
"if",
"len",
"(",
"solution",
")",
"==",
"1",
":",
"break",
"cuts",
".",
"append",
"(",
"con_helpers",
".",
"add_cut",
"(",
"problem",
",",
"indicators",
",",
"len",
"(",
"solution",
")",
"-",
"1",
",",
"Constraint",
")",
")",
"status",
"=",
"problem",
".",
"optimize",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"\"%s: last status %s\"",
",",
"met",
".",
"id",
",",
"status",
")",
"# reset",
"problem",
".",
"variables",
"[",
"met",
".",
"id",
"]",
".",
"lb",
"=",
"0.0",
"problem",
".",
"remove",
"(",
"cuts",
")",
"cuts",
".",
"clear",
"(",
")",
"return",
"inc_minimal"
] | Detect inconsistent minimal net stoichiometries.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
atol : float, optional
Values below the absolute tolerance are treated as zero. Expected to be
very small but larger than zero.
Notes
-----
See [1]_ section 3.3 for a complete description of the algorithm.
References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245. | [
"Detect",
"inconsistent",
"minimal",
"net",
"stoichiometries",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L170-L246 |
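A usage sketch, again assuming a hypothetical "model.xml"; note that the function short-circuits to an empty set when the model is already stoichiometrically consistent:

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
minimal_sets = consistency.find_inconsistent_min_stoichiometry(model)
# Each entry is a tuple of metabolites forming a minimal unconservable set.
for mets in minimal_sets:
    print([met.id for met in mets])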
opencobra/memote | memote/support/consistency.py | detect_energy_generating_cycles | def detect_energy_generating_cycles(model, metabolite_id):
u"""
Detect erroneous energy-generating cycles for a single metabolite.
The function will first build a dissipation reaction corresponding to the
input metabolite. This reaction is then set as the objective for
optimization, after closing all exchanges. If the reaction was able to
carry flux, an erroneous energy-generating cycle must be present. In this
case a list of reactions with a flux greater than zero is returned.
Otherwise, the function returns an empty list.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
metabolite_id : str
The identifier of an energy metabolite.
Notes
-----
"[...] energy generating cycles (EGC) [...] charge energy metabolites
without a source of energy. [...] To efficiently identify the existence of
diverse EGCs, we first add a dissipation reaction to the metabolic network
for each metabolite used to transmit cellular energy; e.g., for ATP, the
irreversible reaction ATP + H2O → ADP + P + H+ is added. These dissipation
reactions close any existing energy-generating cycles, thereby converting
them to type-III pathways. Fluxes through any of the dissipation reactions
at steady state indicate the generation of energy through the metabolic
network. Second, all uptake reactions are constrained to zero. The sum of
the fluxes through the energy dissipation reactions is now maximized using
FBA. For a model without EGCs, these reactions cannot carry any flux
without the uptake of nutrients. [1]_."
References
----------
.. [1] Fritzemeier, C. J., Hartleb, D., Szappanos, B., Papp, B., & Lercher,
M. J. (2017). Erroneous energy-generating cycles in published genome scale
metabolic networks: Identification and removal. PLoS Computational
Biology, 13(4), 1–14. http://doi.org/10.1371/journal.pcbi.1005494
"""
main_comp = helpers.find_compartment_id_in_model(model, 'c')
met = helpers.find_met_in_model(model, metabolite_id, main_comp)[0]
dissipation_rxn = Reaction('Dissipation')
if metabolite_id in ['MNXM3', 'MNXM63', 'MNXM51', 'MNXM121', 'MNXM423']:
# build nucleotide-type dissipation reaction
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1,
helpers.find_met_in_model(model, 'MNXM9', main_comp)[0]: 1,
})
elif metabolite_id in ['MNXM6', 'MNXM10']:
# build nicotinamide-type dissipation reaction
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1
})
elif metabolite_id in ['MNXM38', 'MNXM208', 'MNXM191', 'MNXM223',
'MNXM7517', 'MNXM12233', 'MNXM558']:
# build redox-partner-type dissipation reaction
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 2
})
elif metabolite_id == 'MNXM21':
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1,
helpers.find_met_in_model(model, 'MNXM26', main_comp)[0]: 1,
})
elif metabolite_id == 'MNXM89557':
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 2,
helpers.find_met_in_model(model, 'MNXM15', main_comp)[0]: 1,
})
dissipation_product = helpers.find_met_in_model(
model,
ENERGY_COUPLES[metabolite_id],
main_comp)[0]
dissipation_rxn.add_metabolites(
{met: -1, dissipation_product: 1})
helpers.close_boundaries_sensibly(model)
model.add_reactions([dissipation_rxn])
model.objective = dissipation_rxn
solution = model.optimize(raise_error=True)
if solution.objective_value > 0.0:
return solution.fluxes[solution.fluxes.abs() > 0.0].index. \
drop(["Dissipation"]).tolist()
else:
return [] | python | def detect_energy_generating_cycles(model, metabolite_id):
u"""
Detect erroneous energy-generating cycles for a single metabolite.
The function will first build a dissipation reaction corresponding to the
input metabolite. This reaction is then set as the objective for
optimization, after closing all exchanges. If the reaction was able to
carry flux, an erroneous energy-generating cycle must be present. In this
case a list of reactions with a flux greater than zero is returned.
Otherwise, the function returns an empty list.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
metabolite_id : str
The identifier of an energy metabolite.
Notes
-----
"[...] energy generating cycles (EGC) [...] charge energy metabolites
without a source of energy. [...] To efficiently identify the existence of
diverse EGCs, we first add a dissipation reaction to the metabolic network
for each metabolite used to transmit cellular energy; e.g., for ATP, the
irreversible reaction ATP + H2O → ADP + P + H+ is added. These dissipation
reactions close any existing energy-generating cycles, thereby converting
them to type-III pathways. Fluxes through any of the dissipation reactions
at steady state indicate the generation of energy through the metabolic
network. Second, all uptake reactions are constrained to zero. The sum of
the fluxes through the energy dissipation reactions is now maximized using
FBA. For a model without EGCs, these reactions cannot carry any flux
without the uptake of nutrients. [1]_."
References
----------
.. [1] Fritzemeier, C. J., Hartleb, D., Szappanos, B., Papp, B., & Lercher,
M. J. (2017). Erroneous energy-generating cycles in published genome scale
metabolic networks: Identification and removal. PLoS Computational
Biology, 13(4), 1–14. http://doi.org/10.1371/journal.pcbi.1005494
"""
main_comp = helpers.find_compartment_id_in_model(model, 'c')
met = helpers.find_met_in_model(model, metabolite_id, main_comp)[0]
dissipation_rxn = Reaction('Dissipation')
if metabolite_id in ['MNXM3', 'MNXM63', 'MNXM51', 'MNXM121', 'MNXM423']:
# build nucleotide-type dissipation reaction
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1,
helpers.find_met_in_model(model, 'MNXM9', main_comp)[0]: 1,
})
elif metabolite_id in ['MNXM6', 'MNXM10']:
# build nicotinamide-type dissipation reaction
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1
})
elif metabolite_id in ['MNXM38', 'MNXM208', 'MNXM191', 'MNXM223',
'MNXM7517', 'MNXM12233', 'MNXM558']:
# build redox-partner-type dissipation reaction
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 2
})
elif metabolite_id == 'MNXM21':
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1,
helpers.find_met_in_model(model, 'MNXM26', main_comp)[0]: 1,
})
elif metabolite_id == 'MNXM89557':
dissipation_rxn.add_metabolites({
helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 2,
helpers.find_met_in_model(model, 'MNXM15', main_comp)[0]: 1,
})
dissipation_product = helpers.find_met_in_model(
model,
ENERGY_COUPLES[metabolite_id],
main_comp)[0]
dissipation_rxn.add_metabolites(
{met: -1, dissipation_product: 1})
helpers.close_boundaries_sensibly(model)
model.add_reactions([dissipation_rxn])
model.objective = dissipation_rxn
solution = model.optimize(raise_error=True)
if solution.objective_value > 0.0:
return solution.fluxes[solution.fluxes.abs() > 0.0].index. \
drop(["Dissipation"]).tolist()
else:
return [] | [
"def",
"detect_energy_generating_cycles",
"(",
"model",
",",
"metabolite_id",
")",
":",
"main_comp",
"=",
"helpers",
".",
"find_compartment_id_in_model",
"(",
"model",
",",
"'c'",
")",
"met",
"=",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"metabolite_id",
",",
"main_comp",
")",
"[",
"0",
"]",
"dissipation_rxn",
"=",
"Reaction",
"(",
"'Dissipation'",
")",
"if",
"metabolite_id",
"in",
"[",
"'MNXM3'",
",",
"'MNXM63'",
",",
"'MNXM51'",
",",
"'MNXM121'",
",",
"'MNXM423'",
"]",
":",
"# build nucleotide-type dissipation reaction",
"dissipation_rxn",
".",
"add_metabolites",
"(",
"{",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM2'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"-",
"1",
",",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM1'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"1",
",",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM9'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"1",
",",
"}",
")",
"elif",
"metabolite_id",
"in",
"[",
"'MNXM6'",
",",
"'MNXM10'",
"]",
":",
"# build nicotinamide-type dissipation reaction",
"dissipation_rxn",
".",
"add_metabolites",
"(",
"{",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM1'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"1",
"}",
")",
"elif",
"metabolite_id",
"in",
"[",
"'MNXM38'",
",",
"'MNXM208'",
",",
"'MNXM191'",
",",
"'MNXM223'",
",",
"'MNXM7517'",
",",
"'MNXM12233'",
",",
"'MNXM558'",
"]",
":",
"# build redox-partner-type dissipation reaction",
"dissipation_rxn",
".",
"add_metabolites",
"(",
"{",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM1'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"2",
"}",
")",
"elif",
"metabolite_id",
"==",
"'MNXM21'",
":",
"dissipation_rxn",
".",
"add_metabolites",
"(",
"{",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM2'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"-",
"1",
",",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM1'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"1",
",",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM26'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"1",
",",
"}",
")",
"elif",
"metabolite_id",
"==",
"'MNXM89557'",
":",
"dissipation_rxn",
".",
"add_metabolites",
"(",
"{",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM2'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"-",
"1",
",",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM1'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"2",
",",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"'MNXM15'",
",",
"main_comp",
")",
"[",
"0",
"]",
":",
"1",
",",
"}",
")",
"dissipation_product",
"=",
"helpers",
".",
"find_met_in_model",
"(",
"model",
",",
"ENERGY_COUPLES",
"[",
"metabolite_id",
"]",
",",
"main_comp",
")",
"[",
"0",
"]",
"dissipation_rxn",
".",
"add_metabolites",
"(",
"{",
"met",
":",
"-",
"1",
",",
"dissipation_product",
":",
"1",
"}",
")",
"helpers",
".",
"close_boundaries_sensibly",
"(",
"model",
")",
"model",
".",
"add_reactions",
"(",
"[",
"dissipation_rxn",
"]",
")",
"model",
".",
"objective",
"=",
"dissipation_rxn",
"solution",
"=",
"model",
".",
"optimize",
"(",
"raise_error",
"=",
"True",
")",
"if",
"solution",
".",
"objective_value",
">",
"0.0",
":",
"return",
"solution",
".",
"fluxes",
"[",
"solution",
".",
"fluxes",
".",
"abs",
"(",
")",
">",
"0.0",
"]",
".",
"index",
".",
"drop",
"(",
"[",
"\"Dissipation\"",
"]",
")",
".",
"tolist",
"(",
")",
"else",
":",
"return",
"[",
"]"
] | u"""
Detect erroneous energy-generating cycles for a single metabolite.
The function will first build a dissipation reaction corresponding to the
input metabolite. This reaction is then set as the objective for
optimization, after closing all exchanges. If the reaction was able to
carry flux, an erroneous energy-generating cycle must be present. In this
case a list of reactions with a flux greater than zero is returned.
Otherwise, the function returns an empty list.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
metabolite_id : str
The identifier of an energy metabolite.
Notes
-----
"[...] energy generating cycles (EGC) [...] charge energy metabolites
without a source of energy. [...] To efficiently identify the existence of
diverse EGCs, we first add a dissipation reaction to the metabolic network
for each metabolite used to transmit cellular energy; e.g., for ATP, the
irreversible reaction ATP + H2O → ADP + P + H+ is added. These dissipation
reactions close any existing energy-generating cycles, thereby converting
them to type-III pathways. Fluxes through any of the dissipation reactions
at steady state indicate the generation of energy through the metabolic
network. Second, all uptake reactions are constrained to zero. The sum of
the fluxes through the energy dissipation reactions is now maximized using
FBA. For a model without EGCs, these reactions cannot carry any flux
without the uptake of nutrients. [1]_."
References
----------
.. [1] Fritzemeier, C. J., Hartleb, D., Szappanos, B., Papp, B., & Lercher,
M. J. (2017). Erroneous energy-generating cycles in published genome scale
metabolic networks: Identification and removal. PLoS Computational
Biology, 13(4), 1–14. http://doi.org/10.1371/journal.pcbi.1005494 | [
"u",
"Detect",
"erroneous",
"energy",
"-",
"generating",
"cycles",
"for",
"a",
"a",
"single",
"metabolite",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L277-L369 |
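A usage sketch for an ATP-charging cycle check, assuming a hypothetical "model.xml". "MNXM3" is the MetaNetX identifier for ATP handled by the nucleotide branch above; because the function closes boundaries and adds a dissipation reaction in place, the call is wrapped in a cobrapy context to revert those changes afterwards:

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
with model:  # revert the closed boundaries and the added dissipation reaction
    cycle = consistency.detect_energy_generating_cycles(model, "MNXM3")
print(cycle)  # an empty list means no ATP-generating cycle was detected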
opencobra/memote | memote/support/consistency.py | find_stoichiometrically_balanced_cycles | def find_stoichiometrically_balanced_cycles(model):
u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabolic reconstructions due to insufficient
constraints (e.g., thermodynamic constraints and regulatory
constraints) [1]_." They are defined by internal reactions that carry
flux in spite of closed exchange reactions.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
helpers.close_boundaries_sensibly(model)
fva_result = flux_variability_analysis(model, loopless=False)
return fva_result.index[
(fva_result["minimum"] <= (-1 + TOLERANCE_THRESHOLD)) |
(fva_result["maximum"] >= (1 - TOLERANCE_THRESHOLD))
].tolist() | python | def find_stoichiometrically_balanced_cycles(model):
u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabolic reconstructions due to insufficient
constraints (e.g., thermodynamic constraints and regulatory
constraints) [1]_." They are defined by internal reactions that carry
flux in spite of closed exchange reactions.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
helpers.close_boundaries_sensibly(model)
fva_result = flux_variability_analysis(model, loopless=False)
return fva_result.index[
(fva_result["minimum"] <= (-1 + TOLERANCE_THRESHOLD)) |
(fva_result["maximum"] >= (1 - TOLERANCE_THRESHOLD))
].tolist() | [
"def",
"find_stoichiometrically_balanced_cycles",
"(",
"model",
")",
":",
"helpers",
".",
"close_boundaries_sensibly",
"(",
"model",
")",
"fva_result",
"=",
"flux_variability_analysis",
"(",
"model",
",",
"loopless",
"=",
"False",
")",
"return",
"fva_result",
".",
"index",
"[",
"(",
"fva_result",
"[",
"\"minimum\"",
"]",
"<=",
"(",
"-",
"1",
"+",
"TOLERANCE_THRESHOLD",
")",
")",
"|",
"(",
"fva_result",
"[",
"\"maximum\"",
"]",
">=",
"(",
"1",
"-",
"TOLERANCE_THRESHOLD",
")",
")",
"]",
".",
"tolist",
"(",
")"
] | u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabolic reconstructions due to insufficient
constraints (e.g., thermodynamic constraints and regulatory
constraints) [1]_." They are defined by internal reactions that carry
flux in spite of closed exchange reactions.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203 | [
"u",
"Find",
"metabolic",
"reactions",
"in",
"stoichiometrically",
"balanced",
"cycles",
"(",
"SBCs",
")",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L400-L431 |
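A usage sketch (hypothetical "model.xml"); this function also closes all boundaries in place, so it is likewise wrapped in a context:

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
with model:  # revert the closed boundaries afterwards
    sbc_reactions = consistency.find_stoichiometrically_balanced_cycles(model)
print(len(sbc_reactions), "reactions participate in SBCs")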
opencobra/memote | memote/support/consistency.py | find_orphans | def find_orphans(model):
"""
Return metabolites that are only consumed in reactions.
Metabolites that are involved in an exchange reaction are never
considered to be orphaned.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
exchange = frozenset(model.exchanges)
return [
met for met in model.metabolites
if (len(met.reactions) > 0) and all(
(not rxn.reversibility) and (rxn not in exchange) and
(rxn.metabolites[met] < 0) for rxn in met.reactions
)
] | python | def find_orphans(model):
"""
Return metabolites that are only consumed in reactions.
Metabolites that are involved in an exchange reaction are never
considered to be orphaned.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
exchange = frozenset(model.exchanges)
return [
met for met in model.metabolites
if (len(met.reactions) > 0) and all(
(not rxn.reversibility) and (rxn not in exchange) and
(rxn.metabolites[met] < 0) for rxn in met.reactions
)
] | [
"def",
"find_orphans",
"(",
"model",
")",
":",
"exchange",
"=",
"frozenset",
"(",
"model",
".",
"exchanges",
")",
"return",
"[",
"met",
"for",
"met",
"in",
"model",
".",
"metabolites",
"if",
"(",
"len",
"(",
"met",
".",
"reactions",
")",
">",
"0",
")",
"and",
"all",
"(",
"(",
"not",
"rxn",
".",
"reversibility",
")",
"and",
"(",
"rxn",
"not",
"in",
"exchange",
")",
"and",
"(",
"rxn",
".",
"metabolites",
"[",
"met",
"]",
"<",
"0",
")",
"for",
"rxn",
"in",
"met",
".",
"reactions",
")",
"]"
] | Return metabolites that are only consumed in reactions.
Metabolites that are involved in an exchange reaction are never
considered to be orphaned.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | [
"Return",
"metabolites",
"that",
"are",
"only",
"consumed",
"in",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L434-L454 |
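A usage sketch (hypothetical "model.xml"); orphans here are metabolites that are only ever consumed by irreversible, non-exchange reactions:

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
print([met.id for met in consistency.find_orphans(model)])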
opencobra/memote | memote/support/consistency.py | find_metabolites_not_produced_with_open_bounds | def find_metabolites_not_produced_with_open_bounds(model):
"""
Return metabolites that cannot be produced with open exchange reactions.
A perfect model should be able to produce each and every metabolite when
all medium components are available.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be produced.
"""
mets_not_produced = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
solution = helpers.run_fba(model, exch.id)
if np.isnan(solution) or solution < TOLERANCE_THRESHOLD:
mets_not_produced.append(met)
return mets_not_produced | python | def find_metabolites_not_produced_with_open_bounds(model):
"""
Return metabolites that cannot be produced with open exchange reactions.
A perfect model should be able to produce each and every metabolite when
all medium components are available.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be produced.
"""
mets_not_produced = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
solution = helpers.run_fba(model, exch.id)
if np.isnan(solution) or solution < TOLERANCE_THRESHOLD:
mets_not_produced.append(met)
return mets_not_produced | [
"def",
"find_metabolites_not_produced_with_open_bounds",
"(",
"model",
")",
":",
"mets_not_produced",
"=",
"list",
"(",
")",
"helpers",
".",
"open_exchanges",
"(",
"model",
")",
"for",
"met",
"in",
"model",
".",
"metabolites",
":",
"with",
"model",
":",
"exch",
"=",
"model",
".",
"add_boundary",
"(",
"met",
",",
"type",
"=",
"\"irrex\"",
",",
"reaction_id",
"=",
"\"IRREX\"",
",",
"lb",
"=",
"0",
",",
"ub",
"=",
"1000",
")",
"solution",
"=",
"helpers",
".",
"run_fba",
"(",
"model",
",",
"exch",
".",
"id",
")",
"if",
"np",
".",
"isnan",
"(",
"solution",
")",
"or",
"solution",
"<",
"TOLERANCE_THRESHOLD",
":",
"mets_not_produced",
".",
"append",
"(",
"met",
")",
"return",
"mets_not_produced"
] | Return metabolites that cannot be produced with open exchange reactions.
A perfect model should be able to produce each and every metabolite when
all medium components are available.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be produced. | [
"Return",
"metabolites",
"that",
"cannot",
"be",
"produced",
"with",
"open",
"exchange",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L493-L520 |
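A usage sketch (hypothetical "model.xml"); since the function opens all exchange bounds in place, the call is wrapped in a context to restore the original medium:

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
with model:  # revert the opened exchange bounds afterwards
    not_produced = consistency.find_metabolites_not_produced_with_open_bounds(model)
print([met.id for met in not_produced])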
opencobra/memote | memote/support/consistency.py | find_metabolites_not_consumed_with_open_bounds | def find_metabolites_not_consumed_with_open_bounds(model):
"""
Return metabolites that cannot be consumed with open boundary reactions.
When all metabolites can be secreted, it should be possible for each and
every metabolite to be consumed in some form.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be consumed.
"""
mets_not_consumed = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=-1000, ub=0)
solution = helpers.run_fba(model, exch.id, direction="min")
if np.isnan(solution) or abs(solution) < TOLERANCE_THRESHOLD:
mets_not_consumed.append(met)
return mets_not_consumed | python | def find_metabolites_not_consumed_with_open_bounds(model):
"""
Return metabolites that cannot be consumed with open boundary reactions.
When all metabolites can be secreted, it should be possible for each and
every metabolite to be consumed in some form.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be consumed.
"""
mets_not_consumed = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=-1000, ub=0)
solution = helpers.run_fba(model, exch.id, direction="min")
if np.isnan(solution) or abs(solution) < TOLERANCE_THRESHOLD:
mets_not_consumed.append(met)
return mets_not_consumed | [
"def",
"find_metabolites_not_consumed_with_open_bounds",
"(",
"model",
")",
":",
"mets_not_consumed",
"=",
"list",
"(",
")",
"helpers",
".",
"open_exchanges",
"(",
"model",
")",
"for",
"met",
"in",
"model",
".",
"metabolites",
":",
"with",
"model",
":",
"exch",
"=",
"model",
".",
"add_boundary",
"(",
"met",
",",
"type",
"=",
"\"irrex\"",
",",
"reaction_id",
"=",
"\"IRREX\"",
",",
"lb",
"=",
"-",
"1000",
",",
"ub",
"=",
"0",
")",
"solution",
"=",
"helpers",
".",
"run_fba",
"(",
"model",
",",
"exch",
".",
"id",
",",
"direction",
"=",
"\"min\"",
")",
"if",
"np",
".",
"isnan",
"(",
"solution",
")",
"or",
"abs",
"(",
"solution",
")",
"<",
"TOLERANCE_THRESHOLD",
":",
"mets_not_consumed",
".",
"append",
"(",
"met",
")",
"return",
"mets_not_consumed"
] | Return metabolites that cannot be consumed with open boundary reactions.
When all metabolites can be secreted, it should be possible for each and
every metabolite to be consumed in some form.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be consumed. | [
"Return",
"metabolites",
"that",
"cannot",
"be",
"consumed",
"with",
"open",
"boundary",
"reactions",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L523-L550 |
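The consumption-side counterpart, under the same assumptions (hypothetical "model.xml", exchanges opened in place):

from cobra.io import read_sbml_model
from memote.support import consistency

model = read_sbml_model("model.xml")  # hypothetical file name
with model:  # revert the opened exchange bounds afterwards
    not_consumed = consistency.find_metabolites_not_consumed_with_open_bounds(model)
print([met.id for met in not_consumed])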
opencobra/memote | memote/support/consistency.py | find_reactions_with_unbounded_flux_default_condition | def find_reactions_with_unbounded_flux_default_condition(model):
"""
Return list of reactions whose flux is unbounded in the default condition.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
tuple
list
A list of reactions that in default modeling conditions are able to
carry flux as high/low as the system's maximal and minimal bounds.
float
The fraction of unbounded reactions among all non-blocked reactions.
list
A list of reactions that in default modeling conditions are not able
to carry flux at all.
"""
try:
fva_result = flux_variability_analysis(model, fraction_of_optimum=1.0)
except Infeasible as err:
LOGGER.error("Failed to find reactions with unbounded flux "
"because '{}'. This may be a bug.".format(err))
raise Infeasible("It was not possible to run flux variability "
"analysis on the model. Make sure that the model "
"can be solved! Check if the constraints are not "
"too strict.")
# Per reaction (row) the flux is below threshold (close to zero).
conditionally_blocked = fva_result.loc[
fva_result.abs().max(axis=1) < TOLERANCE_THRESHOLD
].index.tolist()
small, large = helpers.find_bounds(model)
# Find those reactions whose flux is close to or outside of the median
# upper or lower bound, i.e., appears unconstrained.
unlimited_flux = fva_result.loc[
np.isclose(fva_result["maximum"], large, atol=TOLERANCE_THRESHOLD) |
(fva_result["maximum"] > large) |
np.isclose(fva_result["minimum"], small, atol=TOLERANCE_THRESHOLD) |
(fva_result["minimum"] < small)
].index.tolist()
try:
fraction = len(unlimited_flux) / \
(len(model.reactions) - len(conditionally_blocked))
except ZeroDivisionError:
LOGGER.error("Division by Zero! Failed to calculate the "
"fraction of unbounded reactions. Does this model "
"have any reactions at all?")
raise ZeroDivisionError("It was not possible to calculate the "
"fraction of unbounded reactions to "
"un-blocked reactions. This may be because"
"the model doesn't have any reactions at "
"all or that none of the reactions can "
"carry a flux larger than zero!")
return unlimited_flux, fraction, conditionally_blocked | python | def find_reactions_with_unbounded_flux_default_condition(model):
"""
Return list of reactions whose flux is unbounded in the default condition.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
tuple
list
A list of reactions that in default modeling conditions are able to
carry flux as high/low as the system's maximal and minimal bounds.
float
The fraction of unbounded reactions among all non-blocked reactions.
list
A list of reactions that in default modeling conditions are not able
to carry flux at all.
"""
try:
fva_result = flux_variability_analysis(model, fraction_of_optimum=1.0)
except Infeasible as err:
LOGGER.error("Failed to find reactions with unbounded flux "
"because '{}'. This may be a bug.".format(err))
raise Infeasible("It was not possible to run flux variability "
"analysis on the model. Make sure that the model "
"can be solved! Check if the constraints are not "
"too strict.")
# Per reaction (row) the flux is below threshold (close to zero).
conditionally_blocked = fva_result.loc[
fva_result.abs().max(axis=1) < TOLERANCE_THRESHOLD
].index.tolist()
small, large = helpers.find_bounds(model)
# Find those reactions whose flux is close to or outside of the median
# upper or lower bound, i.e., appears unconstrained.
unlimited_flux = fva_result.loc[
np.isclose(fva_result["maximum"], large, atol=TOLERANCE_THRESHOLD) |
(fva_result["maximum"] > large) |
np.isclose(fva_result["minimum"], small, atol=TOLERANCE_THRESHOLD) |
(fva_result["minimum"] < small)
].index.tolist()
try:
fraction = len(unlimited_flux) / \
(len(model.reactions) - len(conditionally_blocked))
except ZeroDivisionError:
LOGGER.error("Division by Zero! Failed to calculate the "
"fraction of unbounded reactions. Does this model "
"have any reactions at all?")
raise ZeroDivisionError("It was not possible to calculate the "
"fraction of unbounded reactions to "
"un-blocked reactions. This may be because"
"the model doesn't have any reactions at "
"all or that none of the reactions can "
"carry a flux larger than zero!")
return unlimited_flux, fraction, conditionally_blocked | [
"def",
"find_reactions_with_unbounded_flux_default_condition",
"(",
"model",
")",
":",
"try",
":",
"fva_result",
"=",
"flux_variability_analysis",
"(",
"model",
",",
"fraction_of_optimum",
"=",
"1.0",
")",
"except",
"Infeasible",
"as",
"err",
":",
"LOGGER",
".",
"error",
"(",
"\"Failed to find reactions with unbounded flux \"",
"\"because '{}'. This may be a bug.\"",
".",
"format",
"(",
"err",
")",
")",
"raise",
"Infeasible",
"(",
"\"It was not possible to run flux variability \"",
"\"analysis on the model. Make sure that the model \"",
"\"can be solved! Check if the constraints are not \"",
"\"too strict.\"",
")",
"# Per reaction (row) the flux is below threshold (close to zero).",
"conditionally_blocked",
"=",
"fva_result",
".",
"loc",
"[",
"fva_result",
".",
"abs",
"(",
")",
".",
"max",
"(",
"axis",
"=",
"1",
")",
"<",
"TOLERANCE_THRESHOLD",
"]",
".",
"index",
".",
"tolist",
"(",
")",
"small",
",",
"large",
"=",
"helpers",
".",
"find_bounds",
"(",
"model",
")",
"# Find those reactions whose flux is close to or outside of the median",
"# upper or lower bound, i.e., appears unconstrained.",
"unlimited_flux",
"=",
"fva_result",
".",
"loc",
"[",
"np",
".",
"isclose",
"(",
"fva_result",
"[",
"\"maximum\"",
"]",
",",
"large",
",",
"atol",
"=",
"TOLERANCE_THRESHOLD",
")",
"|",
"(",
"fva_result",
"[",
"\"maximum\"",
"]",
">",
"large",
")",
"|",
"np",
".",
"isclose",
"(",
"fva_result",
"[",
"\"minimum\"",
"]",
",",
"small",
",",
"atol",
"=",
"TOLERANCE_THRESHOLD",
")",
"|",
"(",
"fva_result",
"[",
"\"minimum\"",
"]",
"<",
"small",
")",
"]",
".",
"index",
".",
"tolist",
"(",
")",
"try",
":",
"fraction",
"=",
"len",
"(",
"unlimited_flux",
")",
"/",
"(",
"len",
"(",
"model",
".",
"reactions",
")",
"-",
"len",
"(",
"conditionally_blocked",
")",
")",
"except",
"ZeroDivisionError",
":",
"LOGGER",
".",
"error",
"(",
"\"Division by Zero! Failed to calculate the \"",
"\"fraction of unbounded reactions. Does this model \"",
"\"have any reactions at all?\"",
")",
"raise",
"ZeroDivisionError",
"(",
"\"It was not possible to calculate the \"",
"\"fraction of unbounded reactions to \"",
"\"un-blocked reactions. This may be because\"",
"\"the model doesn't have any reactions at \"",
"\"all or that none of the reactions can \"",
"\"carry a flux larger than zero!\"",
")",
"return",
"unlimited_flux",
",",
"fraction",
",",
"conditionally_blocked"
] | Return list of reactions whose flux is unbounded in the default condition.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
tuple
list
A list of reactions that in default modeling conditions are able to
carry flux as high/low as the system's maximal and minimal bounds.
float
The ratio of the number of unbounded reactions to the number of
non-blocked reactions.
list
A list of reactions that in default modeling conditions are not able
to carry flux at all. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L553-L612 |
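The classification above is plain pandas masking over the FVA result table. A minimal, self-contained sketch of the same two masks on hand-made numbers; the toy bounds of +/-1000 and the 1e-07 cutoff are stand-ins for the values memote derives from the model via helpers.find_bounds and its tolerance constant:

import numpy as np
import pandas as pd

TOLERANCE_THRESHOLD = 1e-07  # stand-in for memote's tolerance constant
small, large = -1000.0, 1000.0  # stand-ins for helpers.find_bounds(model)

# Toy FVA result: one row per reaction with its minimum and maximum flux.
fva_result = pd.DataFrame(
    {"minimum": [0.0, -1000.0, -5.0], "maximum": [0.0, 1000.0, 7.5]},
    index=["R_blocked", "R_unbounded", "R_limited"],
)
conditionally_blocked = fva_result[
    fva_result.abs().max(axis=1) < TOLERANCE_THRESHOLD
].index.tolist()
unlimited_flux = fva_result.loc[
    np.isclose(fva_result["maximum"], large, atol=TOLERANCE_THRESHOLD)
    | (fva_result["maximum"] > large)
    | np.isclose(fva_result["minimum"], small, atol=TOLERANCE_THRESHOLD)
    | (fva_result["minimum"] < small)
].index.tolist()
print(conditionally_blocked)  # ['R_blocked']
print(unlimited_flux)         # ['R_unbounded']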
opencobra/memote | memote/experimental/tabular.py | read_tabular | def read_tabular(filename, dtype_conversion=None):
"""
Read a tabular data file which can be CSV, TSV, XLS or XLSX.
Parameters
----------
filename : str or pathlib.Path
The full file path. May be a compressed file.
dtype_conversion : dict
Column names as keys and corresponding type for loading the data.
Please take a look at the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
for detailed explanations.
Returns
-------
pandas.DataFrame
The data table.
"""
if dtype_conversion is None:
dtype_conversion = {}
name, ext = filename.split(".", 1)
ext = ext.lower()
# Completely empty columns are interpreted as float by default.
dtype_conversion["comment"] = str
if "csv" in ext:
df = pd.read_csv(filename, dtype=dtype_conversion, encoding="utf-8")
elif "tsv" in ext:
df = pd.read_table(filename, dtype=dtype_conversion, encoding="utf-8")
elif "xls" in ext or "xlsx" in ext:
df = pd.read_excel(filename, dtype=dtype_conversion, encoding="utf-8")
# TODO: Add a function to parse ODS data into a pandas data frame.
else:
raise ValueError("Unknown file format '{}'.".format(ext))
return df | python | Read a tabular data file which can be CSV, TSV, XLS or XLSX.
Parameters
----------
filename : str or pathlib.Path
The full file path. May be a compressed file.
dtype_conversion : dict
Column names as keys and corresponding type for loading the data.
Please take a look at the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
for detailed explanations.
Returns
-------
pandas.DataFrame
The data table. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/tabular.py#L25-L60 |
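For a CSV input the extension dispatch above reduces to a single pandas call. A self-contained sketch; the file name and columns are invented, and the explicit str dtype for the comment column mirrors the override in the function:

import pandas as pd

# Write a small CSV so the example stands alone.
with open("essentiality.csv", "w", encoding="utf-8") as handle:
    handle.write("gene,essential,comment\nb0001,TRUE,\nb0002,FALSE,checked twice\n")

df = pd.read_csv("essentiality.csv", dtype={"comment": str}, encoding="utf-8")
print(df.dtypes)  # the comment column stays object (str) even though it is sparse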
opencobra/memote | memote/suite/cli/reports.py | snapshot | def snapshot(model, filename, pytest_args, exclusive, skip, solver,
experimental, custom_tests, custom_config):
"""
Take a snapshot of a model's state and generate a report.
MODEL: Path to model file. Can also be supplied via the environment variable
MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
"""
model_obj, sbml_ver, notifications = api.validate_model(
model)
if model_obj is None:
LOGGER.critical(
"The model could not be loaded due to the following SBML errors.")
utils.stdout_notifications(notifications)
api.validation_report(model, notifications, filename)
sys.exit(1)
if not any(a.startswith("--tb") for a in pytest_args):
pytest_args = ["--tb", "no"] + pytest_args
# Add further directories to search for tests.
pytest_args.extend(custom_tests)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
config.merge(ReportConfiguration.load(custom))
model_obj.solver = solver
_, results = api.test_model(model_obj, sbml_version=sbml_ver, results=True,
pytest_args=pytest_args, skip=skip,
exclusive=exclusive, experimental=experimental)
with open(filename, "w", encoding="utf-8") as file_handle:
LOGGER.info("Writing snapshot report to '%s'.", filename)
file_handle.write(api.snapshot_report(results, config)) | python | Take a snapshot of a model's state and generate a report.
MODEL: Path to model file. Can also be supplied via the environment variable
MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/reports.py#L89-L119 |
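The click command above mostly wires existing api calls together. A programmatic sketch of the same sequence; the import paths below are assumptions, while the call signatures mirror the function body:

from memote.suite import api  # assumed module location
from memote.suite.reporting import ReportConfiguration  # assumed module location

model, sbml_version, notifications = api.validate_model("model.xml")
if model is None:
    raise SystemExit("Model failed SBML validation; see the notifications.")
config = ReportConfiguration.load()
_, results = api.test_model(model, sbml_version=sbml_version, results=True,
                            pytest_args=["--tb", "no"])
with open("report.html", "w", encoding="utf-8") as handle:
    handle.write(api.snapshot_report(results, config))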
opencobra/memote | memote/suite/cli/reports.py | history | def history(location, model, filename, deployment, custom_config):
"""Generate a report over a model's git commit history."""
callbacks.git_installed()
LOGGER.info("Initialising history report generation.")
if location is None:
raise click.BadParameter("No 'location' given or configured.")
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
LOGGER.critical(
"The history report requires a git repository in order to check "
"the model's commit history.")
sys.exit(1)
LOGGER.info("Obtaining history of results from "
"the deployment branch {}.".format(deployment))
repo.git.checkout(deployment)
try:
manager = managers.SQLResultManager(repository=repo, location=location)
except (AttributeError, ArgumentError):
manager = managers.RepoResultManager(
repository=repo, location=location)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
config.merge(ReportConfiguration.load(custom))
LOGGER.info("Tracing the commit history.")
history = managers.HistoryManager(repository=repo, manager=manager)
history.load_history(model, skip={deployment})
LOGGER.info("Composing the history report.")
report = api.history_report(history, config=config)
with open(filename, "w", encoding="utf-8") as file_handle:
file_handle.write(report) | python | Generate a report over a model's git commit history. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/reports.py#L144-L175 |
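Falling back from a SQL-backed result store to a directory store is a plain try/except dispatch. A generic, self-contained sketch of that pattern with toy stand-in classes (the real managers and the ArgumentError come from memote and SQLAlchemy):

class SQLBackend:
    def __init__(self, location):
        if not location.startswith("sqlite://"):
            raise AttributeError("not a database URL")
        self.location = location

class DirectoryBackend:
    def __init__(self, location):
        self.location = location

def choose_backend(location):
    # Try the database interpretation first, fall back to a plain directory.
    try:
        return SQLBackend(location)
    except AttributeError:
        return DirectoryBackend(location)

print(type(choose_backend("sqlite://results.db")).__name__)  # SQLBackend
print(type(choose_backend("results/")).__name__)             # DirectoryBackend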
opencobra/memote | memote/suite/cli/reports.py | diff | def diff(models, filename, pytest_args, exclusive, skip, solver,
experimental, custom_tests, custom_config):
"""
Take a snapshot of all the supplied models and generate a diff report.
MODELS: List of paths to two or more model files.
"""
if not any(a.startswith("--tb") for a in pytest_args):
pytest_args = ["--tb", "no"] + pytest_args
# Add further directories to search for tests.
pytest_args.extend(custom_tests)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
config.merge(ReportConfiguration.load(custom))
# Build the diff report specific data structure
diff_results = dict()
model_and_model_ver_tuple = list()
for model_path in models:
try:
model_filename = os.path.basename(model_path)
diff_results.setdefault(model_filename, dict())
model, model_ver, notifications = api.validate_model(model_path)
if model is None:
head, tail = os.path.split(filename)
report_path = os.path.join(
head, '{}_structural_report.html'.format(model_filename))
api.validation_report(
model_path, notifications, report_path)
LOGGER.critical(
"The model {} could not be loaded due to SBML errors "
"reported in {}.".format(model_filename, report_path))
continue
model.solver = solver
model_and_model_ver_tuple.append((model, model_ver))
except (IOError, SBMLError):
LOGGER.debug("Model loading failed.", exc_info=True)
LOGGER.warning("An error occurred while loading the model '%s'. "
"Skipping.", model_filename)
# Abort the diff report unless at least two models can be loaded
# successfully.
if len(model_and_model_ver_tuple) < 2:
LOGGER.critical(
"Out of the %d provided models only %d could be loaded. Please, "
"check if the models that could not be loaded are valid SBML. "
"Aborting.",
len(models), len(model_and_model_ver_tuple))
sys.exit(1)
# Running pytest in individual processes to avoid interference
partial_test_diff = partial(_test_diff, pytest_args=pytest_args,
skip=skip, exclusive=exclusive,
experimental=experimental)
pool = Pool(min(len(models), cpu_count()))
results = pool.map(partial_test_diff, model_and_model_ver_tuple)
for model_path, result in zip(models, results):
model_filename = os.path.basename(model_path)
diff_results[model_filename] = result
with open(filename, "w", encoding="utf-8") as file_handle:
LOGGER.info("Writing diff report to '%s'.", filename)
file_handle.write(api.diff_report(diff_results, config)) | python | Take a snapshot of all the supplied models and generate a diff report.
MODELS: List of paths to two or more model files. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/reports.py#L226-L287 |
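The parallel fan-out is functools.partial plus multiprocessing.Pool, one worker call per model. The same pattern with a toy worker, self-contained and runnable:

from functools import partial
from multiprocessing import Pool, cpu_count

def test_model_stub(item, pytest_args=None):
    # Stand-in for the real _test_diff worker.
    model_name, version = item
    return "{} (SBML L{}) tested with {}".format(model_name, version, pytest_args)

if __name__ == "__main__":
    pairs = [("model_a.xml", 3), ("model_b.xml", 2)]
    worker = partial(test_model_stub, pytest_args=["--tb", "no"])
    with Pool(min(len(pairs), cpu_count())) as pool:
        print(pool.map(worker, pairs))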
opencobra/memote | memote/suite/results/history_manager.py | HistoryManager.build_branch_structure | def build_branch_structure(self, model, skip):
"""Inspect and record the repo's branches and their history."""
self._history = dict()
self._history["commits"] = commits = dict()
self._history["branches"] = branches = dict()
for branch in self._repo.refs:
LOGGER.debug(branch.name)
if branch.name in skip:
continue
branches[branch.name] = branch_history = list()
latest = branch.commit
history = [latest] + list(latest.iter_parents())
for commit in history:
# Find model in committed files.
if not is_modified(model, commit):
LOGGER.info(
"The model was not modified in commit '{}'. "
"Skipping.".format(commit))
continue
branch_history.append(commit.hexsha)
if commit.hexsha not in commits:
commits[commit.hexsha] = sub = dict()
sub["timestamp"] = commit.authored_datetime.isoformat(" ")
sub["author"] = commit.author.name
sub["email"] = commit.author.email
LOGGER.debug("%s", json.dumps(self._history, indent=2)) | python | def build_branch_structure(self, model, skip):
"""Inspect and record the repo's branches and their history."""
self._history = dict()
self._history["commits"] = commits = dict()
self._history["branches"] = branches = dict()
for branch in self._repo.refs:
LOGGER.debug(branch.name)
if branch.name in skip:
continue
branches[branch.name] = branch_history = list()
latest = branch.commit
history = [latest] + list(latest.iter_parents())
for commit in history:
# Find model in committed files.
if not is_modified(model, commit):
LOGGER.info(
"The model was not modified in commit '{}'. "
"Skipping.".format(commit))
continue
branch_history.append(commit.hexsha)
if commit.hexsha not in commits:
commits[commit.hexsha] = sub = dict()
sub["timestamp"] = commit.authored_datetime.isoformat(" ")
sub["author"] = commit.author.name
sub["email"] = commit.author.email
LOGGER.debug("%s", json.dumps(self._history, indent=2)) | [
"def",
"build_branch_structure",
"(",
"self",
",",
"model",
",",
"skip",
")",
":",
"self",
".",
"_history",
"=",
"dict",
"(",
")",
"self",
".",
"_history",
"[",
"\"commits\"",
"]",
"=",
"commits",
"=",
"dict",
"(",
")",
"self",
".",
"_history",
"[",
"\"branches\"",
"]",
"=",
"branches",
"=",
"dict",
"(",
")",
"for",
"branch",
"in",
"self",
".",
"_repo",
".",
"refs",
":",
"LOGGER",
".",
"debug",
"(",
"branch",
".",
"name",
")",
"if",
"branch",
".",
"name",
"in",
"skip",
":",
"continue",
"branches",
"[",
"branch",
".",
"name",
"]",
"=",
"branch_history",
"=",
"list",
"(",
")",
"latest",
"=",
"branch",
".",
"commit",
"history",
"=",
"[",
"latest",
"]",
"+",
"list",
"(",
"latest",
".",
"iter_parents",
"(",
")",
")",
"for",
"commit",
"in",
"history",
":",
"# Find model in committed files.",
"if",
"not",
"is_modified",
"(",
"model",
",",
"commit",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"The model was not modified in commit '{}'. \"",
"\"Skipping.\"",
".",
"format",
"(",
"commit",
")",
")",
"continue",
"branch_history",
".",
"append",
"(",
"commit",
".",
"hexsha",
")",
"if",
"commit",
".",
"hexsha",
"not",
"in",
"commits",
":",
"commits",
"[",
"commit",
".",
"hexsha",
"]",
"=",
"sub",
"=",
"dict",
"(",
")",
"sub",
"[",
"\"timestamp\"",
"]",
"=",
"commit",
".",
"authored_datetime",
".",
"isoformat",
"(",
"\" \"",
")",
"sub",
"[",
"\"author\"",
"]",
"=",
"commit",
".",
"author",
".",
"name",
"sub",
"[",
"\"email\"",
"]",
"=",
"commit",
".",
"author",
".",
"email",
"LOGGER",
".",
"debug",
"(",
"\"%s\"",
",",
"json",
".",
"dumps",
"(",
"self",
".",
"_history",
",",
"indent",
"=",
"2",
")",
")"
] | Inspect and record the repo's branches and their history. | [
"Inspect",
"and",
"record",
"the",
"repo",
"s",
"branches",
"and",
"their",
"history",
"."
] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/history_manager.py#L65-L90 |
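Commit traversal here is GitPython's refs plus iter_parents. A sketch that walks any local checkout the same way; it requires the gitpython package and must run inside a git repository:

import git

repo = git.Repo(".")
for branch in repo.refs:
    latest = branch.commit
    # Tip commit first, then all ancestors, matching the function above.
    history = [latest] + list(latest.iter_parents())
    print(branch.name, len(history), "commits, tip authored",
          latest.authored_datetime.isoformat(" "))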
opencobra/memote | memote/suite/results/history_manager.py | HistoryManager.load_history | def load_history(self, model, skip={"gh-pages"}):
"""
Load the entire results history into memory.
Could be a bad idea in a far future.
"""
if self._history is None:
self.build_branch_structure(model, skip)
self._results = dict()
all_commits = list(self._history["commits"])
for commit in all_commits:
try:
self._results[commit] = self.manager.load(commit)
except (IOError, NoResultFound) as err:
LOGGER.error("Could not load result '%s'.", commit)
LOGGER.debug("%s", str(err)) | python | def load_history(self, model, skip={"gh-pages"}):
"""
Load the entire results history into memory.
Could be a bad idea in a far future.
"""
if self._history is None:
self.build_branch_structure(model, skip)
self._results = dict()
all_commits = list(self._history["commits"])
for commit in all_commits:
try:
self._results[commit] = self.manager.load(commit)
except (IOError, NoResultFound) as err:
LOGGER.error("Could not load result '%s'.", commit)
LOGGER.debug("%s", str(err)) | [
"def",
"load_history",
"(",
"self",
",",
"model",
",",
"skip",
"=",
"{",
"\"gh-pages\"",
"}",
")",
":",
"if",
"self",
".",
"_history",
"is",
"None",
":",
"self",
".",
"build_branch_structure",
"(",
"model",
",",
"skip",
")",
"self",
".",
"_results",
"=",
"dict",
"(",
")",
"all_commits",
"=",
"list",
"(",
"self",
".",
"_history",
"[",
"\"commits\"",
"]",
")",
"for",
"commit",
"in",
"all_commits",
":",
"try",
":",
"self",
".",
"_results",
"[",
"commit",
"]",
"=",
"self",
".",
"manager",
".",
"load",
"(",
"commit",
")",
"except",
"(",
"IOError",
",",
"NoResultFound",
")",
"as",
"err",
":",
"LOGGER",
".",
"error",
"(",
"\"Could not load result '%s'.\"",
",",
"commit",
")",
"LOGGER",
".",
"debug",
"(",
"\"%s\"",
",",
"str",
"(",
"err",
")",
")"
] | Load the entire results history into memory.
Could be a bad idea in a far future. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/history_manager.py#L100-L116 |
opencobra/memote | memote/suite/results/history_manager.py | HistoryManager.get_result | def get_result(self, commit, default=MemoteResult()):
"""Return an individual result from the history if it exists."""
assert self._results is not None, \
"Please call the method `load_history` first."
return self._results.get(commit, default) | python | Return an individual result from the history if it exists. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/history_manager.py#L118-L122 |
opencobra/memote | memote/support/matrix.py | absolute_extreme_coefficient_ratio | def absolute_extreme_coefficient_ratio(model):
"""
Return the maximum and minimum absolute, non-zero coefficients.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
model.metabolites, model.reactions
)
abs_matrix = np.abs(s_matrix)
return abs_matrix.max(), abs_matrix[abs_matrix > 0].min() | python | Return the maximum and minimum absolute, non-zero coefficients.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/matrix.py#L29-L43 |
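The two extremes are a pure numpy reduction. A worked example on a hand-made 2x3 stoichiometric matrix:

import numpy as np

s_matrix = np.array([[-1.0, 0.5, 0.0],
                     [1.0, 0.0, -2000.0]])
abs_matrix = np.abs(s_matrix)
maximum = abs_matrix.max()                  # 2000.0
minimum = abs_matrix[abs_matrix > 0].min()  # 0.5
print(maximum, minimum, maximum / minimum)  # a ratio of 4000 hints at poor scaling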
opencobra/memote | memote/support/matrix.py | number_independent_conservation_relations | def number_independent_conservation_relations(model):
"""
Return the number of conserved metabolite pools.
This number is given by the left null space of the stoichiometric matrix.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
model.metabolites, model.reactions
)
ln_matrix = con_helpers.nullspace(s_matrix.T)
return ln_matrix.shape[1] | python | Return the number of conserved metabolite pools.
This number is given by the left null space of the stoichiometric matrix.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/matrix.py#L46-L62 |
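The dimension of the left null space can be checked with plain numpy via rank-nullity on S transposed. A toy system with one conserved pool (a reversible A <-> B keeps A + B constant):

import numpy as np

s_matrix = np.array([[-1.0], [1.0]])  # 2 metabolites, 1 reaction: A <-> B
rank = np.linalg.matrix_rank(s_matrix.T)
left_nullity = s_matrix.T.shape[1] - rank  # rank-nullity on S^T
print(left_nullity)  # 1: the single conserved pool A + B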
opencobra/memote | memote/support/matrix.py | matrix_rank | def matrix_rank(model):
"""
Return the rank of the model's stoichiometric matrix.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
model.metabolites, model.reactions
)
return con_helpers.rank(s_matrix) | python | Return the rank of the model's stoichiometric matrix.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/matrix.py#L65-L78 |
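numpy exposes the same rank computation directly; a 3x3 toy with one linearly dependent row:

import numpy as np

s_matrix = np.array([[1.0, 0.0, -1.0],
                     [0.0, 1.0, -1.0],
                     [1.0, 1.0, -2.0]])  # third row = sum of the first two
print(np.linalg.matrix_rank(s_matrix))  # 2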
opencobra/memote | memote/support/matrix.py | degrees_of_freedom | def degrees_of_freedom(model):
"""
Return the degrees of freedom, i.e., number of "free variables".
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
This specifically refers to the dimensionality of the (right) null space
of the stoichiometric matrix, as dim(Null(S)) corresponds directly to the
number of free variables in the system [1]_. The formula used calculates
this using the rank-nullity theorem [2]_.
References
----------
.. [1] Fukuda, K. & Terlaky, T. Criss-cross methods: A fresh view on
pivot algorithms. Mathematical Programming 79, 369-395 (1997).
.. [2] Alama, J. The Rank+Nullity Theorem. Formalized Mathematics 15,
(2007).
"""
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
model.metabolites, model.reactions
)
return s_matrix.shape[1] - matrix_rank(model) | python | Return the degrees of freedom, i.e., number of "free variables".
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
This specifically refers to the dimensionality of the (right) null space
of the stoichiometric matrix, as dim(Null(S)) corresponds directly to the
number of free variables in the system [1]_. The formula used calculates
this using the rank-nullity theorem [2]_.
References
----------
.. [1] Fukuda, K. & Terlaky, T. Criss-cross methods: A fresh view on
pivot algorithms. Mathematical Programming 79, 369-395 (1997).
.. [2] Alama, J. The Rank+Nullity Theorem. Formalized Mathematics 15,
(2007). | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/matrix.py#L81-L109 |
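With numpy the rank-nullity computation is two lines; reusing the dependent-row toy matrix, three reaction columns minus rank two leaves one free flux direction:

import numpy as np

s_matrix = np.array([[1.0, 0.0, -1.0],
                     [0.0, 1.0, -1.0],
                     [1.0, 1.0, -2.0]])
dof = s_matrix.shape[1] - np.linalg.matrix_rank(s_matrix)
print(dof)  # 1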
opencobra/memote | memote/experimental/config.py | ExperimentConfiguration.load | def load(self, model):
"""
Load all information from an experimental configuration file.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
self.load_medium(model)
self.load_essentiality(model)
self.load_growth(model)
# self.load_experiment(config.config.get("growth"), model)
return self | python | Load all information from an experimental configuration file.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L67-L81 |
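The loader expects a nested mapping roughly shaped as below; the key names follow the load_medium, load_essentiality, and load_growth implementations that follow, while the concrete identifiers and file names are invented for illustration:

config = {
    "medium": {
        "path": "data/experimental/media",
        "definitions": {"glucose_minimal": {"filename": "glucose.csv"}},
    },
    "essentiality": {
        "experiments": {"knockouts_2019": {"medium": "glucose_minimal"}},
    },
    "growth": {
        # A None entry is allowed; the loaders then fall back to '<id>.csv'.
        "experiments": {"carbon_sources": None},
    },
}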
opencobra/memote | memote/experimental/config.py | ExperimentConfiguration.validate | def validate(self):
"""Validate the configuration file."""
validator = Draft4Validator(self.SCHEMA)
if not validator.is_valid(self.config):
for err in validator.iter_errors(self.config):
LOGGER.error(str(err.message))
validator.validate(self.config) | python | Validate the configuration file. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L83-L89 |
opencobra/memote | memote/experimental/config.py | ExperimentConfiguration.load_medium | def load_medium(self, model):
"""Load and validate all media."""
media = self.config.get("medium")
if media is None:
return
definitions = media.get("definitions")
if definitions is None or len(definitions) == 0:
return
path = self.get_path(media, join("data", "experimental", "media"))
for medium_id, medium in iteritems(definitions):
if medium is None:
medium = dict()
filename = medium.get("filename")
if filename is None:
filename = join(path, "{}.csv".format(medium_id))
elif not isabs(filename):
filename = join(path, filename)
tmp = Medium(identifier=medium_id, obj=medium, filename=filename)
tmp.load()
tmp.validate(model)
self.media[medium_id] = tmp | python | Load and validate all media. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L91-L111 |
opencobra/memote | memote/experimental/config.py | ExperimentConfiguration.load_essentiality | def load_essentiality(self, model):
"""Load and validate all data files."""
data = self.config.get("essentiality")
if data is None:
return
experiments = data.get("experiments")
if experiments is None or len(experiments) == 0:
return
path = self.get_path(data,
join("data", "experimental", "essentiality"))
for exp_id, exp in iteritems(experiments):
if exp is None:
exp = dict()
filename = exp.get("filename")
if filename is None:
filename = join(path, "{}.csv".format(exp_id))
elif not isabs(filename):
filename = join(path, filename)
experiment = EssentialityExperiment(
identifier=exp_id, obj=exp, filename=filename)
if experiment.medium is not None:
assert experiment.medium in self.media, \
"Experiment '{}' has an undefined medium '{}'.".format(
exp_id, experiment.medium)
experiment.medium = self.media[experiment.medium]
experiment.load()
experiment.validate(model)
self.essentiality[exp_id] = experiment | python | Load and validate all data files. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L113-L140 |
opencobra/memote | memote/experimental/config.py | ExperimentConfiguration.load_growth | def load_growth(self, model):
"""Load and validate all data files."""
data = self.config.get("growth")
if data is None:
return
experiments = data.get("experiments")
if experiments is None or len(experiments) == 0:
return
path = self.get_path(data,
join("data", "experimental", "growth"))
for exp_id, exp in iteritems(experiments):
if exp is None:
exp = dict()
filename = exp.get("filename")
if filename is None:
filename = join(path, "{}.csv".format(exp_id))
elif not isabs(filename):
filename = join(path, filename)
growth = GrowthExperiment(
identifier=exp_id, obj=exp, filename=filename)
if growth.medium is not None:
assert growth.medium in self.media, \
"Growth-experiment '{}' has an undefined medium '{}'." \
"".format(exp_id, growth.medium)
growth.medium = self.media[growth.medium]
growth.load()
growth.validate(model)
self.growth[exp_id] = growth | python | Load and validate all data files. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L142-L169 |
opencobra/memote | memote/experimental/config.py | ExperimentConfiguration.get_path | def get_path(self, obj, default):
"""Return a relative or absolute path to experimental data."""
path = obj.get("path")
if path is None:
path = join(self._base, default)
if not isabs(path):
path = join(self._base, path)
return path | python | Return a relative or absolute path to experimental data. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L171-L178 |
opencobra/memote | memote/support/annotation.py | find_components_without_annotation | def find_components_without_annotation(model, components):
"""
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
"""
return [elem for elem in getattr(model, components) if
elem.annotation is None or len(elem.annotation) == 0] | python | Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation. | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/annotation.py#L125-L143 |
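The filter relies only on an annotation attribute, so toy stand-ins are enough to exercise it without cobra installed:

def find_components_without_annotation(model, components):
    return [elem for elem in getattr(model, components)
            if elem.annotation is None or len(elem.annotation) == 0]

class Thing:
    def __init__(self, identifier, annotation):
        self.id = identifier
        self.annotation = annotation

class ToyModel:
    metabolites = [Thing("atp_c", {"kegg.compound": "C00002"}),
                   Thing("orphan_c", {})]

print([m.id for m in find_components_without_annotation(ToyModel, "metabolites")])
# ['orphan_c']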