code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
'''
Alternative constructor which accepts a path as taken from URL and uses
the given app or the current app config to get the real path.

If class has attribute `generic` set to True, `directory_class` or
`file_class` will be used as type.

:param path: relative path as from URL
:param app: optional, flask application
:return: file object pointing to path
:rtype: File
'''
app = app or current_app
base = app.config['directory_base']
path = urlpath_to_abspath(path, base)
if not cls.generic:
    kls = cls
elif os.path.isdir(path):
    kls = cls.directory_class
else:
    kls = cls.file_class
return kls(path=path, app=app)
def from_urlpath(cls, path, app=None)
Alternative constructor which accepts a path as taken from URL and uses the given app or the current app config to get the real path. If class has attribute `generic` set to True, `directory_class` or `file_class` will be used as type. :param path: relative path as from URL :param app: optional, flask application :return: file object pointing to path :rtype: File
4.869467
1.801076
2.703643
pattern = patterns[0]
patterns = patterns[1:]
has_wildcard = is_pattern(pattern)
using_globstar = pattern == "**"

# This avoids os.listdir() for performance
if has_wildcard:
    entries = [x.name for x in scandir(current_dir)]
else:
    entries = [pattern]

if using_globstar:
    matching_subdirs = map(lambda x: x[0], walk(current_dir))
else:
    subdirs = [e for e in entries
               if os.path.isdir(os.path.join(current_dir, e))]
    matching_subdirs = match_entries(subdirs, pattern)

# For terminal globstar, add a pattern for all files in subdirs
if using_globstar and not patterns:
    patterns = ['*']

if patterns:  # we've still got more directories to traverse
    for subdir in matching_subdirs:
        absolute_path = os.path.join(current_dir, subdir)
        for match in self._find_paths(absolute_path, patterns):
            yield match
else:  # we've got the last pattern
    if not has_wildcard:
        entries = [pattern + '.wsp', pattern + '.wsp.gz']
    files = [e for e in entries
             if os.path.isfile(os.path.join(current_dir, e))]
    matching_files = match_entries(files, pattern + '.*')
    for _basename in matching_files + matching_subdirs:
        yield os.path.join(current_dir, _basename)
def _find_paths(self, current_dir, patterns)
Recursively generates absolute paths whose components underneath current_dir match the corresponding pattern in patterns
3.276847
3.380353
0.96938
disjoint_intervals = []
for interval in intervals:
    if disjoint_intervals and disjoint_intervals[-1].overlaps(interval):
        disjoint_intervals[-1] = disjoint_intervals[-1].union(interval)
    else:
        disjoint_intervals.append(interval)
return disjoint_intervals
def union_overlapping(intervals)
Union any overlapping intervals in the given set.
1.999864
2.062285
0.969732
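union_overlapping assumes interval objects exposing overlaps() and union(), and that the input is sorted by start. A minimal, self-contained sketch; the Interval class here is an assumption for illustration, not the library's own implementation:

class Interval(object):
    def __init__(self, start, end):
        self.start, self.end = start, end

    def overlaps(self, other):
        # Neither interval ends before the other starts.
        return self.start <= other.end and other.start <= self.end

    def union(self, other):
        return Interval(min(self.start, other.start), max(self.end, other.end))

    def __repr__(self):
        return 'Interval(%r, %r)' % (self.start, self.end)

def union_overlapping(intervals):
    # Same fold as above: merge each interval into the last disjoint one.
    disjoint = []
    for interval in intervals:
        if disjoint and disjoint[-1].overlaps(interval):
            disjoint[-1] = disjoint[-1].union(interval)
        else:
            disjoint.append(interval)
    return disjoint

print(union_overlapping([Interval(0, 5), Interval(3, 8), Interval(10, 12)]))
# [Interval(0, 8), Interval(10, 12)]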
for node in app.store.find(query):
    if node.is_leaf:
        index.add(node.path)
    else:
        recurse('{0}.*'.format(node.path), index)
def recurse(query, index)
Recursively walk across paths, adding leaves to the index as they're found.
5.259458
4.37425
1.202368
fh = None
try:
    fh = open(path, 'r+b')
    if LOCK:
        fcntl.flock(fh.fileno(), fcntl.LOCK_EX)
    packedMetadata = fh.read(metadataSize)

    try:
        (aggregationType, maxRetention, xff, archiveCount) = \
            struct.unpack(metadataFormat, packedMetadata)
    except:
        raise CorruptWhisperFile("Unable to read header", fh.name)

    try:
        newAggregationType = struct.pack(
            longFormat, aggregationMethodToType[aggregationMethod])
    except KeyError:
        raise InvalidAggregationMethod(
            "Unrecognized aggregation method: %s" % aggregationMethod)

    if xFilesFactor is not None:
        # use specified xFilesFactor
        xff = struct.pack(floatFormat, float(xFilesFactor))
    else:
        # retain old value
        xff = struct.pack(floatFormat, xff)

    # repack the remaining header information
    maxRetention = struct.pack(longFormat, maxRetention)
    archiveCount = struct.pack(longFormat, archiveCount)

    packedMetadata = newAggregationType + maxRetention + xff + archiveCount
    fh.seek(0)
    # fh.write(newAggregationType)
    fh.write(packedMetadata)

    if AUTOFLUSH:
        fh.flush()
        os.fsync(fh.fileno())

    if CACHE_HEADERS and fh.name in __headerCache:
        del __headerCache[fh.name]
finally:
    if fh:
        fh.close()

return aggregationTypeToMethod.get(aggregationType, 'average')
def setAggregationMethod(path, aggregationMethod, xFilesFactor=None)
setAggregationMethod(path,aggregationMethod,xFilesFactor=None) path is a string aggregationMethod specifies the method to use when propagating data (see ``whisper.aggregationMethods``) xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur. If None, the existing xFilesFactor in path will not be changed
3.96046
4.013458
0.986795
if not archiveList:
    raise InvalidConfiguration(
        "You must specify at least one archive configuration!")

archiveList = sorted(archiveList, key=lambda a: a[0])  # sort by precision (secondsPerPoint)

for i, archive in enumerate(archiveList):
    if i == len(archiveList) - 1:
        break

    nextArchive = archiveList[i + 1]
    if not archive[0] < nextArchive[0]:
        raise InvalidConfiguration(
            "A Whisper database may not be configured having "
            "two archives with the same precision (archive%d: %s, archive%d: %s)" %
            (i, archive, i + 1, nextArchive))

    if nextArchive[0] % archive[0] != 0:
        raise InvalidConfiguration(
            "Higher precision archives' precision "
            "must evenly divide all lower precision archives' precision "
            "(archive%d: %s, archive%d: %s)" %
            (i, archive[0], i + 1, nextArchive[0]))

    retention = archive[0] * archive[1]
    nextRetention = nextArchive[0] * nextArchive[1]
    if not nextRetention > retention:
        raise InvalidConfiguration(
            "Lower precision archives must cover "
            "larger time intervals than higher precision archives "
            "(archive%d: %s seconds, archive%d: %s seconds)" %
            (i, retention, i + 1, nextRetention))

    archivePoints = archive[1]
    pointsPerConsolidation = nextArchive[0] // archive[0]
    if not archivePoints >= pointsPerConsolidation:
        raise InvalidConfiguration(
            "Each archive must have at least enough points "
            "to consolidate to the next archive (archive%d consolidates %d of "
            "archive%d's points but it has only %d total points)" %
            (i + 1, pointsPerConsolidation, i, archivePoints))
def validateArchiveList(archiveList)
Validates an archiveList. An ArchiveList must: 1. Have at least one archive config. Example: (60, 86400) 2. No archive may be a duplicate of another. 3. Higher precision archives' precision must evenly divide all lower precision archives' precision. 4. Lower precision archives must cover larger time intervals than higher precision archives. 5. Each archive must have at least enough points to consolidate to the next archive. Raises InvalidConfiguration if any requirement is violated
3.236243
2.546325
1.270946
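A quick check of the rules above, as a hedged usage sketch that assumes the whisper module is importable: (60, 1440) holds one day at one-minute precision and consolidates cleanly into (300, 2016), one week at five-minute precision, whereas duplicate precisions are rejected.

import whisper

whisper.validateArchiveList([(60, 1440), (300, 2016)])  # ok: 1d at 1min, 7d at 5min
try:
    whisper.validateArchiveList([(60, 1440), (60, 2016)])  # duplicate precision
except whisper.InvalidConfiguration as e:
    print(e)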
# Set default params
if xFilesFactor is None:
    xFilesFactor = 0.5
if aggregationMethod is None:
    aggregationMethod = 'average'

# Validate archive configurations...
validateArchiveList(archiveList)

# Looks good, now we create the file and write the header
if os.path.exists(path):
    raise InvalidConfiguration("File %s already exists!" % path)

fh = None
try:
    fh = open(path, 'wb')
    if LOCK:
        fcntl.flock(fh.fileno(), fcntl.LOCK_EX)

    aggregationType = struct.pack(
        longFormat, aggregationMethodToType.get(aggregationMethod, 1))
    oldest = max([secondsPerPoint * points
                  for secondsPerPoint, points in archiveList])
    maxRetention = struct.pack(longFormat, oldest)
    xFilesFactor = struct.pack(floatFormat, float(xFilesFactor))
    archiveCount = struct.pack(longFormat, len(archiveList))
    packedMetadata = aggregationType + maxRetention + xFilesFactor + archiveCount
    fh.write(packedMetadata)

    headerSize = metadataSize + (archiveInfoSize * len(archiveList))
    archiveOffsetPointer = headerSize

    for secondsPerPoint, points in archiveList:
        archiveInfo = struct.pack(
            archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
        fh.write(archiveInfo)
        archiveOffsetPointer += (points * pointSize)

    # If configured to use fallocate and capable of fallocate use that, else
    # attempt sparse if configured, or zero pre-allocate if sparse isn't configured.
    if CAN_FALLOCATE and useFallocate:
        remaining = archiveOffsetPointer - headerSize
        fallocate(fh, headerSize, remaining)
    elif sparse:
        fh.seek(archiveOffsetPointer - 1)
        fh.write(b'\x00')
    else:
        remaining = archiveOffsetPointer - headerSize
        chunksize = 16384
        zeroes = b'\x00' * chunksize
        while remaining > chunksize:
            fh.write(zeroes)
            remaining -= chunksize
        fh.write(zeroes[:remaining])

    if AUTOFLUSH:
        fh.flush()
        os.fsync(fh.fileno())
finally:
    if fh:
        fh.close()
def create(path,archiveList,xFilesFactor=None,aggregationMethod=None,sparse=False,useFallocate=False)
create(path,archiveList,xFilesFactor=0.5,aggregationMethod='average') path is a string archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints) xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur aggregationMethod specifies the function to use when propagating data (see ``whisper.aggregationMethods``)
3.940441
3.910127
1.007753
value = float(value)
fh = None
try:
    fh = open(path, 'r+b')
    return file_update(fh, value, timestamp)
finally:
    if fh:
        fh.close()
def update(path,value,timestamp=None)
update(path,value,timestamp=None) path is a string value is a float timestamp is either an int or float
3.791528
4.472087
0.847821
if not points:
    return
points = [(int(t), float(v)) for (t, v) in points]
points.sort(key=lambda p: p[0], reverse=True)  # order points by timestamp, newest first
fh = None
try:
    fh = open(path, 'r+b')
    return file_update_many(fh, points)
finally:
    if fh:
        fh.close()
def update_many(path,points)
update_many(path,points) path is a string points is a list of (timestamp,value) points
3.732583
3.47739
1.073387
fh = None
try:
    fh = open(path, 'rb')
    return __readHeader(fh)
finally:
    if fh:
        fh.close()
return None
def info(path)
info(path) path is a string
4.262611
4.915082
0.867251
fh = None
try:
    fh = open(path, 'rb')
    return file_fetch(fh, fromTime, untilTime, now)
finally:
    if fh:
        fh.close()
def fetch(path,fromTime,untilTime=None,now=None)
fetch(path,fromTime,untilTime=None) path is a string fromTime is an epoch time untilTime is also an epoch time, but defaults to now. Returns a tuple of (timeInfo, valueList) where timeInfo is itself a tuple of (fromTime, untilTime, step) Returns None if no data can be returned
3.334802
4.23107
0.78817
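A hedged round-trip sketch tying together the public whisper calls shown above (create/update/fetch); assumes the whisper module is installed and /tmp is writable.

import time
import whisper

path = '/tmp/example.wsp'
whisper.create(path, [(60, 60)])              # 1-minute resolution, 1 hour of retention
whisper.update(path, 42.0, int(time.time()))
(timeInfo, values) = whisper.fetch(path, int(time.time()) - 300)
fromTime, untilTime, step = timeInfo
print(step, [v for v in values if v is not None])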
fromInterval = int(fromTime - (fromTime % archive['secondsPerPoint'])) + archive['secondsPerPoint']
untilInterval = int(untilTime - (untilTime % archive['secondsPerPoint'])) + archive['secondsPerPoint']
if fromInterval == untilInterval:
    # Zero-length time range: always include the next point
    untilInterval += archive['secondsPerPoint']

fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval, baseValue) = struct.unpack(pointFormat, packedPoint)

if baseInterval == 0:
    step = archive['secondsPerPoint']
    points = (untilInterval - fromInterval) // step
    timeInfo = (fromInterval, untilInterval, step)
    valueList = [None] * points
    return (timeInfo, valueList)

# Determine fromOffset
timeDistance = fromInterval - baseInterval
pointDistance = timeDistance // archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
fromOffset = archive['offset'] + (byteDistance % archive['size'])

# Determine untilOffset
timeDistance = untilInterval - baseInterval
pointDistance = timeDistance // archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
untilOffset = archive['offset'] + (byteDistance % archive['size'])

# Read all the points in the interval
fh.seek(fromOffset)
if fromOffset < untilOffset:
    # If we don't wrap around the archive
    seriesString = fh.read(untilOffset - fromOffset)
else:
    # We do wrap around the archive, so we need two reads
    archiveEnd = archive['offset'] + archive['size']
    seriesString = fh.read(archiveEnd - fromOffset)
    fh.seek(archive['offset'])
    seriesString += fh.read(untilOffset - archive['offset'])

# Now we unpack the series data we just read (anything faster than unpack?)
byteOrder, pointTypes = pointFormat[0], pointFormat[1:]
points = len(seriesString) // pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)

# And finally we construct a list of values (optimize this!)
valueList = [None] * points  # pre-allocate entire list for speed
currentInterval = fromInterval
step = archive['secondsPerPoint']

for i in xrange(0, len(unpackedSeries), 2):
    pointTime = unpackedSeries[i]
    if pointTime == currentInterval:
        pointValue = unpackedSeries[i + 1]
        valueList[i // 2] = pointValue  # in-place reassignment is faster than append()
    currentInterval += step

timeInfo = (fromInterval, untilInterval, step)
return (timeInfo, valueList)
def __archive_fetch(fh, archive, fromTime, untilTime)
Fetch data from a single archive. Note that checks for validity of the time period requested happen above this level so it's possible to wrap around the archive on a read and request data older than the archive's retention
3.171871
3.192875
0.993422
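The wrap-around logic hinges on modular arithmetic: a point's slot is its distance from baseInterval in points, taken modulo the archive size in bytes. A small worked sketch; a pointSize of 12 matches whisper's timestamp-plus-double point, but treat the numbers as illustrative:

pointSize = 12                      # 4-byte timestamp + 8-byte value
archive = {'offset': 40, 'secondsPerPoint': 60, 'points': 5}
archive['size'] = archive['points'] * pointSize

baseInterval = 1200                 # timestamp stored in the first slot
for interval in (1200, 1260, 1500, 1560):
    pointDistance = (interval - baseInterval) // archive['secondsPerPoint']
    byteDistance = pointDistance * pointSize
    offset = archive['offset'] + (byteDistance % archive['size'])
    print(interval, '->', offset)   # 1500 and beyond wrap back into the buffer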
fh_from = open(path_from, 'rb')
fh_to = open(path_to, 'rb+')
try:
    return file_merge(fh_from, fh_to)
finally:
    fh_from.close()
    fh_to.close()
def merge(path_from, path_to)
Merges the data from one whisper file into another. Each file must have the same archive configuration
3.300328
3.493909
0.944595
fh_from = open(path_from, 'rb')
fh_to = open(path_to, 'rb')
diffs = file_diff(fh_from, fh_to, ignore_empty)
fh_to.close()
fh_from.close()
return diffs
def diff(path_from, path_to, ignore_empty = False)
Compare two whisper databases. Each file must have the same archive configuration
2.05817
2.164644
0.950812
# Don't add if empty
if not nonempty(data):
    for d in self.data[path]:
        if nonempty(d['values']):
            return

# Add data to path
for expr in exprs:
    self.paths[expr].add(path)
self.data[path].append({
    'time_info': time_info,
    'values': data,
})
def add_data(self, path, time_info, data, exprs)
Stores data before it can be put into a time series
4.500783
4.737862
0.949961
v1, v2 = pattern.find('{'), pattern.find('}')
if v1 > -1 and v2 > v1:
    variations = pattern[v1 + 1:v2].split(',')
    variants = [pattern[:v1] + v + pattern[v2 + 1:] for v in variations]
else:
    variants = [pattern]
return list(_deduplicate(variants))
def extract_variants(pattern)
Extract the pattern variants (ie. {foo,bar}baz = foobaz or barbaz).
2.853052
2.577074
1.10709
matching = []
for variant in expand_braces(pattern):
    matching.extend(fnmatch.filter(entries, variant))
return list(_deduplicate(matching))
def match_entries(entries, pattern)
A drop-in replacement for fnmatch.filter that supports pattern variants (ie. {foo,bar}baz = foobaz or barbaz).
8.207303
6.238034
1.315687
res = set()

# Used instead of s.strip('{}') because strip is greedy.
# We want to remove only ONE leading { and ONE trailing }, if both exist
def remove_outer_braces(s):
    if s[0] == '{' and s[-1] == '}':
        return s[1:-1]
    return s

match = EXPAND_BRACES_RE.search(pattern)
if match is not None:
    sub = match.group(1)
    v1, v2 = match.span(1)
    if "," in sub:
        for pat in sub.strip('{}').split(','):
            subpattern = pattern[:v1] + pat + pattern[v2:]
            res.update(expand_braces(subpattern))
    else:
        subpattern = pattern[:v1] + remove_outer_braces(sub) + pattern[v2:]
        res.update(expand_braces(subpattern))
else:
    res.add(pattern.replace('\\}', '}'))

return list(res)
def expand_braces(pattern)
Find the rightmost, innermost set of braces and, if it contains a comma-separated list, expand its contents recursively (any of its items may itself be a list enclosed in braces). Return the full list of expanded strings.
3.252937
3.219795
1.010293
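A self-contained sketch of the recursion; the regex here is an assumption standing in for the module's actual EXPAND_BRACES_RE (a greedy prefix pushes the match to the rightmost, innermost group):

import re

EXPAND_BRACES_RE = re.compile(r'.*(\{.+?[^\\]\})')  # assumed pattern

def expand_braces(pattern):
    res = set()
    match = EXPAND_BRACES_RE.search(pattern)
    if match is not None:
        sub = match.group(1)
        v1, v2 = match.span(1)
        if ',' in sub:
            for pat in sub.strip('{}').split(','):
                res.update(expand_braces(pattern[:v1] + pat + pattern[v2:]))
        else:
            res.update(expand_braces(pattern[:v1] + sub[1:-1] + pattern[v2:]))
    else:
        res.add(pattern.replace('\\}', '}'))
    return list(res)

print(sorted(expand_braces('servers.{web,db}.cpu.{user,system}')))
# ['servers.db.cpu.system', 'servers.db.cpu.user',
#  'servers.web.cpu.system', 'servers.web.cpu.user']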
key = self.keyfunc(metric)
nodes = []
servers = set()
for node in self.hash_ring.get_nodes(key):
    server, instance = node
    if server in servers:
        continue
    servers.add(server)
    nodes.append(node)
    if len(servers) >= self.replication_factor:
        break

available = [n for n in nodes if self.is_available(n)]
return random.choice(available or nodes)
def select_host(self, metric)
Returns the carbon host that has data for the given metric.
3.729457
3.71841
1.002971
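The dedup-by-server loop is the heart of the replica selection: walk the hash ring in order, but never count two instances on the same server toward the replication factor. A minimal sketch with a stubbed ring; the names are hypothetical, not the library's own:

def pick_nodes(ring_nodes, replication_factor):
    # ring_nodes: iterable of (server, instance) pairs in ring order
    nodes, servers = [], set()
    for server, instance in ring_nodes:
        if server in servers:
            continue  # never place two replicas on the same server
        servers.add(server)
        nodes.append((server, instance))
        if len(servers) >= replication_factor:
            break
    return nodes

print(pick_nodes([('a', 0), ('a', 1), ('b', 0), ('c', 0)], 2))
# [('a', 0), ('b', 0)]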
return (arg for arg in args if arg is not None and not math.isnan(arg) and not math.isinf(arg))
def safeArgs(args)
Iterate over valid, finite values in an iterable. Skip any items that are None, NaN, or infinite.
3.603601
2.912023
1.237491
missingValues = any(None in series for series in data)
finiteData = [series for series in data
              if not series.options.get('drawAsInfinite')]

yMinValue = safeMin(safeMin(series) for series in finiteData)

if yMinValue is None:
    # This can only happen if there are no valid, non-infinite data.
    return (0.0, 1.0)

if yMinValue > 0.0 and drawNullAsZero and missingValues:
    yMinValue = 0.0

if stacked:
    length = safeMin(len(series) for series in finiteData)
    sumSeries = []
    for i in range(0, length):
        sumSeries.append(safeSum(series[i] for series in finiteData))
    yMaxValue = safeMax(sumSeries)
else:
    yMaxValue = safeMax(safeMax(series) for series in finiteData)

if yMaxValue < 0.0 and drawNullAsZero and missingValues:
    yMaxValue = 0.0

return (yMinValue, yMaxValue)
def dataLimits(data, drawNullAsZero=False, stacked=False)
Return the range of values in data as (yMinValue, yMaxValue). data is an array of TimeSeries objects.
2.808434
2.680368
1.047779
if v is None:
    return 0, ''

for prefix, size in UnitSystems[system]:
    if condition(v, size, step):
        v2 = v / size
        if v2 - math.floor(v2) < 0.00000000001 and v > 1:
            v2 = float(math.floor(v2))
        if units:
            prefix = "%s%s" % (prefix, units)
        return v2, prefix

if v - math.floor(v) < 0.00000000001 and v > 1:
    v = float(math.floor(v))
if units:
    prefix = units
else:
    prefix = ''
return v, prefix
def format_units(v, step=None, system="si", units=None)
Format the given value in standardized units. ``system`` is either 'binary' or 'si' For more info, see: http://en.wikipedia.org/wiki/SI_prefix http://en.wikipedia.org/wiki/Binary_prefix
2.708692
2.71613
0.997261
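The prefix walk depends on a UnitSystems table and a condition() helper that are not shown here. A reduced sketch of the same idea for SI prefixes; the table and the simple magnitude threshold are assumptions, not the module's actual helpers:

SI = [('T', 1e12), ('G', 1e9), ('M', 1e6), ('K', 1e3)]

def format_si(v):
    # Pick the largest prefix whose size the value reaches.
    for prefix, size in SI:
        if abs(v) >= size:
            return v / size, prefix
    return v, ''

print(format_si(1536000))   # (1.536, 'M')
print(format_si(512))       # (512, '')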
if math.isnan(value):
    raise GraphError('Encountered NaN %s' % (name,))
elif math.isinf(value):
    raise GraphError('Encountered infinite %s' % (name,))
return value
def checkFinite(value, name='value')
Check that value is a finite number. If it is, return it. If not, raise GraphError describing the problem, using name in the error message.
3.046703
2.579493
1.181125
if self.minValue < self.maxValue:
    # The limits are already OK.
    return

minFixed = (self.minValueSource in ['min'])
maxFixed = (self.maxValueSource in ['max', 'limit'])

if minFixed and maxFixed:
    raise GraphError('The %s must be less than the %s' %
                     (self.minValueSource, self.maxValueSource))
elif minFixed:
    self.maxValue = self.minValue + self.chooseDelta(self.minValue)
elif maxFixed:
    self.minValue = self.maxValue - self.chooseDelta(self.maxValue)
else:
    delta = self.chooseDelta(max(abs(self.minValue), abs(self.maxValue)))
    average = (self.minValue + self.maxValue) / 2.0
    self.minValue = average - delta
    self.maxValue = average + delta
def reconcileLimits(self)
If self.minValue is not less than self.maxValue, adjust self.minValue and/or self.maxValue (depending on which was not specified explicitly by the user) to make self.minValue < self.maxValue. If the user specified both limits explicitly, then raise GraphError.
2.739247
2.347432
1.166912
if axisMin is not None and not math.isnan(axisMin):
    self.minValueSource = 'min'
    self.minValue = self.checkFinite(axisMin, 'axis min')

if axisMax == 'max':
    self.maxValueSource = 'extremum'
elif axisMax is not None and not math.isnan(axisMax):
    self.maxValueSource = 'max'
    self.maxValue = self.checkFinite(axisMax, 'axis max')

if axisLimit is None or math.isnan(axisLimit):
    self.axisLimit = None
elif axisLimit < self.maxValue:
    self.maxValue = self.checkFinite(axisLimit, 'axis limit')
    self.maxValueSource = 'limit'
    # The limit has already been imposed, so there is no need to
    # remember it:
    self.axisLimit = None
elif math.isinf(axisLimit):
    # It must be positive infinity, which is the same as no limit:
    self.axisLimit = None
else:
    # We still need to remember axisLimit to avoid rounding top to
    # a value larger than axisLimit:
    self.axisLimit = axisLimit

self.reconcileLimits()
def applySettings(self, axisMin=None, axisMax=None, axisLimit=None)
Apply the specified settings to this axis. Set self.minValue, self.minValueSource, self.maxValue, self.maxValueSource, and self.axisLimit reasonably based on the parameters provided. Arguments: axisMin -- a finite number, or None to choose a round minimum limit that includes all of the data. axisMax -- a finite number, 'max' to use the maximum value contained in the data, or None to choose a round maximum limit that includes all of the data. axisLimit -- a finite number to use as an upper limit on maxValue, or None to impose no upper limit.
3.247133
2.863027
1.134161
value, prefix = format_units(value, self.step, system=self.unitSystem)
span, spanPrefix = format_units(self.span, self.step, system=self.unitSystem)
if prefix:
    prefix += " "
if value < 0.1:
    return "%g %s" % (float(value), prefix)
elif value < 1.0:
    return "%.2f %s" % (float(value), prefix)
if span > 10 or spanPrefix != prefix:
    if type(value) is float:
        return "%.1f %s" % (value, prefix)
    else:
        return "%d %s" % (int(value), prefix)
elif span > 3:
    return "%.1f %s" % (float(value), prefix)
elif span > 0.1:
    return "%.2f %s" % (float(value), prefix)
else:
    return "%g %s" % (float(value), prefix)
def makeLabel(self, value)
Create a label for the specified value. Create a label string containing the value and its units (if any), based on the values of self.step, self.span, and self.unitSystem.
2.487205
2.365164
1.051599
self.checkFinite(minStep)
if self.binary:
    base = 2.0
    mantissas = [1.0]
    exponent = math.floor(math.log(minStep, 2) - EPSILON)
else:
    base = 10.0
    mantissas = [1.0, 2.0, 5.0]
    exponent = math.floor(math.log10(minStep) - EPSILON)
while True:
    multiplier = base ** exponent
    for mantissa in mantissas:
        value = mantissa * multiplier
        if value >= minStep * (1.0 - EPSILON):
            yield value
    exponent += 1
def generateSteps(self, minStep)
Generate allowed steps with step >= minStep in increasing order.
2.820836
2.704493
1.043019
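In the decimal case this yields the familiar 1-2-5 progression. A standalone sketch; EPSILON is assumed small here, the module defines its own tolerance:

import itertools
import math

EPSILON = 1e-10  # assumed tolerance

def generate_steps(min_step):
    # 1-2-5 progression, starting at the decade just below min_step.
    exponent = math.floor(math.log10(min_step) - EPSILON)
    while True:
        multiplier = 10.0 ** exponent
        for mantissa in (1.0, 2.0, 5.0):
            value = mantissa * multiplier
            if value >= min_step * (1.0 - EPSILON):
                yield value
        exponent += 1

print(list(itertools.islice(generate_steps(3.0), 5)))
# [5.0, 10.0, 20.0, 50.0, 100.0]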
bottom = step * math.floor(self.minValue / float(step) + EPSILON)
top = bottom + step * divisor
if top >= self.maxValue - EPSILON * step:
    return max(top - self.maxValue, self.minValue - bottom)
else:
    return None
def computeSlop(self, step, divisor)
Compute the slop that would result from step and divisor. Return the slop, or None if this combination can't cover the full range. See chooseStep() for the definition of "slop".
5.997557
5.355998
1.119783
self.binary = binary
if divisors is None:
    divisors = [4, 5, 6]
else:
    for divisor in divisors:
        self.checkFinite(divisor, 'divisor')
        if divisor < 1:
            raise GraphError('Divisors must be greater than or equal '
                             'to one')

if self.minValue == self.maxValue:
    if self.minValue == 0.0:
        self.maxValue = 1.0
    elif self.minValue < 0.0:
        self.minValue *= 1.1
        self.maxValue *= 0.9
    else:
        self.minValue *= 0.9
        self.maxValue *= 1.1

variance = self.maxValue - self.minValue

bestSlop = None
bestStep = None
for step in self.generateSteps(variance / float(max(divisors))):
    if (
        bestSlop is not None
        and step * min(divisors) >= 2 * bestSlop + variance
    ):
        break
    for divisor in divisors:
        slop = self.computeSlop(step, divisor)
        if slop is not None and (bestSlop is None or slop < bestSlop):
            bestSlop = slop
            bestStep = step
self.step = bestStep
def chooseStep(self, divisors=None, binary=False)
Choose a nice, pretty size for the steps between axis labels.

Our main constraint is that the number of divisions must be taken from the divisors list. We pick a number of divisions and a step size that minimizes the amount of whitespace ("slop") that would need to be included outside of the range [self.minValue, self.maxValue] if we were to push out the axis values to the next larger multiples of the step size.

The minimum step that could possibly cover the variance satisfies

    minStep * max(divisors) >= variance

or

    minStep = variance / max(divisors)

It's not necessarily possible to cover the variance with a step that size, but we know that any smaller step definitely *cannot* cover it. So we can start there.

For a sufficiently large step size, it is definitely possible to cover the variance, but at some point the slop will start growing. Let's define the slop to be

    slop = max(minValue - bottom, top - maxValue)

Then for a given step size, we know that

    slop >= (1/2) * (step * min(divisors) - variance)

(the factor of 1/2 is for the best-case scenario that the slop is distributed equally on the two sides of the range). So suppose we already have a choice that yields bestSlop. Then there is no need to choose steps so large that the slop is guaranteed to be larger than bestSlop. Therefore, the maximum step size that we need to consider is

    maxStep = (2 * bestSlop + variance) / min(divisors)
2.657688
2.394295
1.110009
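A worked instance of the search: for minValue=0, maxValue=270 and divisors [4, 5, 6], candidates start at variance/6 = 45, so the 1-2-5 generator yields 50 first; step 50 with 6 divisions covers 0..300 with slop 30, and any larger step only adds slop, so 50 wins. A sketch of the slop computation with module-level stand-ins for the instance attributes:

import math

EPSILON = 1e-10  # assumed tolerance

def compute_slop(min_value, max_value, step, divisor):
    bottom = step * math.floor(min_value / float(step) + EPSILON)
    top = bottom + step * divisor
    if top >= max_value - EPSILON * step:
        return max(top - max_value, min_value - bottom)
    return None

# step 50 with 6 divisions covers 0..300, wasting only 30 units:
print(compute_slop(0, 270, 50, 6))   # 30.0
# step 50 with 5 divisions tops out at 250 and cannot cover the range:
print(compute_slop(0, 270, 50, 5))   # None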
pathExpressions = sorted(set([s.pathExpression for s in seriesList]))
return ','.join(pathExpressions)
def formatPathExpressions(seriesList)
Returns a comma-separated list of unique path expressions.
4.078918
2.952191
1.381658
if not seriesLists or not any(seriesLists):
    return []
seriesList, start, end, step = normalize(seriesLists)
name = "sumSeries(%s)" % formatPathExpressions(seriesList)
values = (safeSum(row) for row in zip_longest(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def sumSeries(requestContext, *seriesLists)
Short form: sum() This will add metrics together and return the sum at each datapoint. (See integral for a sum over time) Example:: &target=sum(company.server.application*.requestsHandled) This would show the sum of all requests handled per minute (provided requestsHandled are collected once a minute). If metrics with different retention rates are combined, the coarsest metric is graphed, and the sum of the other metrics is averaged for the metrics with finer retention rates.
5.275437
5.891492
0.895433
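The core of the function is a point-wise safeSum over zip_longest, which pads shorter series with None. A reduced sketch with a stand-in for graphite's safeSum:

from itertools import zip_longest  # izip_longest on Python 2

def safe_sum(row):
    # Stand-in for safeSum: ignore Nones; None only if every value is None.
    vals = [v for v in row if v is not None]
    return sum(vals) if vals else None

a = [1, 2, None, 4]
b = [10, None, 30]
print([safe_sum(row) for row in zip_longest(a, b)])
# [11, 2, 30, 4]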
newSeries = {}
newNames = list()

for series in seriesList:
    newname = '.'.join(map(lambda x: x[1],
                           filter(lambda i: i[0] not in positions,
                                  enumerate(series.name.split('.')))))
    if newname in newSeries:
        newSeries[newname] = sumSeries(requestContext,
                                       (series, newSeries[newname]))[0]
    else:
        newSeries[newname] = series
        newNames.append(newname)
    newSeries[newname].name = newname

return [newSeries[name] for name in newNames]
def sumSeriesWithWildcards(requestContext, seriesList, *positions)
Call sumSeries after inserting wildcards at the given position(s). Example:: &target=sumSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1) This would be the equivalent of:: &target=sumSeries(host.*.cpu-user.value)&target=sumSeries( host.*.cpu-system.value)
2.802584
3.000559
0.934021
matchedList = defaultdict(list)
for series in seriesList:
    newname = '.'.join(map(lambda x: x[1],
                           filter(lambda i: i[0] not in positions,
                                  enumerate(series.name.split('.')))))
    matchedList[newname].append(series)
result = []
for name in matchedList:
    [series] = averageSeries(requestContext, (matchedList[name]))
    series.name = name
    result.append(series)
return result
def averageSeriesWithWildcards(requestContext, seriesList, *positions)
Call averageSeries after inserting wildcards at the given position(s). Example:: &target=averageSeriesWithWildcards( host.cpu-[0-7].cpu-{user,system}.value, 1) This would be the equivalent of:: &target=averageSeries(host.*.cpu-user.value)&target=averageSeries( host.*.cpu-system.value)
3.508277
3.676976
0.95412
positions = [position] if isinstance(position, int) else position

newSeries = {}
newNames = []

for series in seriesList:
    new_name = ".".join(map(lambda x: x[1],
                            filter(lambda i: i[0] not in positions,
                                   enumerate(series.name.split('.')))))
    if new_name in newSeries:
        [newSeries[new_name]] = multiplySeries(
            requestContext, (newSeries[new_name], series))
    else:
        newSeries[new_name] = series
        newNames.append(new_name)
    newSeries[new_name].name = new_name

return [newSeries[name] for name in newNames]
def multiplySeriesWithWildcards(requestContext, seriesList, *position)
Call multiplySeries after inserting wildcards at the given position(s). Example:: &target=multiplySeriesWithWildcards( web.host-[0-7].{avg-response,total-request}.value, 2) This would be the equivalent of:: &target=multiplySeries(web.host-0.{avg-response,total-request}.value) &target=multiplySeries(web.host-1.{avg-response,total-request}.value) ...
2.798698
3.298851
0.848386
if not seriesLists or not any(seriesLists):
    return []
seriesList, start, end, step = normalize(seriesLists)
name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList)
values = (safeSubtract(max(row), min(row))
          for row in zip_longest(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def rangeOfSeries(requestContext, *seriesLists)
Takes a wildcard seriesList. Distills down a set of inputs into the range of the series Example:: &target=rangeOfSeries(Server*.connections.total)
5.819502
6.893213
0.844236
if n <= 0:
    raise ValueError(
        'The requested percent is required to be greater than 0')

if not seriesList:
    return []
name = 'percentileOfSeries(%s,%g)' % (seriesList[0].pathExpression, n)
start, end, step = normalize([seriesList])[1:]
values = [_getPercentile(row, n, interpolate)
          for row in zip_longest(*seriesList)]
resultSeries = TimeSeries(name, start, end, step, values)
resultSeries.pathExpression = name
return [resultSeries]
def percentileOfSeries(requestContext, seriesList, n, interpolate=False)
percentileOfSeries returns a single series which is composed of the n-percentile values taken across a wildcard series at each point. Unless `interpolate` is set to True, percentile values are actual values contained in one of the supplied series.
4.95934
5.05615
0.980853
for series in seriesList:
    series.name = "keepLastValue(%s)" % (series.name)
    series.pathExpression = series.name
    consecutiveNones = 0
    for i, value in enumerate(series):
        series[i] = value

        # No 'keeping' can be done on the first value because we have no
        # idea what came before it.
        if i == 0:
            continue

        if value is None:
            consecutiveNones += 1
        else:
            if 0 < consecutiveNones <= limit:
                # If a non-None value is seen before the limit of Nones is
                # hit, backfill all the missing datapoints with the last
                # known value.
                for index in range(i - consecutiveNones, i):
                    series[index] = series[i - consecutiveNones - 1]
            consecutiveNones = 0

    # If the series ends with some None values, try to backfill a bit to
    # cover it.
    if 0 < consecutiveNones <= limit:
        for index in range(len(series) - consecutiveNones, len(series)):
            series[index] = series[len(series) - consecutiveNones - 1]

return seriesList
def keepLastValue(requestContext, seriesList, limit=INF)
Takes one metric or a wildcard seriesList, and optionally a limit to the number of 'None' values to skip over. Continues the line with the last received value when gaps ('None' values) appear in your data, rather than breaking your line. Example:: &target=keepLastValue(Server01.connections.handled) &target=keepLastValue(Server01.connections.handled, 10)
3.500152
3.813124
0.917922
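The backfill rule in list form: gaps no longer than limit are overwritten with the last known value, including a trailing gap. A reduced, list-only sketch of the loop above:

INF = float('inf')

def keep_last_value(values, limit=INF):
    result = list(values)
    consecutive_nones = 0
    for i, value in enumerate(result):
        if i == 0:
            continue  # nothing known before the first point
        if value is None:
            consecutive_nones += 1
        else:
            if 0 < consecutive_nones <= limit:
                for index in range(i - consecutive_nones, i):
                    result[index] = result[i - consecutive_nones - 1]
            consecutive_nones = 0
    if 0 < consecutive_nones <= limit:  # trailing gap
        for index in range(len(result) - consecutive_nones, len(result)):
            result[index] = result[len(result) - consecutive_nones - 1]
    return result

print(keep_last_value([1, None, None, 4, None], limit=2))
# [1, 1, 1, 4, 4]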
for series in seriesList:
    series.name = "interpolate(%s)" % (series.name)
    series.pathExpression = series.name
    consecutiveNones = 0
    for i, value in enumerate(series):
        series[i] = value

        # No interpolation can be done on the first value because we have
        # no idea what came before it.
        if i == 0:
            continue

        if value is None:
            consecutiveNones += 1
        elif consecutiveNones == 0:
            # Have a value but no need to interpolate
            continue
        elif series[i - consecutiveNones - 1] is None:
            # Have a value but can't interpolate: reset count
            consecutiveNones = 0
            continue
        else:
            # Have a value and can interpolate. If a non-None value is
            # seen before the limit of Nones is hit, backfill all the
            # missing datapoints linearly between the last known value
            # and this one.
            if consecutiveNones > 0 and consecutiveNones <= limit:
                lastIndex = i - consecutiveNones - 1
                lastValue = series[lastIndex]
                for index in range(i - consecutiveNones, i):
                    series[index] = lastValue + (
                        (index - lastIndex) * (value - lastValue) /
                        (consecutiveNones + 1))
            consecutiveNones = 0

return seriesList
def interpolate(requestContext, seriesList, limit=INF)
Takes one metric or a wildcard seriesList, and optionally a limit to the number of 'None' values to skip over. Fills gaps ('None' values) in your data with a linear interpolation between the values on either side of the gap, rather than breaking your line. Example:: &target=interpolate(Server01.connections.handled) &target=interpolate(Server01.connections.handled, 10)
3.836159
4.012927
0.95595
for series in seriesList:
    series.name = series.pathExpression = 'changed(%s)' % series.name
    previous = None
    for index, value in enumerate(series):
        if previous is None:
            series[index] = 0
        elif value is not None and previous != value:
            series[index] = 1
        else:
            series[index] = 0
        previous = value
return seriesList
def changed(requestContext, seriesList)
Takes one metric or a wildcard seriesList. Output 1 when the value changed, 0 when null or the same Example:: &target=changed(Server01.connections.handled)
3.378631
4.030995
0.838163
if len(dividendSeriesList) != len(divisorSeriesList):
    raise ValueError("dividendSeriesList and divisorSeriesList argument "
                     "must have equal length")

results = []

for dividendSeries, divisorSeries in zip(dividendSeriesList,
                                         divisorSeriesList):
    name = "divideSeries(%s,%s)" % (dividendSeries.name, divisorSeries.name)
    bothSeries = (dividendSeries, divisorSeries)
    step = reduce(lcm, [s.step for s in bothSeries])

    for s in bothSeries:
        s.consolidate(step // s.step)

    start = min([s.start for s in bothSeries])
    end = max([s.end for s in bothSeries])
    end -= (end - start) % step

    values = (safeDiv(v1, v2) for v1, v2 in zip(*bothSeries))

    quotientSeries = TimeSeries(name, start, end, step, values)
    results.append(quotientSeries)

return results
def divideSeriesLists(requestContext, dividendSeriesList, divisorSeriesList)
Iterates over two lists and divides list1[0] by list2[0], list1[1] by list2[1] and so on. The lists need to be the same length
2.896017
3.021196
0.958566
if len(divisorSeriesList) == 0:
    for series in dividendSeriesList:
        series.name = "divideSeries(%s,MISSING)" % series.name
        series.pathExpression = series.name
        for i in range(len(series)):
            series[i] = None
    return dividendSeriesList

if len(divisorSeriesList) > 1:
    raise ValueError(
        "divideSeries second argument must reference exactly 1 series"
        " (got {0})".format(len(divisorSeriesList)))

[divisorSeries] = divisorSeriesList
results = []

for dividendSeries in dividendSeriesList:
    name = "divideSeries(%s,%s)" % (dividendSeries.name, divisorSeries.name)
    bothSeries = (dividendSeries, divisorSeries)
    step = reduce(lcm, [s.step for s in bothSeries])

    for s in bothSeries:
        s.consolidate(step // s.step)

    start = min([s.start for s in bothSeries])
    end = max([s.end for s in bothSeries])
    end -= (end - start) % step

    values = (safeDiv(v1, v2) for v1, v2 in zip_longest(*bothSeries))

    quotientSeries = TimeSeries(name, start, end, step, values)
    quotientSeries.pathExpression = name
    results.append(quotientSeries)

return results
def divideSeries(requestContext, dividendSeriesList, divisorSeriesList)
Takes a dividend metric and a divisor metric and draws the division result. A constant may *not* be passed. To divide by a constant, use the scale() function (which is essentially a multiplication operation) and use the inverse of the divisor. (Division by 8 = multiplication by 1/8 or 0.125) Example:: &target=divideSeries(Series.dividends,Series.divisors)
3.201283
3.399921
0.941576
if not seriesLists or not any(seriesLists):
    return []
seriesList, start, end, step = normalize(seriesLists)

if len(seriesList) == 1:
    return seriesList

name = "multiplySeries(%s)" % ','.join([s.name for s in seriesList])
product = map(lambda x: safeMul(*x), zip_longest(*seriesList))
resultSeries = TimeSeries(name, start, end, step, product)
resultSeries.pathExpression = name
return [resultSeries]
def multiplySeries(requestContext, *seriesLists)
Takes two or more series and multiplies their points. A constant may not be used. To multiply by a constant, use the scale() function. Example:: &target=multiplySeries(Series.dividends,Series.divisors)
4.088712
4.536291
0.901334
if isinstance(nodes, int):
    nodes = [nodes]

sortedSeries = {}

for seriesAvg, seriesWeight in zip_longest(seriesListAvg, seriesListWeight):
    key = ''
    for node in nodes:
        key += seriesAvg.name.split(".")[node]
    sortedSeries.setdefault(key, {})
    sortedSeries[key]['avg'] = seriesAvg

    key = ''
    for node in nodes:
        key += seriesWeight.name.split(".")[node]
    sortedSeries.setdefault(key, {})
    sortedSeries[key]['weight'] = seriesWeight

productList = []

for key in sortedSeries:
    if 'weight' not in sortedSeries[key]:
        continue
    if 'avg' not in sortedSeries[key]:
        continue

    seriesWeight = sortedSeries[key]['weight']
    seriesAvg = sortedSeries[key]['avg']

    productValues = [safeMul(val1, val2)
                     for val1, val2 in zip_longest(seriesAvg, seriesWeight)]
    name = 'product(%s,%s)' % (seriesWeight.name, seriesAvg.name)
    productSeries = TimeSeries(name, seriesAvg.start, seriesAvg.end,
                               seriesAvg.step, productValues)
    productSeries.pathExpression = name
    productList.append(productSeries)

if not productList:
    return []

[sumProducts] = sumSeries(requestContext, productList)
[sumWeights] = sumSeries(requestContext, seriesListWeight)

resultValues = [safeDiv(val1, val2)
                for val1, val2 in zip_longest(sumProducts, sumWeights)]
name = "weightedAverage(%s, %s, %s)" % (
    ','.join(sorted(set(s.pathExpression for s in seriesListAvg))),
    ','.join(sorted(set(s.pathExpression for s in seriesListWeight))),
    ','.join(map(str, nodes)))
resultSeries = TimeSeries(name, sumProducts.start, sumProducts.end,
                          sumProducts.step, resultValues)
resultSeries.pathExpression = name
return resultSeries
def weightedAverage(requestContext, seriesListAvg, seriesListWeight, *nodes)
Takes a series of average values and a series of weights and produces a weighted average for all values. The corresponding values should share one or more zero-indexed nodes. Example:: &target=weightedAverage(*.transactions.mean,*.transactions.count,0) &target=weightedAverage(*.transactions.mean,*.transactions.count,1,3,4)
2.314298
2.345124
0.986855
# EMA = C * (current_value) + (1 - C) * EMA
# C = 2 / (windowSize + 1)
# The following was copied from movingAverage, and altered for ema
if not seriesList:
    return []

windowInterval = None
if isinstance(windowSize, six.string_types):
    delta = parseTimeOffset(windowSize)
    windowInterval = abs(delta.seconds + (delta.days * 86400))

# set previewSeconds and constant based on windowSize string or integer
if windowInterval:
    previewSeconds = windowInterval
    constant = (float(2) / (int(windowInterval) + 1))
else:
    previewSeconds = max([s.step for s in seriesList]) * int(windowSize)
    constant = (float(2) / (int(windowSize) + 1))

# ignore original data and pull new, including our preview
# data from earlier is needed to calculate the early results
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
                           timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
result = []

for series in previewList:
    if windowInterval:
        windowPoints = windowInterval // series.step
    else:
        windowPoints = int(windowSize)

    if isinstance(windowSize, six.string_types):
        newName = 'exponentialMovingAverage(%s,"%s")' % (series.name,
                                                         windowSize)
    else:
        newName = "exponentialMovingAverage(%s,%s)" % (series.name,
                                                       windowSize)

    newSeries = TimeSeries(newName, series.start + previewSeconds,
                           series.end, series.step, [])
    newSeries.pathExpression = newName

    window_sum = safeSum(series[:windowPoints]) or 0
    count = safeLen(series[:windowPoints])
    ema = safeDiv(window_sum, count)
    newSeries.append(ema)

    if ema is None:
        ema = 0.0
    else:
        ema = float(ema)

    for i in range(windowPoints, len(series) - 1):
        if series[i] is not None:
            ema = (float(constant) * float(series[i]) +
                   (1 - float(constant)) * float(ema))
            newSeries.append(round(ema, 3))
        else:
            newSeries.append(None)

    result.append(newSeries)
return result
def exponentialMovingAverage(requestContext, seriesList, windowSize)
Takes a series of values and a window size and produces an exponential moving average utilizing the following formula: ema(current) = constant * (Current Value) + (1 - constant) * ema(previous) The Constant is calculated as: constant = 2 / (windowSize + 1) The first period EMA uses a simple moving average for its value. Example:: &target=exponentialMovingAverage(*.transactions.count, 10) &target=exponentialMovingAverage(*.transactions.count, '-10s')
3.930558
3.830369
1.026156
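The recurrence itself is tiny. A standalone sketch seeded, as above, with the simple average of the first window:

def ema(values, window_size):
    # constant c = 2 / (windowSize + 1); ema = c * value + (1 - c) * ema
    c = 2.0 / (window_size + 1)
    result = sum(values[:window_size]) / float(window_size)  # seed
    out = [result]
    for value in values[window_size:]:
        result = c * value + (1 - c) * result
        out.append(round(result, 3))
    return out

print(ema([1, 2, 3, 4, 5, 6], 3))
# [2.0, 3.0, 4.0, 5.0]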
if not seriesList:
    return []

windowInterval = None
if isinstance(windowSize, six.string_types):
    delta = parseTimeOffset(windowSize)
    windowInterval = to_seconds(delta)

if windowInterval:
    previewSeconds = windowInterval
else:
    previewSeconds = max([s.step for s in seriesList]) * int(windowSize)

# ignore original data and pull new, including our preview
# data from earlier is needed to calculate the early results
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
                           timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
result = []

for series in previewList:
    if windowInterval:
        windowPoints = windowInterval // series.step
    else:
        windowPoints = int(windowSize)

    if isinstance(windowSize, six.string_types):
        newName = 'movingMedian(%s,"%s")' % (series.name, windowSize)
    else:
        newName = "movingMedian(%s,%s)" % (series.name, windowSize)

    newSeries = TimeSeries(newName, series.start + previewSeconds,
                           series.end, series.step, [])
    newSeries.pathExpression = newName

    for i in range(windowPoints, len(series)):
        window = series[i - windowPoints:i]
        nonNull = [v for v in window if v is not None]
        if nonNull:
            m_index = len(nonNull) // 2
            newSeries.append(sorted(nonNull)[m_index])
        else:
            newSeries.append(None)
    result.append(newSeries)

return result
def movingMedian(requestContext, seriesList, windowSize)
Graphs the moving median of a metric (or metrics) over a fixed number of past points, or a time interval. Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the median of the preceding datapoints for each point on the graph. Example:: &target=movingMedian(Server.instance01.threads.busy,10) &target=movingMedian(Server.instance*.threads.idle,'5min')
3.872353
4.061271
0.953483
for series in seriesList:
    series.name = "scale(%s,%g)" % (series.name, float(factor))
    series.pathExpression = series.name
    for i, value in enumerate(series):
        series[i] = safeMul(value, factor)
return seriesList
def scale(requestContext, seriesList, factor)
Takes one metric or a wildcard seriesList followed by a constant, and multiplies the datapoint by the constant provided at each point. Example:: &target=scale(Server.instance01.threads.busy,10) &target=scale(Server.instance*.threads.busy,10)
4.197841
5.760663
0.728708
for series in seriesList:
    series.name = "scaleToSeconds(%s,%d)" % (series.name, seconds)
    series.pathExpression = series.name
    factor = seconds * 1.0 / series.step
    for i, value in enumerate(series):
        series[i] = safeMul(value, factor)
return seriesList
def scaleToSeconds(requestContext, seriesList, seconds)
Takes one metric or a wildcard seriesList and returns "value per seconds" where seconds is the last argument to this function. Useful in conjunction with the derivative or integral functions if you want to normalize their result to a known resolution for arbitrary retentions
3.696594
4.105643
0.900369
for series in seriesList:
    series.name = "pow(%s,%g)" % (series.name, float(factor))
    series.pathExpression = series.name
    for i, value in enumerate(series):
        series[i] = safePow(value, factor)
return seriesList
def pow(requestContext, seriesList, factor)
Takes one metric or a wildcard seriesList followed by a constant, and raises the datapoint by the power of the constant provided at each point. Example:: &target=pow(Server.instance01.threads.busy,10) &target=pow(Server.instance*.threads.busy,10)
3.757124
5.397134
0.696133
if not seriesLists or not any(seriesLists):
    return []

seriesList, start, end, step = normalize(seriesLists)
name = "powSeries(%s)" % ','.join([s.name for s in seriesList])
values = []

for row in zip_longest(*seriesList):
    first = True
    tmpVal = None
    for element in row:
        # On the first iteration, seed tmpVal with the element
        if first:
            tmpVal = element
            first = False
        else:
            tmpVal = safePow(tmpVal, element)
    values.append(tmpVal)

series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def powSeries(requestContext, *seriesLists)
Takes two or more series and raises each point of the first series to the power of the corresponding point of each subsequent series, in order. A constant line may be used. Example:: &target=powSeries(Server.instance01.app.requests, Server.instance01.app.replies)
3.95599
4.609529
0.85822
for series in seriesList:
    series.name = "squareRoot(%s)" % (series.name)
    for i, value in enumerate(series):
        series[i] = safePow(value, 0.5)
return seriesList
def squareRoot(requestContext, seriesList)
Takes one metric or a wildcard seriesList, and computes the square root of each datapoint. Example:: &target=squareRoot(Server.instance01.threads.busy)
3.084404
4.664082
0.66131
for series in seriesList:
    series.name = "absolute(%s)" % (series.name)
    series.pathExpression = series.name
    for i, value in enumerate(series):
        series[i] = safeAbs(value)
return seriesList
def absolute(requestContext, seriesList)
Takes one metric or a wildcard seriesList and applies the mathematical abs function to each datapoint transforming it to its absolute value. Example:: &target=absolute(Server.instance01.threads.busy) &target=absolute(Server.instance*.threads.busy)
3.979023
5.239015
0.759498
for series in seriesList:
    series.name = "offset(%s,%g)" % (series.name, float(factor))
    series.pathExpression = series.name
    for i, value in enumerate(series):
        if value is not None:
            series[i] = value + factor
return seriesList
def offset(requestContext, seriesList, factor)
Takes one metric or a wildcard seriesList followed by a constant, and adds the constant to each datapoint. Example:: &target=offset(Server.instance01.threads.busy,10)
3.479943
4.884774
0.712406
for series in seriesList:
    series.name = "offsetToZero(%s)" % (series.name)
    minimum = safeMin(series)
    for i, value in enumerate(series):
        if value is not None:
            series[i] = value - minimum
return seriesList
def offsetToZero(requestContext, seriesList)
Offsets a metric or wildcard seriesList by subtracting the minimum value in the series from each datapoint. Useful to compare different series where the values in each series may be higher or lower on average but you're only interested in the relative difference. An example use case is for comparing different round trip time results. When measuring RTT (like pinging a server), different devices may come back with consistently different results due to network latency which will be different depending on how many network hops between the probe and the device. To compare different devices in the same graph, the network latency to each has to be factored out of the results. This is a shortcut that takes the fastest response (lowest number in the series) and sets that to zero and then offsets all of the other datapoints in that series by that amount. This makes the assumption that the lowest response is the fastest the device can respond, of course the more datapoints that are in the series the more accurate this assumption is. Example:: &target=offsetToZero(Server.instance01.responseTime) &target=offsetToZero(Server.instance*.responseTime)
3.298953
4.802119
0.686979
if not seriesList:
    return []

windowInterval = None
if isinstance(windowSize, six.string_types):
    delta = parseTimeOffset(windowSize)
    windowInterval = to_seconds(delta)

if windowInterval:
    previewSeconds = windowInterval
else:
    previewSeconds = max([s.step for s in seriesList]) * int(windowSize)

# ignore original data and pull new, including our preview
# data from earlier is needed to calculate the early results
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
                           timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
result = []

for series in previewList:
    if windowInterval:
        windowPoints = windowInterval // series.step
    else:
        windowPoints = int(windowSize)

    if isinstance(windowSize, six.string_types):
        newName = 'movingAverage(%s,"%s")' % (series.name, windowSize)
    else:
        newName = "movingAverage(%s,%s)" % (series.name, windowSize)

    newSeries = TimeSeries(newName, series.start + previewSeconds,
                           series.end, series.step, [])
    newSeries.pathExpression = newName

    windowSum = safeSum(series[:windowPoints]) or 0
    count = safeLen(series[:windowPoints])
    newSeries.append(safeDiv(windowSum, count))
    for n, last in enumerate(series[windowPoints:-1]):
        if series[n] is not None:
            windowSum -= series[n]
            count -= 1
        if last is not None:
            windowSum += last
            count += 1
        newSeries.append(safeDiv(windowSum, count))

    result.append(newSeries)
return result
def movingAverage(requestContext, seriesList, windowSize)
Graphs the moving average of a metric (or metrics) over a fixed number of past points, or a time interval. Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the average of the preceding datapoints for each point on the graph. Example:: &target=movingAverage(Server.instance01.threads.busy,10) &target=movingAverage(Server.instance*.threads.idle,'5min')
4.004721
4.215869
0.949916
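The rolling update avoids re-summing the whole window at every step: subtract the value leaving the window, add the value entering it. A reduced, None-free sketch of that O(n) idea:

def moving_average(values, window_points):
    window_sum = float(sum(values[:window_points]))
    out = [window_sum / window_points]
    for n, incoming in enumerate(values[window_points:]):
        window_sum += incoming - values[n]   # slide the window by one step
        out.append(window_sum / window_points)
    return out

print(moving_average([1, 2, 3, 4, 5], 2))
# [1.5, 2.5, 3.5, 4.5]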
if not seriesList:
    return []

windowInterval = None
if isinstance(windowSize, six.string_types):
    delta = parseTimeOffset(windowSize)
    windowInterval = abs(delta.seconds + (delta.days * 86400))

if windowInterval:
    previewSeconds = windowInterval
else:
    previewSeconds = max([s.step for s in seriesList]) * int(windowSize)

# ignore original data and pull new, including our preview
# data from earlier is needed to calculate the early results
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
                           timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
result = []

for series in previewList:
    if windowInterval:
        windowPoints = windowInterval // series.step
    else:
        windowPoints = int(windowSize)

    if isinstance(windowSize, six.string_types):
        newName = 'movingSum(%s,"%s")' % (series.name, windowSize)
    else:
        newName = "movingSum(%s,%s)" % (series.name, windowSize)

    newSeries = TimeSeries(newName, series.start + previewSeconds,
                           series.end, series.step, [])
    newSeries.pathExpression = newName

    window_sum = safeSum(series[:windowPoints])
    newSeries.append(window_sum)
    for n, last in enumerate(series[windowPoints:-1]):
        if series[n] is not None:
            window_sum -= series[n]
        if last is not None:
            window_sum = (window_sum or 0) + last
        newSeries.append(window_sum)

    result.append(newSeries)
return result
def movingSum(requestContext, seriesList, windowSize)
Graphs the moving sum of a metric (or metrics) over a fixed number of past points, or a time interval. Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the sum of the preceding datapoints for each point on the graph. Example:: &target=movingSum(Server.instance01.requests,10) &target=movingSum(Server.instance*.errors,'5min')
3.977894
4.251676
0.935606
if not seriesList:
    return []

windowInterval = None
if isinstance(windowSize, six.string_types):
    delta = parseTimeOffset(windowSize)
    windowInterval = abs(delta.seconds + (delta.days * 86400))

if windowInterval:
    previewSeconds = windowInterval
else:
    previewSeconds = max([s.step for s in seriesList]) * int(windowSize)

# ignore original data and pull new, including our preview
# data from earlier is needed to calculate the early results
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
                           timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
result = []

for series in previewList:
    if windowInterval:
        windowPoints = windowInterval // series.step
    else:
        windowPoints = int(windowSize)

    if isinstance(windowSize, six.string_types):
        newName = 'movingMax(%s,"%s")' % (series.name, windowSize)
    else:
        newName = "movingMax(%s,%s)" % (series.name, windowSize)

    newSeries = TimeSeries(newName, series.start + previewSeconds,
                           series.end, series.step, [])
    newSeries.pathExpression = newName

    for i in range(windowPoints, len(series)):
        window = series[i - windowPoints:i]
        newSeries.append(safeMax(window))
    result.append(newSeries)

return result
def movingMax(requestContext, seriesList, windowSize)
Graphs the moving maximum of a metric (or metrics) over a fixed number of past points, or a time interval. Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the maximum of the preceding datapoints for each point on the graph. Example:: &target=movingMax(Server.instance01.requests,10) &target=movingMax(Server.instance*.errors,'5min')
4.083945
4.418615
0.924259
for series in seriesList:
    # datalib will throw an exception, so it's not necessary to validate
    # here
    series.consolidationFunc = consolidationFunc
    series.name = 'consolidateBy(%s,"%s")' % (series.name,
                                              series.consolidationFunc)
    series.pathExpression = series.name
return seriesList
def consolidateBy(requestContext, seriesList, consolidationFunc)
Takes one metric or a wildcard seriesList and a consolidation function name. Valid function names are 'sum', 'average', 'min', and 'max'. When a graph is drawn where the width of the graph in pixels is smaller than the number of datapoints to be graphed, Graphite consolidates the values to prevent line overlap. The consolidateBy() function changes the consolidation function from the default of 'average' to one of 'sum', 'max', or 'min'. This is especially useful in sales graphs, where fractional values make no sense and a 'sum' of consolidated values is appropriate. Example:: &target=consolidateBy(Sales.widgets.largeBlue, 'sum') &target=consolidateBy(Servers.web01.sda1.free_space, 'max')
5.764908
7.52754
0.765842
results = []
for series in seriesList:
    newValues = []
    prev = []
    for val in series:
        if len(prev) < steps:
            newValues.append(None)
            prev.append(val)
            continue
        newValues.append(prev.pop(0))
        prev.append(val)
    newName = "delay(%s,%d)" % (series.name, steps)
    newSeries = TimeSeries(newName, series.start, series.end, series.step,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)
return results
def delay(requestContext, seriesList, steps)
This shifts all samples later by an integer number of steps. This can be used for custom derivative calculations, among other things. Note: this will pad the early end of the data with None for every step shifted. This complements other time-displacement functions such as timeShift and timeSlice, in that this function is indifferent about the step intervals being shifted. Example:: &target=divideSeries(server.FreeSpace,delay(server.FreeSpace,1)) This computes the change in server free space as a percentage of the previous free space.
2.678699
3.003256
0.891932
results = []
for series in seriesList:
    newValues = []
    current = 0.0
    for val in series:
        if val is None:
            newValues.append(None)
        else:
            current += val
            newValues.append(current)
    newName = "integral(%s)" % series.name
    newSeries = TimeSeries(newName, series.start, series.end, series.step,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)
return results
def integral(requestContext, seriesList)
This will show the sum over time, sort of like a continuous addition function. Useful for finding totals or trends in metrics that are collected per minute. Example:: &target=integral(company.sales.perMinute) This would start at zero on the left side of the graph, adding the sales each minute, and show the total sales for the time period selected at the right side, (time now, or the time specified by '&until=').
2.712554
2.910528
0.93198
intervalDuration = int(to_seconds(parseTimeOffset(intervalUnit)))
startTime = int(epoch(requestContext['startTime']))
results = []
for series in seriesList:
    newValues = []
    # current time within series iteration
    currentTime = series.start
    # current accumulated value
    current = 0.0
    for val in series:
        # reset integral value if crossing an interval boundary
        if (
            ((currentTime - startTime) // intervalDuration) !=
            ((currentTime - startTime - series.step) // intervalDuration)
        ):
            current = 0.0
        if val is None:
            # keep previous value since val can be None when resetting
            # current to 0.0
            newValues.append(current)
        else:
            current += val
            newValues.append(current)
        currentTime += series.step
    newName = "integralByInterval(%s,'%s')" % (series.name, intervalUnit)
    newSeries = TimeSeries(newName, series.start, series.end, series.step,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)
return results
def integralByInterval(requestContext, seriesList, intervalUnit)
This will do the same as the integral() function, except resetting the total to 0 at each boundary of the interval given by the "intervalUnit" parameter. Useful for finding totals per hour/day/week/etc. Example:: &target=integralByInterval(company.sales.perMinute, "1d")&from=midnight-10days This would start at zero on the left side of the graph, adding the sales each minute, and show the evolution of sales per day during the last 10 days.
4.274975
4.521508
0.945476
results = []
for series in seriesList:
    newValues = []
    prev = None
    for val in series:
        if None in (prev, val):
            newValues.append(None)
            prev = val
            continue
        diff = val - prev
        if diff >= 0:
            newValues.append(diff)
        elif maxValue is not None and maxValue >= val:
            newValues.append((maxValue - prev) + val + 1)
        else:
            newValues.append(None)
        prev = val
    newName = "nonNegativeDerivative(%s)" % series.name
    newSeries = TimeSeries(newName, series.start, series.end, series.step,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)
return results
def nonNegativeDerivative(requestContext, seriesList, maxValue=None)
Same as the derivative function above, but ignores datapoints that trend down. Useful for counters that increase for a long time, then wrap or reset (such as if a network interface is destroyed and recreated by unloading and re-loading a kernel module, common with USB / WiFi cards). Example:: &target=nonNegativeDerivative(company.server.application01.ifconfig.TXPackets)
2.655868
2.913683
0.911516
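The counter-wrap branch in nonNegativeDerivative is the subtle part: when a value drops and maxValue is given, the delta is the distance to the wrap point plus the restart from zero. A self-contained sketch (the non_negative_deltas name is hypothetical):

def non_negative_deltas(values, max_value=None):
    # mirrors the record's loop on a plain list
    out, prev = [], None
    for val in values:
        if None in (prev, val):
            out.append(None)
        elif val >= prev:
            out.append(val - prev)
        elif max_value is not None and max_value >= val:
            # counter wrapped: distance to the wrap point, plus the restart
            out.append((max_value - prev) + val + 1)
        else:
            out.append(None)  # dropped, but the counter's maximum is unknown
        prev = val
    return out

print(non_negative_deltas([250, 253, 2, 5], max_value=255))
# [None, 3, 5, 3] -- the wrap from 253 to 2 counts (255 - 253) + 2 + 1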
if 'totalStack' in requestContext:
    totalStack = requestContext['totalStack'].get(stackName, [])
else:
    requestContext['totalStack'] = {}
    totalStack = []
results = []
for series in seriesLists:
    newValues = []
    for i in range(len(series)):
        if len(totalStack) <= i:
            totalStack.append(0)
        if series[i] is not None:
            totalStack[i] += series[i]
            newValues.append(totalStack[i])
        else:
            newValues.append(None)
    # Work-around for the case when legend is set
    if stackName == '__DEFAULT__':
        newName = "stacked(%s)" % series.name
    else:
        newName = series.name
    newSeries = TimeSeries(newName, series.start, series.end, series.step,
                           newValues)
    newSeries.options['stacked'] = True
    newSeries.pathExpression = newName
    results.append(newSeries)
requestContext['totalStack'][stackName] = totalStack
return results
def stacked(requestContext, seriesLists, stackName='__DEFAULT__')
Takes one metric or a wildcard seriesList and changes them so they are stacked. This is a way of stacking just a couple of metrics without having to use the stacked area mode (which stacks everything), so a mixed stacked and non-stacked graph can be made. It can also take an optional argument with a name of the stack, in case there is more than one, e.g. for input and output metrics. Example:: &target=stacked(company.server.application01.ifconfig.TXPackets, 'tx')
2.672555
2.912284
0.917684
if len(seriesLists) == 1:
    [seriesLists] = seriesLists
assert len(seriesLists) == 2, ("areaBetween series argument must "
                               "reference *exactly* 2 series")
lower, upper = seriesLists
if len(lower) == 1:
    [lower] = lower
if len(upper) == 1:
    [upper] = upper
lower.options['stacked'] = True
lower.options['invisible'] = True
upper.options['stacked'] = True
lower.name = upper.name = "areaBetween(%s)" % upper.pathExpression
return [lower, upper]
def areaBetween(requestContext, *seriesLists)
Draws the vertical area in between the two series in seriesList. Useful for visualizing a range such as the minimum and maximum latency for a service. areaBetween expects **exactly one argument** that results in exactly two series (see example below). The order of the lower and higher values series does not matter. The visualization only works when used in conjunction with ``areaMode=stacked``. The most likely use case is to provide a band within which another metric should move. In such a case, applying an ``alpha()``, as in the second example, gives the best visual results. Example:: &target=areaBetween(service.latency.{min,max})&areaMode=stacked &target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked If, for instance, you need to build a seriesList, you should use the ``group`` function, like so:: &target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max)))
3.752607
4.971285
0.754856
try:
    seriesList.name = re.sub(search, replace, seriesList.name)
except AttributeError:
    for series in seriesList:
        series.name = re.sub(search, replace, series.name)
return seriesList
def aliasSub(requestContext, seriesList, search, replace)
Runs series names through a regex search/replace. Example:: &target=aliasSub(ip.*TCP*,"^.*TCP(\d+)","\\1")
2.111422
2.916682
0.723912
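aliasSub's search/replace is a plain re.sub over the series name. For instance, the pattern from the docstring applied to a bare string (a standalone sketch; the sample name is made up):

import re
print(re.sub(r"^.*TCP(\d+)", r"\1", "ip.192-168-0-1.TCP80"))  # 80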
try:
    seriesList.name = newName
except AttributeError:
    for series in seriesList:
        series.name = newName
return seriesList
def alias(requestContext, seriesList, newName)
Takes one metric or a wildcard seriesList and a string in quotes. Prints the string instead of the metric name in the legend. Example:: &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
3.354186
6.430655
0.521593
def fmt(x):
    if system:
        if units:
            return "%.2f %s" % format_units(x, system=system, units=units)
        else:
            return "%.2f%s" % format_units(x, system=system)
    else:
        if units:
            return "%.2f %s" % (x, units)
        else:
            return "%.2f" % x
nameLen = max([0] + [len(series.name) for series in seriesList])
lastLen = max([0] + [len(fmt(int(safeLast(series) or 3)))
                     for series in seriesList]) + 3
maxLen = max([0] + [len(fmt(int(safeMax(series) or 3)))
                    for series in seriesList]) + 3
minLen = max([0] + [len(fmt(int(safeMin(series) or 3)))
                    for series in seriesList]) + 3
for series in seriesList:
    last = safeLast(series)
    maximum = safeMax(series)
    minimum = safeMin(series)
    if last is None:
        last = NAN
    else:
        last = fmt(float(last))
    if maximum is None:
        maximum = NAN
    else:
        maximum = fmt(float(maximum))
    if minimum is None:
        minimum = NAN
    else:
        minimum = fmt(float(minimum))
    series.name = "%*s Current:%*s Max:%*s Min:%*s " % (
        -nameLen, series.name, -lastLen, last, -maxLen, maximum,
        -minLen, minimum)
return seriesList
def cactiStyle(requestContext, seriesList, system=None, units=None)
Takes a series list and modifies the aliases to provide column-aligned output with Current, Max, and Min values in the style of cacti. Optionally takes a "system" value to apply unit formatting in the same style as the Y-axis, or a "units" string to append an arbitrary unit suffix. NOTE: column alignment only works with monospace fonts such as terminus. Example:: &target=cactiStyle(ganglia.*.net.bytes_out,"si") &target=cactiStyle(ganglia.*.net.bytes_out,"si","b")
2.10303
2.1176
0.99312
tokens = grammar.parseString(name)
pathExpression = None
while pathExpression is None:
    if tokens.pathExpression:
        pathExpression = tokens.pathExpression
    elif tokens.expression:
        tokens = tokens.expression
    elif tokens.call:
        tokens = tokens.call.args[0]
    else:
        break
return pathExpression
def _getFirstPathExpression(name)
Returns the first metric path in an expression.
3.406969
3.565708
0.955482
for series in seriesList:
    pathExpression = _getFirstPathExpression(series.name)
    metric_pieces = pathExpression.split('.')
    series.name = '.'.join(metric_pieces[n] for n in nodes)
return seriesList
def aliasByNode(requestContext, seriesList, *nodes)
Takes a seriesList and applies an alias derived from one or more "node" portion/s of the target name. Node indices are 0 indexed. Example:: &target=aliasByNode(ganglia.*.cpu.load5,1)
6.218773
8.864464
0.70154
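aliasByNode's renaming is simple dot-splitting on the first path expression. For the docstring's example, node 1 of ganglia.*.cpu.load5 picks out the host name (a sketch on a plain string; the sample name is made up):

name = "ganglia.server1.cpu.load5"
nodes = (1,)  # as in aliasByNode(ganglia.*.cpu.load5, 1)
print('.'.join(name.split('.')[n] for n in nodes))  # server1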
valueFuncs = {
    'avg': lambda s: safeDiv(safeSum(s), safeLen(s)),
    'total': safeSum,
    'min': safeMin,
    'max': safeMax,
    'last': safeLast,
}
system = None
if valueTypes[-1] in ('si', 'binary'):
    system = valueTypes[-1]
    valueTypes = valueTypes[:-1]
for valueType in valueTypes:
    valueFunc = valueFuncs.get(valueType, lambda s: '(?)')
    if system is None:
        for series in seriesList:
            series.name += " (%s: %s)" % (valueType, valueFunc(series))
    else:
        for series in seriesList:
            value = valueFunc(series)
            formatted = None
            if value is not None:
                formatted = "%.2f%s" % format_units(value, system=system)
            series.name = "%-20s%-5s%-10s" % (series.name, valueType,
                                              formatted)
return seriesList
def legendValue(requestContext, seriesList, *valueTypes)
Takes one metric or a wildcard seriesList and one or more value types in quotes. Appends the value to the metric name in the legend. Currently one or several of: `last`, `avg`, `total`, `min`, `max`. The last argument can be `si` (default) or `binary`, in which case values will be formatted in the corresponding system. Example:: &target=legendValue(Sales.widgets.largeBlue, 'avg', 'max', 'si')
3.210658
2.94787
1.089145
for series in seriesList:
    series.options['alpha'] = alpha
return seriesList
def alpha(requestContext, seriesList, alpha)
Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1.
4.185523
5.684774
0.736269
for series in seriesList:
    series.color = theColor
return seriesList
def color(requestContext, seriesList, theColor)
Assigns the given color to the seriesList. Example:: &target=color(collectd.hostname.cpu.0.user, 'green') &target=color(collectd.hostname.cpu.0.system, 'ff0000') &target=color(collectd.hostname.cpu.0.idle, 'gray') &target=color(collectd.hostname.cpu.0.idle, '6464ffaa')
3.045916
11.348255
0.268404
for series in seriesList:
    left = series.name.rfind('(') + 1
    right = series.name.find(')')
    if right < 0:
        right = len(series.name) + 1
    cleanName = series.name[left:right:].split('.')
    if int(stop) == 0:
        series.name = '.'.join(cleanName[int(start)::])
    else:
        series.name = '.'.join(cleanName[int(start):int(stop):])
    # substr(func(a.b,'c'),1) becomes b instead of b,'c'
    series.name = re.sub(',.*$', '', series.name)
return seriesList
def substr(requestContext, seriesList, start=0, stop=0)
Takes one metric or a wildcard seriesList followed by 1 or 2 integers. Assume that the metric name is a list or array, with each element separated by dots. Prints elements n through the end of the array (if only one integer n is passed) or elements n through m - 1 (if two integers n and m are passed). The list starts with element 0 and ends with element (length - 1). Example:: &target=substr(carbon.agents.hostname.avgUpdateTime,2,4) The label would be printed as "hostname.avgUpdateTime".
3.93625
4.769338
0.825324
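substr's slicing follows Python list-slice semantics (the stop index is exclusive), so the docstring's example reduces to this sketch on a plain string:

name = "carbon.agents.hostname.avgUpdateTime"
print('.'.join(name.split('.')[2:4]))  # hostname.avgUpdateTime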
results = []
for series in seriesList:
    newValues = []
    for val in series:
        if val is None:
            newValues.append(None)
        elif val <= 0:
            newValues.append(None)
        else:
            newValues.append(math.log(val, base))
    newName = "log(%s, %s)" % (series.name, base)
    newSeries = TimeSeries(newName, series.start, series.end, series.step,
                           newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)
return results
def logarithm(requestContext, seriesList, base=10)
Takes one metric or a wildcard seriesList, a base, and draws the y-axis in logarithmic format. If base is omitted, the function defaults to base 10. Example:: &target=log(carbon.agents.hostname.avgUpdateTime,2)
2.229671
2.535084
0.879526
results = []
for series in seriesList:
    val = safeMax(series)
    if val is None or val <= n:
        results.append(series)
return results
def maximumBelow(requestContext, seriesList, n)
Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a maximum value below n. Example:: &target=maximumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which always sent less than 1000 packets/min.
3.71496
5.778328
0.642913
results = []
for series in seriesList:
    val = safeMin(series)
    if val is None or val <= n:
        results.append(series)
return results
def minimumBelow(requestContext, seriesList, n)
Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a minimum value below n. Example:: &target=minimumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which at some point sent less than 1000 packets/min.
3.487108
5.454971
0.639253
result_list = sorted(seriesList, key=lambda s: safeMax(s))[-n:]
return sorted(result_list, key=lambda s: max(s), reverse=True)
def highestMax(requestContext, seriesList, n=1)
Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the N metrics with the highest maximum value in the time period specified. Example:: &target=highestMax(server*.instance*.threads.busy,5) Draws the top 5 servers who have had the most busy threads during the time period specified.
4.401247
6.874042
0.640271
results = []
for series in seriesList:
    val = safeLast(series)
    if val is not None and val >= n:
        results.append(series)
return results
def currentAbove(requestContext, seriesList, n)
Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics whose value is above N at the end of the time period specified. Example:: &target=currentAbove(server*.instance*.threads.busy,50) Draws the servers with more than 50 busy threads.
3.607757
5.212544
0.69213
results = []
for series in seriesList:
    val = safeAvg(series)
    if val is not None and val >= n:
        results.append(series)
return results
def averageAbove(requestContext, seriesList, n)
Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics with an average value above N for the time period specified. Example:: &target=averageAbove(server*.instance*.threads.busy,25) Draws the servers with average values above 25.
3.481952
4.796837
0.725885
sortedPoints = sorted(not_none(points))
if len(sortedPoints) == 0:
    return None
fractionalRank = (n / 100.0) * (len(sortedPoints) + 1)
rank = int(fractionalRank)
rankFraction = fractionalRank - rank
if not interpolate:
    rank += int(math.ceil(rankFraction))
if rank == 0:
    percentile = sortedPoints[0]
elif rank - 1 == len(sortedPoints):
    percentile = sortedPoints[-1]
else:
    percentile = sortedPoints[rank - 1]  # Adjust for 0-index
if interpolate:
    if rank != len(sortedPoints):  # if a next value exists
        nextValue = sortedPoints[rank]
        percentile = percentile + rankFraction * (nextValue - percentile)
return percentile
def _getPercentile(points, n, interpolate=False)
Percentile is calculated using the method outlined in the NIST Engineering Statistics Handbook: http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm
2.9654
2.960524
1.001647
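For intuition, here is the same NIST rank arithmetic worked by hand on ten sorted points (a standalone sketch, not the helper itself):

import math
pts = [2, 4, 4, 4, 5, 5, 7, 9, 10, 12]
n = 90
fractional_rank = (n / 100.0) * (len(pts) + 1)  # 9.9
rank = int(fractional_rank)
frac = fractional_rank - rank
# without interpolation, any fractional part bumps the rank up
print(pts[rank + math.ceil(frac) - 1])                     # 12
# with interpolation, blend between the 9th and 10th sorted values
print(pts[rank - 1] + frac * (pts[rank] - pts[rank - 1]))  # ~11.8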
assert n, 'The requested percent is required to be greater than 0'
results = []
for s in seriesList:
    # Create a sorted copy of the TimeSeries excluding None values in the
    # values list.
    s_copy = TimeSeries(s.name, s.start, s.end, s.step,
                        sorted(not_none(s)))
    if not s_copy:
        continue  # Skip this series because it is empty.
    perc_val = _getPercentile(s_copy, n)
    if perc_val is not None:
        name = 'nPercentile(%s, %g)' % (s_copy.name, n)
        point_count = int((s.end - s.start) / s.step)
        perc_series = TimeSeries(name, s_copy.start, s_copy.end,
                                 s_copy.step, [perc_val] * point_count)
        perc_series.pathExpression = name
        results.append(perc_series)
return results
def nPercentile(requestContext, seriesList, n)
Returns the nth percentile of each series in the seriesList, drawn as a constant line.
3.567999
3.563801
1.001178
averages = [safeAvg(s) for s in seriesList]
if n < 50:
    n = 100 - n
lowPercentile = _getPercentile(averages, 100 - n)
highPercentile = _getPercentile(averages, n)
return [s for s in seriesList
        if not lowPercentile < safeAvg(s) < highPercentile]
def averageOutsidePercentile(requestContext, seriesList, n)
Removes series whose average value lies inside the given percentile interval.
3.290918
3.277635
1.004053
if n < 50:
    n = 100 - n
transposed = list(zip_longest(*seriesList))
lowPercentiles = [_getPercentile(col, 100 - n) for col in transposed]
highPercentiles = [_getPercentile(col, n) for col in transposed]
return [l for l in seriesList
        if sum([not lowPercentiles[index] < val < highPercentiles[index]
                for index, val in enumerate(l)]) > 0]
def removeBetweenPercentile(requestContext, seriesList, n)
Removes series that do not have at least one value lying outside the nth percentile of all values at some point in time.
3.588663
3.495529
1.026644
for s in seriesList:
    s.name = 'removeAboveValue(%s, %g)' % (s.name, n)
    s.pathExpression = s.name
    for (index, val) in enumerate(s):
        if val is None:
            continue
        if val > n:
            s[index] = None
return seriesList
def removeAboveValue(requestContext, seriesList, n)
Removes data above the given threshold from the series or list of series provided. Values above this threshold are assigned a value of None.
3.502664
3.553268
0.985758
for s in seriesList:
    s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n)
    s.pathExpression = s.name
    try:
        percentile = nPercentile(requestContext, [s], n)[0][0]
    except IndexError:
        continue
    for (index, val) in enumerate(s):
        if val is None:
            continue
        if val < percentile:
            s[index] = None
return seriesList
def removeBelowPercentile(requestContext, seriesList, n)
Removes data below the nth percentile from the series or list of series provided. Values below this percentile are assigned a value of None.
3.494081
3.687175
0.947631
if natural:
    return list(sorted(seriesList, key=lambda x: paddedName(x.name)))
else:
    return list(sorted(seriesList, key=lambda x: x.name))
def sortByName(requestContext, seriesList, natural=False)
Takes one metric or a wildcard seriesList. Sorts the list of metrics by the metric name using either alphabetical order or natural sorting. Natural sorting allows names containing numbers to be sorted more naturally, e.g: - Alphabetical sorting: server1, server11, server12, server2 - Natural sorting: server1, server2, server11, server12
2.708469
3.360578
0.805953
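The paddedName helper used by sortByName is not part of this record; one common way to build such a natural-order key is to zero-pad digit runs. The sketch below is an assumption about the technique, not the library's actual helper:

import re
def natural_key(name):
    # pad every run of digits so lexicographic order matches numeric order
    return re.sub(r'\d+', lambda m: m.group().zfill(10), name)

names = ['server12', 'server1', 'server2', 'server11']
print(sorted(names))                   # server1, server11, server12, server2
print(sorted(names, key=natural_key))  # server1, server2, server11, server12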
return list(sorted(seriesList, key=safeSum, reverse=True))
def sortByTotal(requestContext, seriesList)
Takes one metric or a wildcard seriesList. Sorts the list of metrics by the sum of values across the time period specified.
7.948934
12.299452
0.646284
newSeries = []
for series in seriesList:
    newname = re.sub(search, replace, series.name)
    if safeMax(series) > value:
        n = evaluateTarget(requestContext, newname)
        if n is not None and len(n) > 0:
            newSeries.append(n[0])
return newSeries
def useSeriesAbove(requestContext, seriesList, value, search, replace)
Compares the maximum of each series against the given `value`. If the series maximum is greater than `value`, the regular expression search and replace is applied against the series name to plot a related metric. e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'), the response time metric will be plotted only when the maximum value of the corresponding request/s metric is > 10 Example:: &target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time")
4.569159
5.076903
0.899989
deviants = []
for series in seriesList:
    mean = safeAvg(series)
    if mean is None:
        continue
    square_sum = sum([(value - mean) ** 2 for value in series
                      if value is not None])
    sigma = safeDiv(square_sum, safeLen(series))
    if sigma is None:
        continue
    deviants.append((sigma, series))
return [series for sig, series in sorted(deviants,  # sort by sigma
                                         key=itemgetter(0),
                                         reverse=True)][:n]
def mostDeviant(requestContext, seriesList, n)
Takes one metric or a wildcard seriesList followed by an integer N. Draws the N most deviant metrics. To find the deviants, the standard deviation (sigma) of each series is taken and ranked. The top N standard deviations are returned. Example:: &target=mostDeviant(server*.instance*.memory.free, 5) Draws the 5 instances furthest from the average memory free.
3.709413
3.482422
1.065182
# For this we take the standard deviation in terms of the moving average
# and the moving average of series squares.
for seriesIndex, series in enumerate(seriesList):
    stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(points)),
                             series.start, series.end, series.step, [])
    stdevSeries.pathExpression = "stdev(%s,%d)" % (series.name,
                                                   int(points))
    validPoints = 0
    currentSum = 0
    currentSumOfSquares = 0
    for index, newValue in enumerate(series):
        # Mark whether we've reached our window size - dont drop points
        # out otherwise
        if index < points:
            bootstrapping = True
            droppedValue = None
        else:
            bootstrapping = False
            droppedValue = series[index - points]
        # Track non-None points in window
        if not bootstrapping and droppedValue is not None:
            validPoints -= 1
        if newValue is not None:
            validPoints += 1
        # Remove the value that just dropped out of the window
        if not bootstrapping and droppedValue is not None:
            currentSum -= droppedValue
            currentSumOfSquares -= droppedValue ** 2
        # Add in the value that just popped in the window
        if newValue is not None:
            currentSum += newValue
            currentSumOfSquares += newValue ** 2
        if (
            validPoints > 0 and
            float(validPoints) / points >= windowTolerance
        ):
            try:
                deviation = math.sqrt(validPoints * currentSumOfSquares -
                                      currentSum ** 2) / validPoints
            except ValueError:
                deviation = None
            stdevSeries.append(deviation)
        else:
            stdevSeries.append(None)
    seriesList[seriesIndex] = stdevSeries
return seriesList
def stdev(requestContext, seriesList, points, windowTolerance=0.1)
Takes one metric or a wildcard seriesList followed by an integer N. Draw the Standard Deviation of all metrics passed for the past N datapoints. If the ratio of null points in the window is greater than windowTolerance, skip the calculation. The default for windowTolerance is 0.1 (up to 10% of points in the window can be missing). Note that if this is set to 0.0, it will cause large gaps in the output anywhere a single point is missing. Example:: &target=stdev(server*.instance*.threads.busy,30) &target=stdev(server*.instance*.cpu.system,30,0.0)
3.592196
3.696762
0.971714
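The O(1) window update in stdev relies on the identity stddev = sqrt(n * sum(x**2) - sum(x)**2) / n, i.e. the population standard deviation expressed through running sums, so only the entering and leaving points need to be touched. A quick numeric check against the naive formula:

import math
window = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
n = len(window)
s, ss = sum(window), sum(x * x for x in window)
fast = math.sqrt(n * ss - s * s) / n
mean = s / n
naive = math.sqrt(sum((x - mean) ** 2 for x in window) / n)
print(fast, naive)  # both 2.0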
for series in seriesList:
    series.options['secondYAxis'] = True
    series.name = 'secondYAxis(%s)' % series.name
return seriesList
def secondYAxis(requestContext, seriesList)
Graph the series on the secondary Y axis.
4.020801
4.420883
0.909502
previewSeconds = 7 * 86400  # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
                           timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
results = []
for series in previewList:
    analysis = holtWintersAnalysis(series)
    predictions = analysis['predictions']
    windowPoints = previewSeconds // predictions.step
    result = TimeSeries("holtWintersForecast(%s)" % series.name,
                        predictions.start + previewSeconds,
                        predictions.end, predictions.step,
                        predictions[windowPoints:])
    result.pathExpression = result.name
    results.append(result)
return results
def holtWintersForecast(requestContext, seriesList)
Performs a Holt-Winters forecast using the series as input data. Data from one week previous to the series is used to bootstrap the initial forecast.
5.908636
6.386492
0.925177
previewSeconds = 7 * 86400  # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
                           timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext['args'][0])
results = []
for series in previewList:
    analysis = holtWintersAnalysis(series)

    data = analysis['predictions']
    windowPoints = previewSeconds // data.step
    forecast = TimeSeries(data.name, data.start + previewSeconds,
                          data.end, data.step, data[windowPoints:])
    forecast.pathExpression = data.pathExpression

    data = analysis['deviations']
    windowPoints = previewSeconds // data.step
    deviation = TimeSeries(data.name, data.start + previewSeconds,
                           data.end, data.step, data[windowPoints:])
    deviation.pathExpression = data.pathExpression

    seriesLength = len(forecast)
    i = 0
    upperBand = list()
    lowerBand = list()
    while i < seriesLength:
        forecast_item = forecast[i]
        deviation_item = deviation[i]
        i = i + 1
        if forecast_item is None or deviation_item is None:
            upperBand.append(None)
            lowerBand.append(None)
        else:
            scaled_deviation = delta * deviation_item
            upperBand.append(forecast_item + scaled_deviation)
            lowerBand.append(forecast_item - scaled_deviation)
    upperName = "holtWintersConfidenceUpper(%s)" % series.name
    lowerName = "holtWintersConfidenceLower(%s)" % series.name
    upperSeries = TimeSeries(upperName, forecast.start, forecast.end,
                             forecast.step, upperBand)
    lowerSeries = TimeSeries(lowerName, forecast.start, forecast.end,
                             forecast.step, lowerBand)
    upperSeries.pathExpression = series.pathExpression
    lowerSeries.pathExpression = series.pathExpression
    results.append(lowerSeries)
    results.append(upperSeries)
return results
def holtWintersConfidenceBands(requestContext, seriesList, delta=3)
Performs a Holt-Winters forecast using the series as input data and plots upper and lower bands with the predicted forecast deviations.
2.696231
2.694159
1.000769
results = []
for series in seriesList:
    confidenceBands = holtWintersConfidenceBands(requestContext, [series],
                                                 delta)
    lowerBand = confidenceBands[0]
    upperBand = confidenceBands[1]
    aberration = list()
    for i, actual in enumerate(series):
        if actual is None:
            aberration.append(0)
        elif upperBand[i] is not None and actual > upperBand[i]:
            aberration.append(actual - upperBand[i])
        elif lowerBand[i] is not None and actual < lowerBand[i]:
            aberration.append(actual - lowerBand[i])
        else:
            aberration.append(0)
    newName = "holtWintersAberration(%s)" % series.name
    results.append(TimeSeries(newName, series.start, series.end,
                              series.step, aberration))
return results
def holtWintersAberration(requestContext, seriesList, delta=3)
Performs a Holt-Winters forecast using the series as input data and plots the positive or negative deviation of the series data from the forecast.
2.212947
2.262194
0.97823
bands = holtWintersConfidenceBands(requestContext, seriesList, delta)
results = areaBetween(requestContext, bands)
for series in results:
    series.name = series.name.replace('areaBetween',
                                      'holtWintersConfidenceArea')
return results
def holtWintersConfidenceArea(requestContext, seriesList, delta=3)
Performs a Holt-Winters forecast using the series as input data and plots the area between the upper and lower bands of the predicted forecast deviations.
4.242509
4.647117
0.912934
n = safeLen(series)
sumI = sum([i for i, v in enumerate(series) if v is not None])
sumV = sum([v for i, v in enumerate(series) if v is not None])
sumII = sum([i * i for i, v in enumerate(series) if v is not None])
sumIV = sum([i * v for i, v in enumerate(series) if v is not None])
denominator = float(n * sumII - sumI * sumI)
if denominator == 0:
    return None
else:
    factor = (n * sumIV - sumI * sumV) / denominator / series.step
    offset = sumII * sumV - sumIV * sumI
    offset = offset / denominator - factor * series.start
    return factor, offset
def linearRegressionAnalysis(series)
Returns the factor (slope) and offset (intercept) of the linear regression line, computed by the least-squares method.
2.582288
2.402622
1.074779
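The closed-form slope and offset above follow the usual least-squares normal equations; the extra division by series.step and the rebasing by series.start merely convert from index space into timestamp space. A standalone numeric check on plain (index, value) pairs:

points = [(0, 1.0), (1, 3.0), (2, 5.0), (3, 7.0)]
n = len(points)
sum_i = sum(i for i, v in points)
sum_v = sum(v for i, v in points)
sum_ii = sum(i * i for i, v in points)
sum_iv = sum(i * v for i, v in points)
denom = float(n * sum_ii - sum_i * sum_i)
slope = (n * sum_iv - sum_i * sum_v) / denom
intercept = (sum_ii * sum_v - sum_iv * sum_i) / denom
print(slope, intercept)  # 2.0 1.0 -- recovers v = 2*i + 1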
from .app import evaluateTarget
results = []
sourceContext = requestContext.copy()
if startSourceAt is not None:
    sourceContext['startTime'] = parseATTime(startSourceAt)
if endSourceAt is not None:
    sourceContext['endTime'] = parseATTime(endSourceAt)
sourceList = []
for series in seriesList:
    source = evaluateTarget(sourceContext, series.pathExpression)
    sourceList.extend(source)
for source, series in zip(sourceList, seriesList):
    newName = 'linearRegression(%s, %s, %s)' % (
        series.name,
        int(epoch(sourceContext['startTime'])),
        int(epoch(sourceContext['endTime'])))
    forecast = linearRegressionAnalysis(source)
    if forecast is None:
        continue
    factor, offset = forecast
    values = [offset + (series.start + i * series.step) * factor
              for i in range(len(series))]
    newSeries = TimeSeries(newName, series.start, series.end,
                           series.step, values)
    newSeries.pathExpression = newSeries.name
    results.append(newSeries)
return results
def linearRegression(requestContext, seriesList, startSourceAt=None, endSourceAt=None)
Graphs the linear regression function computed by the least-squares method. Takes one metric or a wildcard seriesList, followed by a quoted string with the time to start the line and another quoted string with the time to end the line. The start and end times are inclusive (the default range is ``from`` to ``until``). See ``from / until`` in the render\_api_ for examples of time formats. Datapoints in the range are used for the regression. Example:: &target=linearRegression(Server.instance01.threads.busy,'-1d') &target=linearRegression(Server.instance*.threads.busy, "00:00 20140101","11:59 20140630")
3.204118
3.547956
0.903089
for series in seriesList:
    series.options['drawAsInfinite'] = True
    series.name = 'drawAsInfinite(%s)' % series.name
return seriesList
def drawAsInfinite(requestContext, seriesList)
Takes one metric or a wildcard seriesList. If the value is zero, draw the line at 0. If the value is above zero, draw the line at infinity. If the value is null or less than zero, do not draw the line. Useful for displaying on/off metrics, such as exit codes. (0 = success, anything else = failure.) Example:: drawAsInfinite(Testing.script.exitCode)
3.794523
6.635935
0.571814
for series in seriesList:
    series.options['lineWidth'] = width
return seriesList
def lineWidth(requestContext, seriesList, width)
Takes one metric or a wildcard seriesList, followed by a float F. Draw the selected metrics with a line width of F, overriding the default value of 1, or the &lineWidth=X.X parameter. Useful for highlighting a single metric out of many, or having multiple line widths in one graph. Example:: &target=lineWidth(server01.instance01.memory.free,5)
3.541772
22.88199
0.154784
for series in seriesList:
    series.name = 'dashed(%s, %g)' % (series.name, dashLength)
    series.options['dashed'] = dashLength
return seriesList
def dashed(requestContext, seriesList, dashLength=5)
Takes one metric or a wildcard seriesList, followed by a float F. Draw the selected metrics with a dotted line with segments of length F. If omitted, the default length of the segments is 5.0. Example:: &target=dashed(server01.instance01.memory.free,2.5)
3.420565
6.722641
0.508813
# Default to negative. parseTimeOffset defaults to +
if timeShiftUnit[0].isdigit():
    timeShiftUnit = '-' + timeShiftUnit
delta = parseTimeOffset(timeShiftUnit)
# if len(seriesList) > 1, they will all have the same pathExpression,
# which is all we care about.
series = seriesList[0]
results = []
timeShiftStartint = int(timeShiftStart)
timeShiftEndint = int(timeShiftEnd)
for shft in range(timeShiftStartint, timeShiftEndint):
    myContext = requestContext.copy()
    innerDelta = delta * shft
    myContext['startTime'] = requestContext['startTime'] + innerDelta
    myContext['endTime'] = requestContext['endTime'] + innerDelta
    for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
        shiftedSeries.name = 'timeShift(%s, %s, %s)' % (shiftedSeries.name,
                                                        timeShiftUnit,
                                                        shft)
        shiftedSeries.pathExpression = shiftedSeries.name
        shiftedSeries.start = series.start
        shiftedSeries.end = series.end
        results.append(shiftedSeries)
return results
def timeStack(requestContext, seriesList, timeShiftUnit, timeShiftStart, timeShiftEnd)
Takes one metric or a wildcard seriesList, followed by a quoted string with the length of time (See ``from / until`` in the render\_api_ for examples of time formats). Also takes a start multiplier and an end multiplier for the length of time. Creates a seriesList which is composed of the original metric series stacked with time shifts, starting from the start multiplier through the end multiplier. Useful for looking at history, or feeding into averageSeries or stddevSeries. Example:: # create a series for today and each of the previous 7 days &target=timeStack(Sales.widgets.largeBlue,"1d",0,7)
3.753199
4.22713
0.887883
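The shifting in timeStack is plain datetime arithmetic on the request window. A sketch of how the windows move for something like timeStack(...,"1d",0,3), outside the library (the dates are made up):

from datetime import datetime, timedelta

start = datetime(2024, 1, 8)
end = datetime(2024, 1, 9)
delta = -timedelta(days=1)  # "1d" gets a leading '-' in the code above
for shft in range(0, 3):
    # shft=0 is the original window; each step moves one day earlier
    print(start + delta * shft, '->', end + delta * shft)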