Dataset columns (type and observed range per column):

body: string, length 26 to 98.2k
body_hash: int64, from -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, length 1 to 16.8k
path: string, length 5 to 230
name: string, length 1 to 96
repository_name: string, length 7 to 89
lang: string, 1 distinct value
body_without_docstring: string, length 20 to 98.2k
@property @since('2.0.0') def numIterations(self): '\n Number of training iterations.\n ' return self._call_java('numIterations')
3,231,421,437,338,347,000
Number of training iterations.
python/pyspark/ml/regression.py
numIterations
AjithShetty2489/spark
python
@property @since('2.0.0') def numIterations(self): '\n \n ' return self._call_java('numIterations')
@property @since('2.0.0') def solver(self): '\n The numeric solver used for training.\n ' return self._call_java('solver')
7,895,560,103,752,479,000
The numeric solver used for training.
python/pyspark/ml/regression.py
solver
AjithShetty2489/spark
python
@property @since('2.0.0') def solver(self): '\n \n ' return self._call_java('solver')
@property @since('2.0.0') def coefficientStandardErrors(self): '\n Standard error of estimated coefficients and intercept.\n\n If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the intercept.\n ' return self._call_java('coefficientStandardErrors')
-3,088,971,962,521,040,400
Standard error of estimated coefficients and intercept. If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept.
python/pyspark/ml/regression.py
coefficientStandardErrors
AjithShetty2489/spark
python
@property @since('2.0.0') def coefficientStandardErrors(self): '\n Standard error of estimated coefficients and intercept.\n\n If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the intercept.\n ' return self._call_java('coefficientStandardErrors')
@property @since('2.0.0') def tValues(self): '\n T-statistic of estimated coefficients and intercept.\n\n If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the intercept.\n ' return self._call_java('tValues')
1,458,171,731,345,339,000
T-statistic of estimated coefficients and intercept. If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept.
python/pyspark/ml/regression.py
tValues
AjithShetty2489/spark
python
@property @since('2.0.0') def tValues(self): '\n T-statistic of estimated coefficients and intercept.\n\n If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the intercept.\n ' return self._call_java('tValues')
@property @since('2.0.0') def pValues(self): '\n Two-sided p-value of estimated coefficients and intercept.\n\n If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the intercept.\n ' return self._call_java('pValues')
-4,774,701,551,750,324,000
Two-sided p-value of estimated coefficients and intercept. If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept.
python/pyspark/ml/regression.py
pValues
AjithShetty2489/spark
python
@property @since('2.0.0') def pValues(self): '\n Two-sided p-value of estimated coefficients and intercept.\n\n If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the intercept.\n ' return self._call_java('pValues')
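The five properties above (numIterations, solver, coefficientStandardErrors, tValues, pValues) all live on the GeneralizedLinearRegression training summary in PySpark. A minimal sketch of reading them, assuming an active SparkSession and a DataFrame `df` with "features" and "label" columns (both assumptions, not part of the rows):

    from pyspark.ml.regression import GeneralizedLinearRegression

    glr = GeneralizedLinearRegression(family="gaussian", link="identity", maxIter=10)
    model = glr.fit(df)                          # df is an assumed input DataFrame
    summary = model.summary
    print(summary.numIterations)                 # number of training iterations
    print(summary.solver)                        # numeric solver used for training
    print(summary.coefficientStandardErrors)     # last element is the intercept when fitIntercept=True
    print(summary.tValues)
    print(summary.pValues)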
@since('3.0.0') def getFactorSize(self): '\n Gets the value of factorSize or its default value.\n ' return self.getOrDefault(self.factorSize)
1,791,135,980,955,474,200
Gets the value of factorSize or its default value.
python/pyspark/ml/regression.py
getFactorSize
AjithShetty2489/spark
python
@since('3.0.0') def getFactorSize(self): '\n \n ' return self.getOrDefault(self.factorSize)
@since('3.0.0') def getFitLinear(self): '\n Gets the value of fitLinear or its default value.\n ' return self.getOrDefault(self.fitLinear)
-4,194,490,835,834,387,000
Gets the value of fitLinear or its default value.
python/pyspark/ml/regression.py
getFitLinear
AjithShetty2489/spark
python
@since('3.0.0') def getFitLinear(self): '\n \n ' return self.getOrDefault(self.fitLinear)
@since('3.0.0') def getMiniBatchFraction(self): '\n Gets the value of miniBatchFraction or its default value.\n ' return self.getOrDefault(self.miniBatchFraction)
3,609,176,603,815,900,000
Gets the value of miniBatchFraction or its default value.
python/pyspark/ml/regression.py
getMiniBatchFraction
AjithShetty2489/spark
python
@since('3.0.0') def getMiniBatchFraction(self): '\n \n ' return self.getOrDefault(self.miniBatchFraction)
@since('3.0.0') def getInitStd(self): '\n Gets the value of initStd or its default value.\n ' return self.getOrDefault(self.initStd)
3,816,975,538,956,782,600
Gets the value of initStd or its default value.
python/pyspark/ml/regression.py
getInitStd
AjithShetty2489/spark
python
@since('3.0.0') def getInitStd(self): '\n \n ' return self.getOrDefault(self.initStd)
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-06, solver='adamW', seed=None): '\n __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None)\n ' super(FMRegressor, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.FMRegressor', self.uid) self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-06, solver='adamW') kwargs = self._input_kwargs self.setParams(**kwargs)
3,828,870,183,181,281,300
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None)
python/pyspark/ml/regression.py
__init__
AjithShetty2489/spark
python
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-06, solver='adamW', seed=None): '\n \n ' super(FMRegressor, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.FMRegressor', self.uid) self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-06, solver='adamW') kwargs = self._input_kwargs self.setParams(**kwargs)
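A quick sketch of constructing and fitting the FMRegressor declared above, assuming a DataFrame `train_df` with "features" and "label" columns (the DataFrame and the Spark session are assumptions, not part of the rows):

    from pyspark.ml.regression import FMRegressor

    fm = FMRegressor(factorSize=8, regParam=0.0, miniBatchFraction=1.0,
                     initStd=0.01, maxIter=100, stepSize=1.0, solver="adamW")
    fm.setSeed(42)                 # the setters in the rows below work the same way
    model = fm.fit(train_df)       # train_df is an assumed input DataFrame
    print(model.intercept)         # scalar intercept
    print(model.linear)            # linear term (Vector)
    print(model.factors)           # factor term (Matrix)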
@keyword_only @since('3.0.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-06, solver='adamW', seed=None): '\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None)\n Sets Params for FMRegressor.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
-4,552,423,437,633,609,700
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None) Sets Params for FMRegressor.
python/pyspark/ml/regression.py
setParams
AjithShetty2489/spark
python
@keyword_only @since('3.0.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-06, solver='adamW', seed=None): '\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None)\n Sets Params for FMRegressor.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
@since('3.0.0') def setFactorSize(self, value): '\n Sets the value of :py:attr:`factorSize`.\n ' return self._set(factorSize=value)
-8,753,162,586,989,364,000
Sets the value of :py:attr:`factorSize`.
python/pyspark/ml/regression.py
setFactorSize
AjithShetty2489/spark
python
@since('3.0.0') def setFactorSize(self, value): '\n \n ' return self._set(factorSize=value)
@since('3.0.0') def setFitLinear(self, value): '\n Sets the value of :py:attr:`fitLinear`.\n ' return self._set(fitLinear=value)
4,134,348,066,261,796,000
Sets the value of :py:attr:`fitLinear`.
python/pyspark/ml/regression.py
setFitLinear
AjithShetty2489/spark
python
@since('3.0.0') def setFitLinear(self, value): '\n \n ' return self._set(fitLinear=value)
@since('3.0.0') def setMiniBatchFraction(self, value): '\n Sets the value of :py:attr:`miniBatchFraction`.\n ' return self._set(miniBatchFraction=value)
5,309,665,991,897,720,000
Sets the value of :py:attr:`miniBatchFraction`.
python/pyspark/ml/regression.py
setMiniBatchFraction
AjithShetty2489/spark
python
@since('3.0.0') def setMiniBatchFraction(self, value): '\n \n ' return self._set(miniBatchFraction=value)
@since('3.0.0') def setInitStd(self, value): '\n Sets the value of :py:attr:`initStd`.\n ' return self._set(initStd=value)
7,314,427,056,946,567,000
Sets the value of :py:attr:`initStd`.
python/pyspark/ml/regression.py
setInitStd
AjithShetty2489/spark
python
@since('3.0.0') def setInitStd(self, value): '\n \n ' return self._set(initStd=value)
@since('3.0.0') def setMaxIter(self, value): '\n Sets the value of :py:attr:`maxIter`.\n ' return self._set(maxIter=value)
8,691,892,694,452,766,000
Sets the value of :py:attr:`maxIter`.
python/pyspark/ml/regression.py
setMaxIter
AjithShetty2489/spark
python
@since('3.0.0') def setMaxIter(self, value): '\n \n ' return self._set(maxIter=value)
@since('3.0.0') def setStepSize(self, value): '\n Sets the value of :py:attr:`stepSize`.\n ' return self._set(stepSize=value)
-6,862,698,385,601,250,000
Sets the value of :py:attr:`stepSize`.
python/pyspark/ml/regression.py
setStepSize
AjithShetty2489/spark
python
@since('3.0.0') def setStepSize(self, value): '\n \n ' return self._set(stepSize=value)
@since('3.0.0') def setTol(self, value): '\n Sets the value of :py:attr:`tol`.\n ' return self._set(tol=value)
3,581,312,399,990,777,300
Sets the value of :py:attr:`tol`.
python/pyspark/ml/regression.py
setTol
AjithShetty2489/spark
python
@since('3.0.0') def setTol(self, value): '\n \n ' return self._set(tol=value)
@since('3.0.0') def setSolver(self, value): '\n Sets the value of :py:attr:`solver`.\n ' return self._set(solver=value)
5,689,872,772,768,317,000
Sets the value of :py:attr:`solver`.
python/pyspark/ml/regression.py
setSolver
AjithShetty2489/spark
python
@since('3.0.0') def setSolver(self, value): '\n \n ' return self._set(solver=value)
@since('3.0.0') def setSeed(self, value): '\n Sets the value of :py:attr:`seed`.\n ' return self._set(seed=value)
1,987,893,307,764,387,800
Sets the value of :py:attr:`seed`.
python/pyspark/ml/regression.py
setSeed
AjithShetty2489/spark
python
@since('3.0.0') def setSeed(self, value): '\n \n ' return self._set(seed=value)
@since('3.0.0') def setFitIntercept(self, value): '\n Sets the value of :py:attr:`fitIntercept`.\n ' return self._set(fitIntercept=value)
4,746,861,393,854,669,000
Sets the value of :py:attr:`fitIntercept`.
python/pyspark/ml/regression.py
setFitIntercept
AjithShetty2489/spark
python
@since('3.0.0') def setFitIntercept(self, value): '\n \n ' return self._set(fitIntercept=value)
@since('3.0.0') def setRegParam(self, value): '\n Sets the value of :py:attr:`regParam`.\n ' return self._set(regParam=value)
4,120,470,953,683,944,400
Sets the value of :py:attr:`regParam`.
python/pyspark/ml/regression.py
setRegParam
AjithShetty2489/spark
python
@since('3.0.0') def setRegParam(self, value): '\n \n ' return self._set(regParam=value)
@property @since('3.0.0') def intercept(self): '\n Model intercept.\n ' return self._call_java('intercept')
-378,010,395,860,784,450
Model intercept.
python/pyspark/ml/regression.py
intercept
AjithShetty2489/spark
python
@property @since('3.0.0') def intercept(self): '\n \n ' return self._call_java('intercept')
@property @since('3.0.0') def linear(self): '\n Model linear term.\n ' return self._call_java('linear')
8,724,079,305,889,703,000
Model linear term.
python/pyspark/ml/regression.py
linear
AjithShetty2489/spark
python
@property @since('3.0.0') def linear(self): '\n \n ' return self._call_java('linear')
@property @since('3.0.0') def factors(self): '\n Model factor term.\n ' return self._call_java('factors')
-1,686,756,612,127,754,800
Model factor term.
python/pyspark/ml/regression.py
factors
AjithShetty2489/spark
python
@property @since('3.0.0') def factors(self): '\n \n ' return self._call_java('factors')
def min_time(x): 'my lib' graph = GeometryTopology.Graph() for i in range(h): for j in range(w): graph.add_node((i, j)) for i in range(h): for j in range(w): if (i > 0): graph.add_edge((i, j), ((i - 1), j), weight=(1 if (s[(i - 1)][j] == '.') else x)) if (i < (h - 1)): graph.add_edge((i, j), ((i + 1), j), weight=(1 if (s[(i + 1)][j] == '.') else x)) if (j > 0): graph.add_edge((i, j), (i, (j - 1)), weight=(1 if (s[i][(j - 1)] == '.') else x)) if (j < (w - 1)): graph.add_edge((i, j), (i, (j + 1)), weight=(1 if (s[i][(j + 1)] == '.') else x)) return graph.dijkstra(source)[target] 'networkx' graph = nx.DiGraph() for i in range(h): for j in range(w): if (i > 0): graph.add_edge((i, j), ((i - 1), j), weight=(1 if (s[(i - 1)][j] == '.') else x)) if (i < (h - 1)): graph.add_edge((i, j), ((i + 1), j), weight=(1 if (s[(i + 1)][j] == '.') else x)) if (j > 0): graph.add_edge((i, j), (i, (j - 1)), weight=(1 if (s[i][(j - 1)] == '.') else x)) if (j < (w - 1)): graph.add_edge((i, j), (i, (j + 1)), weight=(1 if (s[i][(j + 1)] == '.') else x)) return nx.dijkstra_path_length(graph, source, target) return nx.astar_path_length(graph, source, target, heuristic_function)
-785,816,328,218,200,000
my lib
jp.atcoder/abc009/abc009_4/17183548.py
min_time
kagemeka/atcoder-submissions
python
def min_time(x): graph = GeometryTopology.Graph() for i in range(h): for j in range(w): graph.add_node((i, j)) for i in range(h): for j in range(w): if (i > 0): graph.add_edge((i, j), ((i - 1), j), weight=(1 if (s[(i - 1)][j] == '.') else x)) if (i < (h - 1)): graph.add_edge((i, j), ((i + 1), j), weight=(1 if (s[(i + 1)][j] == '.') else x)) if (j > 0): graph.add_edge((i, j), (i, (j - 1)), weight=(1 if (s[i][(j - 1)] == '.') else x)) if (j < (w - 1)): graph.add_edge((i, j), (i, (j + 1)), weight=(1 if (s[i][(j + 1)] == '.') else x)) return graph.dijkstra(source)[target] 'networkx' graph = nx.DiGraph() for i in range(h): for j in range(w): if (i > 0): graph.add_edge((i, j), ((i - 1), j), weight=(1 if (s[(i - 1)][j] == '.') else x)) if (i < (h - 1)): graph.add_edge((i, j), ((i + 1), j), weight=(1 if (s[(i + 1)][j] == '.') else x)) if (j > 0): graph.add_edge((i, j), (i, (j - 1)), weight=(1 if (s[i][(j - 1)] == '.') else x)) if (j < (w - 1)): graph.add_edge((i, j), (i, (j + 1)), weight=(1 if (s[i][(j + 1)] == '.') else x)) return nx.dijkstra_path_length(graph, source, target) return nx.astar_path_length(graph, source, target, heuristic_function)
def group_by(keys, values=None, reduction=None, axis=0): 'construct a grouping object on the given keys, optionally performing the given reduction on the given values\n\n Parameters\n ----------\n keys : indexable object\n keys to group by\n values : array_like, optional\n sequence of values, of the same length as keys\n if a reduction function is provided, the given values are reduced by key\n if no reduction is provided, the given values are grouped and split by key\n reduction : lambda, optional\n reduction function to apply to the values in each group\n axis : int, optional\n axis to regard as the key-sequence, in case keys is multi-dimensional\n\n Returns\n -------\n iterable\n if values is None, a GroupBy object of the given keys object\n if reduction is None, an tuple of a sequence of unique keys and a sequence of grouped values\n else, a sequence of tuples of unique keys and reductions of values over that key-group\n\n See Also\n --------\n numpy_indexed.as_index : for information regarding the casting rules to a valid Index object\n ' g = GroupBy(keys, axis) if (values is None): return g groups = g.split(values) if (reduction is None): return (g.unique, groups) return [(key, reduction(group)) for (key, group) in zip(g.unique, groups)]
6,910,904,156,956,246,000
construct a grouping object on the given keys, optionally performing the given reduction on the given values Parameters ---------- keys : indexable object keys to group by values : array_like, optional sequence of values, of the same length as keys if a reduction function is provided, the given values are reduced by key if no reduction is provided, the given values are grouped and split by key reduction : lambda, optional reduction function to apply to the values in each group axis : int, optional axis to regard as the key-sequence, in case keys is multi-dimensional Returns ------- iterable if values is None, a GroupBy object of the given keys object if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values else, a sequence of tuples of unique keys and reductions of values over that key-group See Also -------- numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
numpy_indexed/grouping.py
group_by
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def group_by(keys, values=None, reduction=None, axis=0): 'construct a grouping object on the given keys, optionally performing the given reduction on the given values\n\n Parameters\n ----------\n keys : indexable object\n keys to group by\n values : array_like, optional\n sequence of values, of the same length as keys\n if a reduction function is provided, the given values are reduced by key\n if no reduction is provided, the given values are grouped and split by key\n reduction : lambda, optional\n reduction function to apply to the values in each group\n axis : int, optional\n axis to regard as the key-sequence, in case keys is multi-dimensional\n\n Returns\n -------\n iterable\n if values is None, a GroupBy object of the given keys object\n if reduction is None, an tuple of a sequence of unique keys and a sequence of grouped values\n else, a sequence of tuples of unique keys and reductions of values over that key-group\n\n See Also\n --------\n numpy_indexed.as_index : for information regarding the casting rules to a valid Index object\n ' g = GroupBy(keys, axis) if (values is None): return g groups = g.split(values) if (reduction is None): return (g.unique, groups) return [(key, reduction(group)) for (key, group) in zip(g.unique, groups)]
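A quick usage sketch of the group_by function above, on small arrays made up for illustration:

    import numpy as np
    import numpy_indexed as npi

    keys   = np.array([0, 1, 0, 1, 1])
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

    g = npi.group_by(keys)                                # GroupBy object
    unique, grouped = npi.group_by(keys, values)          # values split per unique key
    sums = npi.group_by(keys, values, reduction=np.sum)   # [(0, 4.0), (1, 11.0)]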
def __init__(self, keys, axis=0): '\n Parameters\n ----------\n keys : indexable object\n sequence of keys to group by\n axis : int, optional\n axis to regard as the key-sequence, in case keys is multi-dimensional\n\n See Also\n --------\n numpy_indexed.as_index : for information regarding the casting rules to a valid Index object\n ' self.index = as_index(keys, axis)
6,020,760,228,939,865,000
Parameters ---------- keys : indexable object sequence of keys to group by axis : int, optional axis to regard as the key-sequence, in case keys is multi-dimensional See Also -------- numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
numpy_indexed/grouping.py
__init__
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def __init__(self, keys, axis=0): '\n Parameters\n ----------\n keys : indexable object\n sequence of keys to group by\n axis : int, optional\n axis to regard as the key-sequence, in case keys is multi-dimensional\n\n See Also\n --------\n numpy_indexed.as_index : for information regarding the casting rules to a valid Index object\n ' self.index = as_index(keys, axis)
@property def unique(self): 'unique keys' return self.index.unique
-930,526,704,603,093,000
unique keys
numpy_indexed/grouping.py
unique
EelcoHoogendoorn/Numpy_arraysetops_EP
python
@property def unique(self): return self.index.unique
@property def count(self): 'count of each unique key' return self.index.count
8,502,613,712,486,878,000
count of each unique key
numpy_indexed/grouping.py
count
EelcoHoogendoorn/Numpy_arraysetops_EP
python
@property def count(self): return self.index.count
@property def inverse(self): 'mapping such that unique[inverse]==keys' return self.index.inverse
5,544,252,425,276,580,000
mapping such that unique[inverse]==keys
numpy_indexed/grouping.py
inverse
EelcoHoogendoorn/Numpy_arraysetops_EP
python
@property def inverse(self): return self.index.inverse
@property def groups(self): 'int, number of groups formed by the keys' return self.index.groups
8,731,109,496,834,587,000
int, number of groups formed by the keys
numpy_indexed/grouping.py
groups
EelcoHoogendoorn/Numpy_arraysetops_EP
python
@property def groups(self): return self.index.groups
def split_iterable_as_iterable(self, values): 'Group iterable into iterables, in the order of the keys\n\n Parameters\n ----------\n values : iterable of length equal to keys\n iterable of values to be grouped\n\n Yields\n ------\n iterable of items in values\n\n Notes\n -----\n Memory consumption depends on the amount of sorting required\n Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,\n before we can start yielding any output\n But to the extent that the keys are already sorted, the grouping is lazy\n ' values = iter(enumerate(values)) cache = dict() def get_value(ti): try: return cache.pop(ti) except: while True: (i, v) = next(values) if (i == ti): return v cache[i] = v s = iter(self.index.sorter) for c in self.count: (yield (get_value(i) for i in itertools.islice(s, int(c))))
-1,514,213,191,221,959,200
Group iterable into iterables, in the order of the keys Parameters ---------- values : iterable of length equal to keys iterable of values to be grouped Yields ------ iterable of items in values Notes ----- Memory consumption depends on the amount of sorting required Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable, before we can start yielding any output But to the extent that the keys are already sorted, the grouping is lazy
numpy_indexed/grouping.py
split_iterable_as_iterable
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def split_iterable_as_iterable(self, values): 'Group iterable into iterables, in the order of the keys\n\n Parameters\n ----------\n values : iterable of length equal to keys\n iterable of values to be grouped\n\n Yields\n ------\n iterable of items in values\n\n Notes\n -----\n Memory consumption depends on the amount of sorting required\n Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,\n before we can start yielding any output\n But to the extent that the keys are already sorted, the grouping is lazy\n ' values = iter(enumerate(values)) cache = dict() def get_value(ti): try: return cache.pop(ti) except: while True: (i, v) = next(values) if (i == ti): return v cache[i] = v s = iter(self.index.sorter) for c in self.count: (yield (get_value(i) for i in itertools.islice(s, int(c))))
def split_iterable_as_unordered_iterable(self, values): 'Group iterable into iterables, without regard for the ordering of self.index.unique\n key-group tuples are yielded as soon as they are complete\n\n Parameters\n ----------\n values : iterable of length equal to keys\n iterable of values to be grouped\n\n Yields\n ------\n tuple of key, and a list of corresponding items in values\n\n Notes\n -----\n This approach is lazy, insofar as grouped values are close in their iterable\n ' from collections import defaultdict cache = defaultdict(list) count = self.count unique = self.unique key = ((lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple((c[i] for c in unique)))) for (i, v) in zip(self.inverse, values): cache[i].append(v) if (len(cache[i]) == count[i]): (yield (key(i), cache.pop(i)))
7,352,415,599,736,740,000
Group iterable into iterables, without regard for the ordering of self.index.unique key-group tuples are yielded as soon as they are complete Parameters ---------- values : iterable of length equal to keys iterable of values to be grouped Yields ------ tuple of key, and a list of corresponding items in values Notes ----- This approach is lazy, insofar as grouped values are close in their iterable
numpy_indexed/grouping.py
split_iterable_as_unordered_iterable
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def split_iterable_as_unordered_iterable(self, values): 'Group iterable into iterables, without regard for the ordering of self.index.unique\n key-group tuples are yielded as soon as they are complete\n\n Parameters\n ----------\n values : iterable of length equal to keys\n iterable of values to be grouped\n\n Yields\n ------\n tuple of key, and a list of corresponding items in values\n\n Notes\n -----\n This approach is lazy, insofar as grouped values are close in their iterable\n ' from collections import defaultdict cache = defaultdict(list) count = self.count unique = self.unique key = ((lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple((c[i] for c in unique)))) for (i, v) in zip(self.inverse, values): cache[i].append(v) if (len(cache[i]) == count[i]): (yield (key(i), cache.pop(i)))
def split_sequence_as_iterable(self, values): 'Group sequence into iterables\n\n Parameters\n ----------\n values : iterable of length equal to keys\n iterable of values to be grouped\n\n Yields\n ------\n iterable of items in values\n\n Notes\n -----\n This is the preferred method if values has random access, but we dont want it completely in memory.\n Like a big memory mapped file, for instance\n ' print(self.count) s = iter(self.index.sorter) for c in self.count: (yield (values[i] for i in itertools.islice(s, int(c))))
-1,918,695,829,166,377,500
Group sequence into iterables Parameters ---------- values : iterable of length equal to keys iterable of values to be grouped Yields ------ iterable of items in values Notes ----- This is the preferred method if values has random access, but we don't want it completely in memory. Like a big memory mapped file, for instance
numpy_indexed/grouping.py
split_sequence_as_iterable
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def split_sequence_as_iterable(self, values): 'Group sequence into iterables\n\n Parameters\n ----------\n values : iterable of length equal to keys\n iterable of values to be grouped\n\n Yields\n ------\n iterable of items in values\n\n Notes\n -----\n This is the preferred method if values has random access, but we dont want it completely in memory.\n Like a big memory mapped file, for instance\n ' print(self.count) s = iter(self.index.sorter) for c in self.count: (yield (values[i] for i in itertools.islice(s, int(c))))
def split_array_as_array(self, values): 'Group ndarray into ndarray by means of reshaping\n\n Parameters\n ----------\n values : ndarray_like, [index.size, ...]\n\n Returns\n -------\n ndarray, [groups, group_size, ...]\n values grouped by key\n\n Raises\n ------\n AssertionError\n This operation is only possible if index.uniform==True\n ' if (not self.index.uniform): raise ValueError('Array can only be split as array if all groups have the same size') values = np.asarray(values) values = values[self.index.sorter] return values.reshape(self.groups, (- 1), *values.shape[1:])
4,391,596,414,982,254,000
Group ndarray into ndarray by means of reshaping Parameters ---------- values : ndarray_like, [index.size, ...] Returns ------- ndarray, [groups, group_size, ...] values grouped by key Raises ------ AssertionError This operation is only possible if index.uniform==True
numpy_indexed/grouping.py
split_array_as_array
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def split_array_as_array(self, values): 'Group ndarray into ndarray by means of reshaping\n\n Parameters\n ----------\n values : ndarray_like, [index.size, ...]\n\n Returns\n -------\n ndarray, [groups, group_size, ...]\n values grouped by key\n\n Raises\n ------\n AssertionError\n This operation is only possible if index.uniform==True\n ' if (not self.index.uniform): raise ValueError('Array can only be split as array if all groups have the same size') values = np.asarray(values) values = values[self.index.sorter] return values.reshape(self.groups, (- 1), *values.shape[1:])
def split_array_as_list(self, values): 'Group values as a list of arrays, or a jagged-array\n\n Parameters\n ----------\n values : ndarray, [keys, ...]\n\n Returns\n -------\n list of length self.groups of ndarray, [key_count, ...]\n ' values = np.asarray(values) values = values[self.index.sorter] return np.split(values, self.index.slices[1:(- 1)], axis=0)
-2,253,695,053,208,438,500
Group values as a list of arrays, or a jagged-array Parameters ---------- values : ndarray, [keys, ...] Returns ------- list of length self.groups of ndarray, [key_count, ...]
numpy_indexed/grouping.py
split_array_as_list
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def split_array_as_list(self, values): 'Group values as a list of arrays, or a jagged-array\n\n Parameters\n ----------\n values : ndarray, [keys, ...]\n\n Returns\n -------\n list of length self.groups of ndarray, [key_count, ...]\n ' values = np.asarray(values) values = values[self.index.sorter] return np.split(values, self.index.slices[1:(- 1)], axis=0)
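A small illustration of the two split variants above; the input arrays are made up for the example:

    import numpy as np
    import numpy_indexed as npi

    keys   = np.array([0, 0, 1, 1])
    values = np.arange(8).reshape(4, 2)

    g = npi.group_by(keys)
    print(g.split_array_as_array(values))   # shape (2, 2, 2); requires uniform group sizes
    print(g.split_array_as_list(values))    # list of per-group arrays; also handles jagged groups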
def split(self, values): 'some sensible defaults' try: return self.split_array_as_array(values) except: return self.split_array_as_list(values)
8,993,428,587,264,464,000
some sensible defaults
numpy_indexed/grouping.py
split
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def split(self, values): try: return self.split_array_as_array(values) except: return self.split_array_as_list(values)
def __call__(self, values): 'not sure how i feel about this. explicit is better than implict?' return (self.unique, self.split(values))
6,196,984,892,751,246,000
not sure how i feel about this. explicit is better than implicit?
numpy_indexed/grouping.py
__call__
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def __call__(self, values): return (self.unique, self.split(values))
def reduce(self, values, operator=np.add, axis=0, dtype=None): 'Reduce the values over identical key groups, using the given ufunc\n reduction is over the first axis, which should have elements corresponding to the keys\n all other axes are treated indepenently for the sake of this reduction\n\n Parameters\n ----------\n values : ndarray, [keys, ...]\n values to perform reduction over\n operator : numpy.ufunc\n a numpy ufunc, such as np.add or np.sum\n axis : int, optional\n the axis to reduce over\n dtype : output dtype\n\n Returns\n -------\n ndarray, [groups, ...]\n values reduced by operator over the key-groups\n ' values = np.take(values, self.index.sorter, axis=axis) return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
-1,888,145,151,123,266,800
Reduce the values over identical key groups, using the given ufunc reduction is over the first axis, which should have elements corresponding to the keys all other axes are treated independently for the sake of this reduction Parameters ---------- values : ndarray, [keys, ...] values to perform reduction over operator : numpy.ufunc a numpy ufunc, such as np.add or np.sum axis : int, optional the axis to reduce over dtype : output dtype Returns ------- ndarray, [groups, ...] values reduced by operator over the key-groups
numpy_indexed/grouping.py
reduce
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def reduce(self, values, operator=np.add, axis=0, dtype=None): 'Reduce the values over identical key groups, using the given ufunc\n reduction is over the first axis, which should have elements corresponding to the keys\n all other axes are treated indepenently for the sake of this reduction\n\n Parameters\n ----------\n values : ndarray, [keys, ...]\n values to perform reduction over\n operator : numpy.ufunc\n a numpy ufunc, such as np.add or np.sum\n axis : int, optional\n the axis to reduce over\n dtype : output dtype\n\n Returns\n -------\n ndarray, [groups, ...]\n values reduced by operator over the key-groups\n ' values = np.take(values, self.index.sorter, axis=axis) return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
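A brief sketch of reduce with the default and a custom ufunc, on made-up data:

    import numpy as np
    import numpy_indexed as npi

    keys   = np.array(['a', 'b', 'a', 'b'])
    values = np.array([1, 2, 3, 4])

    g = npi.group_by(keys)
    print(g.reduce(values))                        # default np.add -> [4, 6]
    print(g.reduce(values, operator=np.maximum))   # per-group maximum -> [3, 4]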
def sum(self, values, axis=0, dtype=None): 'compute the sum over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to sum per group\n axis : int, optional\n alternative reduction axis for values\n dtype : output dtype\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, axis=axis, dtype=dtype))
-1,945,319,861,920,158,000
compute the sum over each group Parameters ---------- values : array_like, [keys, ...] values to sum per group axis : int, optional alternative reduction axis for values dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
sum
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def sum(self, values, axis=0, dtype=None): 'compute the sum over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to sum per group\n axis : int, optional\n alternative reduction axis for values\n dtype : output dtype\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, axis=axis, dtype=dtype))
def prod(self, values, axis=0, dtype=None): 'compute the product over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to multiply per group\n axis : int, optional\n alternative reduction axis for values\n dtype : output dtype\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply))
4,575,069,133,977,734,000
compute the product over each group Parameters ---------- values : array_like, [keys, ...] values to multiply per group axis : int, optional alternative reduction axis for values dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
prod
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def prod(self, values, axis=0, dtype=None): 'compute the product over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to multiply per group\n axis : int, optional\n alternative reduction axis for values\n dtype : output dtype\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply))
def mean(self, values, axis=0, weights=None, dtype=None): 'compute the mean over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take average of per group\n axis : int, optional\n alternative reduction axis for values\n weights : ndarray, [keys, ...], optional\n weight to use for each value\n dtype : output dtype\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) if (weights is None): result = self.reduce(values, axis=axis, dtype=dtype) shape = ([1] * values.ndim) shape[axis] = self.groups weights = self.count.reshape(shape) else: weights = np.asarray(weights) result = self.reduce((values * weights), axis=axis, dtype=dtype) weights = self.reduce(weights, axis=axis, dtype=dtype) return (self.unique, (result / weights))
8,492,916,686,960,966,000
compute the mean over each group Parameters ---------- values : array_like, [keys, ...] values to take average of per group axis : int, optional alternative reduction axis for values weights : ndarray, [keys, ...], optional weight to use for each value dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
mean
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def mean(self, values, axis=0, weights=None, dtype=None): 'compute the mean over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take average of per group\n axis : int, optional\n alternative reduction axis for values\n weights : ndarray, [keys, ...], optional\n weight to use for each value\n dtype : output dtype\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) if (weights is None): result = self.reduce(values, axis=axis, dtype=dtype) shape = ([1] * values.ndim) shape[axis] = self.groups weights = self.count.reshape(shape) else: weights = np.asarray(weights) result = self.reduce((values * weights), axis=axis, dtype=dtype) weights = self.reduce(weights, axis=axis, dtype=dtype) return (self.unique, (result / weights))
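A small example of the plain and weighted group mean, on made-up data:

    import numpy as np
    import numpy_indexed as npi

    keys    = np.array([0, 0, 1, 1])
    values  = np.array([1.0, 3.0, 10.0, 30.0])
    weights = np.array([1.0, 1.0, 3.0, 1.0])

    unique, means  = npi.group_by(keys).mean(values)                    # [2.0, 20.0]
    unique, wmeans = npi.group_by(keys).mean(values, weights=weights)   # [2.0, 15.0]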
def var(self, values, axis=0, weights=None, dtype=None): 'compute the variance over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take variance of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) (unique, mean) = self.mean(values, axis, weights, dtype) err = (values - mean.take(self.inverse, axis)) if (weights is None): shape = ([1] * values.ndim) shape[axis] = self.groups group_weights = self.count.reshape(shape) var = self.reduce((err ** 2), axis=axis, dtype=dtype) else: weights = np.asarray(weights) group_weights = self.reduce(weights, axis=axis, dtype=dtype) var = self.reduce((weights * (err ** 2)), axis=axis, dtype=dtype) return (unique, (var / group_weights))
-4,991,949,059,122,910,000
compute the variance over each group Parameters ---------- values : array_like, [keys, ...] values to take variance of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
var
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def var(self, values, axis=0, weights=None, dtype=None): 'compute the variance over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take variance of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) (unique, mean) = self.mean(values, axis, weights, dtype) err = (values - mean.take(self.inverse, axis)) if (weights is None): shape = ([1] * values.ndim) shape[axis] = self.groups group_weights = self.count.reshape(shape) var = self.reduce((err ** 2), axis=axis, dtype=dtype) else: weights = np.asarray(weights) group_weights = self.reduce(weights, axis=axis, dtype=dtype) var = self.reduce((weights * (err ** 2)), axis=axis, dtype=dtype) return (unique, (var / group_weights))
def std(self, values, axis=0, weights=None, dtype=None): 'standard deviation over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take standard deviation of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' (unique, var) = self.var(values, axis, weights, dtype) return (unique, np.sqrt(var))
-5,936,213,147,425,417,000
standard deviation over each group Parameters ---------- values : array_like, [keys, ...] values to take standard deviation of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
std
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def std(self, values, axis=0, weights=None, dtype=None): 'standard deviation over each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take standard deviation of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' (unique, var) = self.var(values, axis, weights, dtype) return (unique, np.sqrt(var))
def median(self, values, axis=0, average=True): 'compute the median value over each group.\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to compute the median of per group\n axis : int, optional\n alternative reduction axis for values\n average : bool, optional\n when average is true, the average of the two central values is taken for groups with an even key-count\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' mid_2 = (self.index.start + self.index.stop) hi = (mid_2 // 2) lo = ((mid_2 - 1) // 2) sorted_group_rank_per_key = self.index.sorted_group_rank_per_key def median1d(slc): slc = slc[self.index.sorter] sorter = np.lexsort((slc, sorted_group_rank_per_key)) slc = slc[sorter] return (((slc[lo] + slc[hi]) / 2) if average else slc[hi]) values = np.asarray(values) if (values.ndim > 1): values = np.apply_along_axis(median1d, axis, values) else: values = median1d(values) return (self.unique, values)
-9,059,944,262,215,597,000
compute the median value over each group. Parameters ---------- values : array_like, [keys, ...] values to compute the median of per group axis : int, optional alternative reduction axis for values average : bool, optional when average is true, the average of the two central values is taken for groups with an even key-count Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
median
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def median(self, values, axis=0, average=True): 'compute the median value over each group.\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to compute the median of per group\n axis : int, optional\n alternative reduction axis for values\n average : bool, optional\n when average is true, the average of the two central values is taken for groups with an even key-count\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' mid_2 = (self.index.start + self.index.stop) hi = (mid_2 // 2) lo = ((mid_2 - 1) // 2) sorted_group_rank_per_key = self.index.sorted_group_rank_per_key def median1d(slc): slc = slc[self.index.sorter] sorter = np.lexsort((slc, sorted_group_rank_per_key)) slc = slc[sorter] return (((slc[lo] + slc[hi]) / 2) if average else slc[hi]) values = np.asarray(values) if (values.ndim > 1): values = np.apply_along_axis(median1d, axis, values) else: values = median1d(values) return (self.unique, values)
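A short sketch of the per-group median, on made-up data (the first group has an even count, so its two central values are averaged):

    import numpy as np
    import numpy_indexed as npi

    keys   = np.array([0, 0, 1, 1, 1])
    values = np.array([4.0, 2.0, 9.0, 1.0, 5.0])

    unique, med = npi.group_by(keys).median(values)   # [3.0, 5.0]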
def mode(self, values, weights=None): 'compute the mode within each group.\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to compute the mode of per group\n weights : array_like, [keys], float, optional\n optional weight associated with each entry in values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' if (weights is None): (unique, weights) = npi.count((self.index.sorted_group_rank_per_key, values)) else: (unique, weights) = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights) (x, bin) = npi.group_by(unique[0]).argmax(weights) return (x, unique[1][bin])
4,774,019,377,929,014,000
compute the mode within each group. Parameters ---------- values : array_like, [keys, ...] values to compute the mode of per group weights : array_like, [keys], float, optional optional weight associated with each entry in values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
mode
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def mode(self, values, weights=None): 'compute the mode within each group.\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to compute the mode of per group\n weights : array_like, [keys], float, optional\n optional weight associated with each entry in values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' if (weights is None): (unique, weights) = npi.count((self.index.sorted_group_rank_per_key, values)) else: (unique, weights) = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights) (x, bin) = npi.group_by(unique[0]).argmax(weights) return (x, unique[1][bin])
def min(self, values, axis=0): 'return the minimum within each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take minimum of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, np.minimum, axis))
-2,479,186,574,521,301,000
return the minimum within each group Parameters ---------- values : array_like, [keys, ...] values to take minimum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
min
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def min(self, values, axis=0): 'return the minimum within each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take minimum of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, np.minimum, axis))
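A quick sketch of the per-group minimum (and its max counterpart in the next row), on made-up data:

    import numpy as np
    import numpy_indexed as npi

    keys   = np.array([0, 1, 0, 1])
    values = np.array([5, 2, 3, 7])

    print(npi.group_by(keys).min(values))   # (array([0, 1]), array([3, 2]))
    print(npi.group_by(keys).max(values))   # (array([0, 1]), array([5, 7]))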
def max(self, values, axis=0): 'return the maximum within each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take maximum of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, np.maximum, axis))
-4,892,151,556,389,857,000
return the maximum within each group Parameters ---------- values : array_like, [keys, ...] values to take maximum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
max
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def max(self, values, axis=0): 'return the maximum within each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take maximum of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, self.reduce(values, np.maximum, axis))
def first(self, values, axis=0): 'return values at first occurance of its associated key\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to pick the first value of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, np.take(values, self.index.sorter[self.index.start], axis))
1,523,354,121,096,837,400
return values at first occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the first value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
first
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def first(self, values, axis=0): 'return values at first occurance of its associated key\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to pick the first value of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, np.take(values, self.index.sorter[self.index.start], axis))
def last(self, values, axis=0): 'return values at last occurance of its associated key\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to pick the last value of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, np.take(values, self.index.sorter[(self.index.stop - 1)], axis))
6,716,917,442,927,277,000
return values at last occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the last value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
numpy_indexed/grouping.py
last
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def last(self, values, axis=0): 'return values at last occurance of its associated key\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to pick the last value of per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...]\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, np.take(values, self.index.sorter[(self.index.stop - 1)], axis))
def any(self, values, axis=0): 'compute if any item evaluates to true in each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take boolean predicate over per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...], np.bool\n value array, reduced over groups\n ' values = np.asarray(values) if (not (values.dtype == np.bool)): values = (values != 0) return (self.unique, (self.reduce(values, axis=axis) > 0))
-577,262,749,674,790,800
compute if any item evaluates to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], np.bool value array, reduced over groups
numpy_indexed/grouping.py
any
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def any(self, values, axis=0): 'compute if any item evaluates to true in each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take boolean predicate over per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...], np.bool\n value array, reduced over groups\n ' values = np.asarray(values) if (not (values.dtype == np.bool)): values = (values != 0) return (self.unique, (self.reduce(values, axis=axis) > 0))
def all(self, values, axis=0): 'compute if all items evaluates to true in each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take boolean predicate over per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...], np.bool\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, (self.reduce(values, axis=axis, operator=np.multiply) != 0))
1,479,049,310,855,061,500
compute if all items evaluate to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], np.bool value array, reduced over groups
numpy_indexed/grouping.py
all
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def all(self, values, axis=0): 'compute if all items evaluates to true in each group\n\n Parameters\n ----------\n values : array_like, [keys, ...]\n values to take boolean predicate over per group\n axis : int, optional\n alternative reduction axis for values\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n reduced : ndarray, [groups, ...], np.bool\n value array, reduced over groups\n ' values = np.asarray(values) return (self.unique, (self.reduce(values, axis=axis, operator=np.multiply) != 0))
def argmin(self, values): 'return the index into values corresponding to the minimum value of the group\n\n Parameters\n ----------\n values : array_like, [keys]\n values to pick the argmin of per group\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n argmin : ndarray, [groups]\n index into value array, representing the argmin per group\n ' (keys, minima) = self.min(values) minima = minima[self.inverse] index = as_index((self.inverse, (values == minima))) return (keys, index.sorter[index.start[(- self.groups):]])
-7,292,802,029,241,178,000
return the index into values corresponding to the minimum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmin of per group Returns ------- unique: ndarray, [groups] unique keys argmin : ndarray, [groups] index into value array, representing the argmin per group
numpy_indexed/grouping.py
argmin
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def argmin(self, values): 'return the index into values corresponding to the minimum value of the group\n\n Parameters\n ----------\n values : array_like, [keys]\n values to pick the argmin of per group\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n argmin : ndarray, [groups]\n index into value array, representing the argmin per group\n ' (keys, minima) = self.min(values) minima = minima[self.inverse] index = as_index((self.inverse, (values == minima))) return (keys, index.sorter[index.start[(- self.groups):]])
def argmax(self, values): 'return the index into values corresponding to the maximum value of the group\n\n Parameters\n ----------\n values : array_like, [keys]\n values to pick the argmax of per group\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n argmax : ndarray, [groups]\n index into value array, representing the argmax per group\n ' (keys, maxima) = self.max(values) maxima = maxima[self.inverse] index = as_index((self.inverse, (values == maxima))) return (keys, index.sorter[index.start[(- self.groups):]])
-2,912,817,621,899,028,000
return the index into values corresponding to the maximum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmax of per group Returns ------- unique: ndarray, [groups] unique keys argmax : ndarray, [groups] index into value array, representing the argmax per group
numpy_indexed/grouping.py
argmax
EelcoHoogendoorn/Numpy_arraysetops_EP
python
def argmax(self, values): 'return the index into values corresponding to the maximum value of the group\n\n Parameters\n ----------\n values : array_like, [keys]\n values to pick the argmax of per group\n\n Returns\n -------\n unique: ndarray, [groups]\n unique keys\n argmax : ndarray, [groups]\n index into value array, representing the argmax per group\n ' (keys, maxima) = self.max(values) maxima = maxima[self.inverse] index = as_index((self.inverse, (values == maxima))) return (keys, index.sorter[index.start[(- self.groups):]])
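In the same spirit, a small sketch of the argmin/argmax reductions above; the returned indices point back into the original values array, one entry per group (again assuming the group_by helper constructs the GroupBy object):

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 1, 1])
values = np.array([5, 2, 7, 9])

unique, idx_min = npi.group_by(keys).argmin(values)  # indices of the per-group minima
unique, idx_max = npi.group_by(keys).argmax(values)  # indices of the per-group maxima
# values[idx_min] -> [2, 7], values[idx_max] -> [5, 9]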
def __init__(self, raw_gadget): '\n Gadget constructor\n :param str raw_gadget: raw line output from ROPgadget\n ' self.offset = raw_gadget[:raw_gadget.find(':')] self.instruction_string = raw_gadget[(raw_gadget.find(':') + 2):] self.instructions = [] for instr in self.instruction_string.split(' ; '): self.instructions.append(Instruction(instr)) self.score = 0.0
8,029,491,231,991,800,000
Gadget constructor :param str raw_gadget: raw line output from ROPgadget
src/static_analyzer/Gadget.py
__init__
michaelbrownuc/GadgetSetAnalyzer
python
def __init__(self, raw_gadget): '\n Gadget constructor\n :param str raw_gadget: raw line output from ROPgadget\n ' self.offset = raw_gadget[:raw_gadget.find(':')] self.instruction_string = raw_gadget[(raw_gadget.find(':') + 2):] self.instructions = [] for instr in self.instruction_string.split(' ; '): self.instructions.append(Instruction(instr)) self.score = 0.0
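A hedged sketch of how the constructor above is fed. It assumes a raw line in the usual ROPgadget output shape of 'offset : instr ; instr ; ...'; the offset and instructions below are made up, and Instruction is the helper class from the same module:

raw = "0x0000000000401a2b : pop rax ; ret"   # illustrative ROPgadget-style line
gadget = Gadget(raw)
# gadget.offset             -> "0x0000000000401a2b " (the space before the colon is kept)
# gadget.instruction_string -> "pop rax ; ret"
# gadget.instructions       -> [Instruction("pop rax"), Instruction("ret")]
# gadget.score              -> 0.0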
def is_useless_op(self): '\n :return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise\n Default behavior is to consider opcodes useful unless otherwise observed.\n ' first_opcode = self.instructions[0].opcode if first_opcode.startswith('j'): return True if first_opcode.startswith('bnd'): return True if first_opcode.startswith('ret'): return True if first_opcode.startswith('iret'): return True if first_opcode.startswith('call'): return True useless = ['nop', 'fnop', 'ljmp'] return (first_opcode in useless)
-8,508,034,393,575,901,000
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise. Default behavior is to consider opcodes useful unless otherwise observed.
src/static_analyzer/Gadget.py
is_useless_op
michaelbrownuc/GadgetSetAnalyzer
python
def is_useless_op(self): '\n :return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise\n Default behavior is to consider opcodes useful unless otherwise observed.\n ' first_opcode = self.instructions[0].opcode if first_opcode.startswith('j'): return True if first_opcode.startswith('bnd'): return True if first_opcode.startswith('ret'): return True if first_opcode.startswith('iret'): return True if first_opcode.startswith('call'): return True useless = ['nop', 'fnop', 'ljmp'] return (first_opcode in useless)
def contains_unusable_op(self): '\n :return boolean: Returns True if any instruction opcode is unusable. False otherwise\n unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.\n ' for instr in self.instructions: if instr.opcode.startswith('inv'): return True if (instr.opcode.startswith('vm') and (instr.opcode != 'vminsd') and (instr.opcode != 'vminpd')): return True if instr.opcode.startswith('ud'): return True unusable = ['clts', 'hlt', 'lgdt', 'lidt', 'lldt', 'lmsw', 'ltr', 'monitor', 'mwait', 'swapgs', 'sysexit', 'sysreturn', 'wbinvd', 'wrmsr', 'xsetbv', 'rsm', 'lock'] if (instr.opcode in unusable): return True if (instr.op1 is not None): if (instr.op1.startswith('cr') or instr.op1.startswith('tr') or instr.op1.startswith('db')): return True if (instr.op2 is not None): if (instr.op2.startswith('cr') or instr.op2.startswith('tr') or instr.op2.startswith('db')): return True return False
5,801,953,904,756,299,000
:return boolean: Returns True if any instruction opcode is unusable, False otherwise. Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
src/static_analyzer/Gadget.py
contains_unusable_op
michaelbrownuc/GadgetSetAnalyzer
python
def contains_unusable_op(self): '\n :return boolean: Returns True if any instruction opcode is unusable. False otherwise\n unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.\n ' for instr in self.instructions: if instr.opcode.startswith('inv'): return True if (instr.opcode.startswith('vm') and (instr.opcode != 'vminsd') and (instr.opcode != 'vminpd')): return True if instr.opcode.startswith('ud'): return True unusable = ['clts', 'hlt', 'lgdt', 'lidt', 'lldt', 'lmsw', 'ltr', 'monitor', 'mwait', 'swapgs', 'sysexit', 'sysreturn', 'wbinvd', 'wrmsr', 'xsetbv', 'rsm', 'lock'] if (instr.opcode in unusable): return True if (instr.op1 is not None): if (instr.op1.startswith('cr') or instr.op1.startswith('tr') or instr.op1.startswith('db')): return True if (instr.op2 is not None): if (instr.op2.startswith('cr') or instr.op2.startswith('tr') or instr.op2.startswith('db')): return True return False
def is_gpi_only(self): "\n :return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',\n False otherwise\n " if (len(self.instructions) == 1): opcode = self.instructions[0].opcode if (opcode.startswith('ret') or opcode.startswith('jmp') or opcode.startswith('call')): return True return False
4,594,664,549,577,642,500
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call', False otherwise
src/static_analyzer/Gadget.py
is_gpi_only
michaelbrownuc/GadgetSetAnalyzer
python
def is_gpi_only(self): "\n :return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',\n False otherwise\n " if (len(self.instructions) == 1): opcode = self.instructions[0].opcode if (opcode.startswith('ret') or opcode.startswith('jmp') or opcode.startswith('call')): return True return False
def is_invalid_branch(self): "\n :return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset\n or does not target a recognized register family. False otherwise\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('call') or last_instr.opcode.startswith('jmp')): if (Instruction.get_operand_register_family(last_instr.op1) is None): return True return False
5,528,830,466,194,106,000
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset or does not target a recognized register family. False otherwise
src/static_analyzer/Gadget.py
is_invalid_branch
michaelbrownuc/GadgetSetAnalyzer
python
def is_invalid_branch(self): "\n :return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset\n or does not target a recognized register family. False otherwise\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('call') or last_instr.opcode.startswith('jmp')): if (Instruction.get_operand_register_family(last_instr.op1) is None): return True return False
def has_invalid_ret_offset(self): "\n :return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is not byte\n aligned or is greater than 32 bytes, False otherwise\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('ret') and (last_instr.op1 is not None)): offset = Instruction.get_operand_as_constant(last_instr.op1) if (((offset % 2) != 0) or (offset > 32)): return True return False
-862,678,852,484,315,100
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is not byte aligned or is greater than 32 bytes, False otherwise
src/static_analyzer/Gadget.py
has_invalid_ret_offset
michaelbrownuc/GadgetSetAnalyzer
python
def has_invalid_ret_offset(self): "\n :return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is not byte\n aligned or is greater than 32 bytes, False otherwise\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('ret') and (last_instr.op1 is not None)): offset = Instruction.get_operand_as_constant(last_instr.op1) if (((offset % 2) != 0) or (offset > 32)): return True return False
def clobbers_created_value(self): '\n :return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,\n False otherwise.\n ' first_instr = self.instructions[0] if ((not first_instr.creates_value()) or ('xchg' in first_instr.opcode)): return False first_family = Instruction.get_operand_register_family(first_instr.op1) if (first_family is None): return False for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if ((not cur_instr.creates_value()) or ('xchg' in cur_instr.opcode)): continue if (first_family == Instruction.get_operand_register_family(cur_instr.op1)): if (((cur_instr.op2 is None) and (cur_instr.opcode not in ['inc', 'dec', 'neg', 'not'])) or ((cur_instr.op2 is not None) and (not Instruction.is_constant(cur_instr.op2)))): return True return False
-1,797,705,343,943,502,000
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction, False otherwise.
src/static_analyzer/Gadget.py
clobbers_created_value
michaelbrownuc/GadgetSetAnalyzer
python
def clobbers_created_value(self): '\n :return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,\n False otherwise.\n ' first_instr = self.instructions[0] if ((not first_instr.creates_value()) or ('xchg' in first_instr.opcode)): return False first_family = Instruction.get_operand_register_family(first_instr.op1) if (first_family is None): return False for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if ((not cur_instr.creates_value()) or ('xchg' in cur_instr.opcode)): continue if (first_family == Instruction.get_operand_register_family(cur_instr.op1)): if (((cur_instr.op2 is None) and (cur_instr.opcode not in ['inc', 'dec', 'neg', 'not'])) or ((cur_instr.op2 is not None) and (not Instruction.is_constant(cur_instr.op2)))): return True return False
def creates_unusable_value(self): '\n :return boolean: Returns True if the gadget creates a value in segment or extension registers, or are\n RIP-relative, or are constant memory locations; False otherwise.\n ' first_instr = self.instructions[0] if ((first_instr.opcode in ['cmp', 'test', 'push']) or (first_instr.op1 is None)): return False if ((not Instruction.is_constant(first_instr.op1)) and (Instruction.get_operand_register_family(first_instr.op1) is None)): return True return False
2,964,850,469,619,353,600
:return boolean: Returns True if the gadget creates a value in a segment or extension register, a RIP-relative location, or a constant memory location; False otherwise.
src/static_analyzer/Gadget.py
creates_unusable_value
michaelbrownuc/GadgetSetAnalyzer
python
def creates_unusable_value(self): '\n :return boolean: Returns True if the gadget creates a value in segment or extension registers, or are\n RIP-relative, or are constant memory locations; False otherwise.\n ' first_instr = self.instructions[0] if ((first_instr.opcode in ['cmp', 'test', 'push']) or (first_instr.op1 is None)): return False if ((not Instruction.is_constant(first_instr.op1)) and (Instruction.get_operand_register_family(first_instr.op1) is None)): return True return False
def contains_intermediate_GPI(self): "\n :return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),\n False otherwise.\n " for i in range((len(self.instructions) - 1)): cur_opcode = self.instructions[i].opcode cur_target = self.instructions[i].op1 if (cur_opcode.startswith('ret') or (cur_opcode == 'syscall') or (cur_opcode == 'sysenter') or cur_opcode.startswith('int') or (('jmp' in cur_opcode) and (not Instruction.is_constant(cur_target))) or (('call' in cur_opcode) and (not Instruction.is_constant(cur_target)))): return True return False
1,243,145,747,006,903,300
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt), False otherwise.
src/static_analyzer/Gadget.py
contains_intermediate_GPI
michaelbrownuc/GadgetSetAnalyzer
python
def contains_intermediate_GPI(self): "\n :return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),\n False otherwise.\n " for i in range((len(self.instructions) - 1)): cur_opcode = self.instructions[i].opcode cur_target = self.instructions[i].op1 if (cur_opcode.startswith('ret') or (cur_opcode == 'syscall') or (cur_opcode == 'sysenter') or cur_opcode.startswith('int') or (('jmp' in cur_opcode) and (not Instruction.is_constant(cur_target))) or (('call' in cur_opcode) and (not Instruction.is_constant(cur_target)))): return True return False
def clobbers_stack_pointer(self): "\n :return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer\n register, False otherwise.\n " last_instr = self.instructions[(len(self.instructions) - 1)] if last_instr.opcode.startswith('ret'): for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (Instruction.get_operand_register_family(cur_instr.op1) == 7): if (((cur_instr.op2 is None) and (cur_instr.opcode not in ['inc', 'dec', 'pop'])) or ((cur_instr.op2 is not None) and (not Instruction.is_constant(cur_instr.op2)))): return True return False
-2,445,516,483,870,566,400
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer register, False otherwise.
src/static_analyzer/Gadget.py
clobbers_stack_pointer
michaelbrownuc/GadgetSetAnalyzer
python
def clobbers_stack_pointer(self): "\n :return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer\n register, False otherwise.\n " last_instr = self.instructions[(len(self.instructions) - 1)] if last_instr.opcode.startswith('ret'): for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (Instruction.get_operand_register_family(cur_instr.op1) == 7): if (((cur_instr.op2 is None) and (cur_instr.opcode not in ['inc', 'dec', 'pop'])) or ((cur_instr.op2 is not None) and (not Instruction.is_constant(cur_instr.op2)))): return True return False
def clobbers_indirect_target(self): "\n :return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in\n certain ways, False otherwise.\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('jmp') or last_instr.opcode.startswith('call')): family = Instruction.get_operand_register_family(last_instr.op1) for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (cur_instr.op1 in Instruction.register_families[family]): if ((cur_instr.opcode == 'xor') and (cur_instr.op1 == cur_instr.op2)): return True if ((cur_instr.opcode == 'lea') and (('rip' in cur_instr.op2) or ('eip' in cur_instr.op2))): return True if (cur_instr.opcode.startswith('lods') or (cur_instr.opcode == 'in')): return True if (('mov' in cur_instr.opcode) and (Instruction.is_constant(cur_instr.op2) or (Instruction.get_operand_register_family(cur_instr.op2) is None))): return True return False
5,501,789,693,138,077,000
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in certain ways, False otherwise.
src/static_analyzer/Gadget.py
clobbers_indirect_target
michaelbrownuc/GadgetSetAnalyzer
python
def clobbers_indirect_target(self): "\n :return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in\n certain ways, False otherwise.\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('jmp') or last_instr.opcode.startswith('call')): family = Instruction.get_operand_register_family(last_instr.op1) for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (cur_instr.op1 in Instruction.register_families[family]): if ((cur_instr.opcode == 'xor') and (cur_instr.op1 == cur_instr.op2)): return True if ((cur_instr.opcode == 'lea') and (('rip' in cur_instr.op2) or ('eip' in cur_instr.op2))): return True if (cur_instr.opcode.startswith('lods') or (cur_instr.opcode == 'in')): return True if (('mov' in cur_instr.opcode) and (Instruction.is_constant(cur_instr.op2) or (Instruction.get_operand_register_family(cur_instr.op2) is None))): return True return False
def has_invalid_int_handler(self): "\n :return boolean: Returns True if the gadget ends in a software interrupt whose vector is not 0x80\n (the legacy Linux syscall interrupt), False otherwise.\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('int') and (last_instr.op1 != '0x80')): return True return False
6,179,865,065,802,890,000
:return boolean: Returns True if the gadget ends in a software interrupt whose vector is not 0x80 (the legacy Linux syscall interrupt), False otherwise.
src/static_analyzer/Gadget.py
has_invalid_int_handler
michaelbrownuc/GadgetSetAnalyzer
python
def has_invalid_int_handler(self): "\n :return boolean: Returns True if the gadget ends in a software interrupt whose vector is not 0x80\n (the legacy Linux syscall interrupt), False otherwise.\n " last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('int') and (last_instr.op1 != '0x80')): return True return False
def is_rip_relative_indirect_branch(self): '\n :return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,\n False otherwise.\n ' last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('jmp') or last_instr.opcode.startswith('call')): if (('rip' in last_instr.op1) or ('eip' in last_instr.op1)): return True return False
-610,727,062,618,971,600
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch, False otherwise.
src/static_analyzer/Gadget.py
is_rip_relative_indirect_branch
michaelbrownuc/GadgetSetAnalyzer
python
def is_rip_relative_indirect_branch(self): '\n :return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,\n False otherwise.\n ' last_instr = self.instructions[(len(self.instructions) - 1)] if (last_instr.opcode.startswith('jmp') or last_instr.opcode.startswith('call')): if (('rip' in last_instr.op1) or ('eip' in last_instr.op1)): return True return False
def is_equal(self, rhs): '\n :return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.\n ' return ((self.offset == rhs.offset) and (self.instruction_string == rhs.instruction_string))
2,534,057,557,342,863,000
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
src/static_analyzer/Gadget.py
is_equal
michaelbrownuc/GadgetSetAnalyzer
python
def is_equal(self, rhs): '\n \n ' return ((self.offset == rhs.offset) and (self.instruction_string == rhs.instruction_string))
def is_duplicate(self, rhs): '\n :return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.\n Semantic match is defined as the exact same sequence of equivalent instructions.\n ' if (len(self.instructions) != len(rhs.instructions)): return False for i in range(len(self.instructions)): if (not self.instructions[i].is_equivalent(rhs.instructions[i])): return False return True
-8,467,245,155,612,059,000
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics. Semantic match is defined as the exact same sequence of equivalent instructions.
src/static_analyzer/Gadget.py
is_duplicate
michaelbrownuc/GadgetSetAnalyzer
python
def is_duplicate(self, rhs): '\n :return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.\n Semantic match is defined as the exact same sequence of equivalent instructions.\n ' if (len(self.instructions) != len(rhs.instructions)): return False for i in range(len(self.instructions)): if (not self.instructions[i].is_equivalent(rhs.instructions[i])): return False return True
def is_JOP_COP_dispatcher(self): "\n :return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with a\n arithmetic operation on a register and ends with a branch to a deference of that register. Used\n to iterate through instructions in payload. Only restrictions on the arithmetic operation is\n that it doesn't use the same register as both operands.\n " first_instr = self.instructions[0] last_instr = self.instructions[(len(self.instructions) - 1)] if (('[' in last_instr.op1) and (first_instr.opcode in ['inc', 'dec', 'add', 'adc', 'sub', 'sbb']) and ('[' not in first_instr.op1)): gpi_target = Instruction.get_operand_register_family(last_instr.op1) arith_target_1 = Instruction.get_operand_register_family(first_instr.op1) if Instruction.is_constant(first_instr.op2): additive_value = Instruction.get_operand_as_constant(first_instr.op2) if ((additive_value < 1) or (additive_value > 32)): return False arith_target_2 = Instruction.get_operand_register_family(first_instr.op2) return ((gpi_target == arith_target_1) and (arith_target_1 != arith_target_2)) return False
5,951,576,296,575,257,000
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an arithmetic operation on a register and ends with a branch to a dereference of that register. Used to iterate through instructions in the payload. The only restriction on the arithmetic operation is that it doesn't use the same register as both operands.
src/static_analyzer/Gadget.py
is_JOP_COP_dispatcher
michaelbrownuc/GadgetSetAnalyzer
python
def is_JOP_COP_dispatcher(self): "\n :return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with a\n arithmetic operation on a register and ends with a branch to a deference of that register. Used\n to iterate through instructions in payload. Only restrictions on the arithmetic operation is\n that it doesn't use the same register as both operands.\n " first_instr = self.instructions[0] last_instr = self.instructions[(len(self.instructions) - 1)] if (('[' in last_instr.op1) and (first_instr.opcode in ['inc', 'dec', 'add', 'adc', 'sub', 'sbb']) and ('[' not in first_instr.op1)): gpi_target = Instruction.get_operand_register_family(last_instr.op1) arith_target_1 = Instruction.get_operand_register_family(first_instr.op1) if Instruction.is_constant(first_instr.op2): additive_value = Instruction.get_operand_as_constant(first_instr.op2) if ((additive_value < 1) or (additive_value > 32)): return False arith_target_2 = Instruction.get_operand_register_family(first_instr.op2) return ((gpi_target == arith_target_1) and (arith_target_1 != arith_target_2)) return False
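For concreteness, some made-up gadget strings annotated with how the dispatcher definition above would treat them; this assumes Instruction parses e.g. 'add rcx, 8' into op1='rcx' and op2='8':

candidate_gadgets = [
    "add rcx, 8 ; jmp qword ptr [rcx]",      # dispatcher: small constant added to rcx, branch dereferences rcx
    "add esi, 4 ; call dword ptr [esi]",     # dispatcher: same pattern with a COP-style call
    "add rcx, rcx ; jmp qword ptr [rcx]",    # rejected: the same register is used as both operands
    "add rcx, 0x100 ; jmp qword ptr [rcx]",  # rejected: additive constant falls outside the 1-32 range
]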
def is_JOP_COP_dataloader(self): '\n :return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a\n pop opcode to a non-memory location, that is also not the target of the GPI. Used to pop a\n necessary value off stack en masse before redirecting to the dispatcher.\n ' first_instr = self.instructions[0] if ((first_instr.opcode == 'pop') and ('[' not in first_instr.op1)): gpi_target = Instruction.get_operand_register_family(self.instructions[(len(self.instructions) - 1)].op1) pop_target = Instruction.get_operand_register_family(first_instr.op1) return (gpi_target != pop_target) return False
5,617,497,105,708,245,000
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a pop opcode to a non-memory location, that is also not the target of the GPI. Used to pop a necessary value off stack en masse before redirecting to the dispatcher.
src/static_analyzer/Gadget.py
is_JOP_COP_dataloader
michaelbrownuc/GadgetSetAnalyzer
python
def is_JOP_COP_dataloader(self): '\n :return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a\n pop opcode to a non-memory location, that is also not the target of the GPI. Used to pop a\n necessary value off stack en masse before redirecting to the dispatcher.\n ' first_instr = self.instructions[0] if ((first_instr.opcode == 'pop') and ('[' not in first_instr.op1)): gpi_target = Instruction.get_operand_register_family(self.instructions[(len(self.instructions) - 1)].op1) pop_target = Instruction.get_operand_register_family(first_instr.op1) return (gpi_target != pop_target) return False
def is_JOP_initializer(self): '\n :return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a\n "pop all" opcode, used to pop necessary values off stack en masse before redirecting to the\n dispatcher.\n ' return self.instructions[0].opcode.startswith('popa')
405,727,441,158,540,800
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a "pop all" opcode, used to pop necessary values off stack en masse before redirecting to the dispatcher.
src/static_analyzer/Gadget.py
is_JOP_initializer
michaelbrownuc/GadgetSetAnalyzer
python
def is_JOP_initializer(self): '\n :return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a\n "pop all" opcode, used to pop necessary values off stack en masse before redirecting to the\n dispatcher.\n ' return self.instructions[0].opcode.startswith('popa')
def is_JOP_trampoline(self): '\n :return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a\n pop opcode to a non-memory location, and that ends in a dereference of that value. Used to\n redirect execution to value stored in memory.\n ' first_instr = self.instructions[0] gpi_target_op = self.instructions[(len(self.instructions) - 1)].op1 if ((first_instr.opcode == 'pop') and ('[' not in first_instr.op1)): gpi_target = Instruction.get_operand_register_family(gpi_target_op) pop_target = Instruction.get_operand_register_family(first_instr.op1) return ((gpi_target == pop_target) and ('[' in gpi_target_op)) return False
-3,181,699,853,611,830,300
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a pop opcode to a non-memory location, and that ends in a dereference of that value. Used to redirect execution to value stored in memory.
src/static_analyzer/Gadget.py
is_JOP_trampoline
michaelbrownuc/GadgetSetAnalyzer
python
def is_JOP_trampoline(self): '\n :return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a\n pop opcode to a non-memory location, and that ends in a dereference of that value. Used to\n redirect execution to value stored in memory.\n ' first_instr = self.instructions[0] gpi_target_op = self.instructions[(len(self.instructions) - 1)].op1 if ((first_instr.opcode == 'pop') and ('[' not in first_instr.op1)): gpi_target = Instruction.get_operand_register_family(gpi_target_op) pop_target = Instruction.get_operand_register_family(first_instr.op1) return ((gpi_target == pop_target) and ('[' in gpi_target_op)) return False
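Likewise, illustrative (made-up) shapes for the data loader and trampoline definitions above; register choices are arbitrary:

jop_examples = [
    "pop rdi ; jmp rax",              # data loader: pops into rdi, then branches through a different register
    "pop rax ; jmp qword ptr [rax]",  # trampoline: pops into rax, then branches through a dereference of rax
]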
def is_COP_initializer(self): '\n :return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a\n "pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber\n bx/cx/dx or the call target in an intermediate instruction\n ' first_instr = self.instructions[0] last_instr = self.instructions[(len(self.instructions) - 1)] call_target = Instruction.get_operand_register_family(last_instr.op1) if (first_instr.opcode.startswith('popa') and (call_target not in [1, 2, 3, 5])): protected_families = [1, 2, 3, call_target] protected_registers = [] for family in protected_families: for register in Instruction.register_families[family]: protected_registers.append(register) for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (cur_instr.op1 in protected_registers): if (((cur_instr.op2 is None) and (cur_instr.opcode not in ['inc', 'dec', 'neg', 'not'])) or ((cur_instr.op2 is not None) and (not Instruction.is_constant(cur_instr.op2)))): return False return True return False
-943,675,825,263,414,400
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a "pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber bx/cx/dx or the call target in an intermediate instruction
src/static_analyzer/Gadget.py
is_COP_initializer
michaelbrownuc/GadgetSetAnalyzer
python
def is_COP_initializer(self): '\n :return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a\n "pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber\n bx/cx/dx or the call target in an intermediate instruction\n ' first_instr = self.instructions[0] last_instr = self.instructions[(len(self.instructions) - 1)] call_target = Instruction.get_operand_register_family(last_instr.op1) if (first_instr.opcode.startswith('popa') and (call_target not in [1, 2, 3, 5])): protected_families = [1, 2, 3, call_target] protected_registers = [] for family in protected_families: for register in Instruction.register_families[family]: protected_registers.append(register) for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (cur_instr.op1 in protected_registers): if (((cur_instr.op2 is None) and (cur_instr.opcode not in ['inc', 'dec', 'neg', 'not'])) or ((cur_instr.op2 is not None) and (not Instruction.is_constant(cur_instr.op2)))): return False return True return False
def is_COP_strong_trampoline(self): '\n :return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a\n pop opcode, and contains at least one other pop operation. The last non-pop all operation must\n target the call target.\n ' first_instr = self.instructions[0] last_instr = self.instructions[(len(self.instructions) - 1)] call_target = Instruction.get_operand_register_family(last_instr.op1) if ((first_instr.opcode == 'pop') and ('[' not in first_instr.op1)): cnt_pops = 1 last_pop_target = first_instr.op1 for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if cur_instr.opcode.startswith('popa'): cnt_pops += 1 if ((cur_instr.opcode == 'pop') and ('[' not in cur_instr.op1)): cnt_pops += 1 last_pop_target = cur_instr.op1 if ((cnt_pops > 1) and (last_pop_target in Instruction.register_families[call_target])): return True return False
-7,207,612,691,470,076,000
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a pop opcode, and contains at least one other pop operation. The last pop operation (other than a 'pop all') must target the call target register.
src/static_analyzer/Gadget.py
is_COP_strong_trampoline
michaelbrownuc/GadgetSetAnalyzer
python
def is_COP_strong_trampoline(self): '\n :return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a\n pop opcode, and contains at least one other pop operation. The last non-pop all operation must\n target the call target.\n ' first_instr = self.instructions[0] last_instr = self.instructions[(len(self.instructions) - 1)] call_target = Instruction.get_operand_register_family(last_instr.op1) if ((first_instr.opcode == 'pop') and ('[' not in first_instr.op1)): cnt_pops = 1 last_pop_target = first_instr.op1 for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if cur_instr.opcode.startswith('popa'): cnt_pops += 1 if ((cur_instr.opcode == 'pop') and ('[' not in cur_instr.op1)): cnt_pops += 1 last_pop_target = cur_instr.op1 if ((cnt_pops > 1) and (last_pop_target in Instruction.register_families[call_target])): return True return False
def is_COP_intrastack_pivot(self): '\n :return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins\n with an additive operation on the stack pointer register. Used to move around in shellcode\n during COP exploits. Only restriction on the arithmetic operation is that the second operand\n is not a pointer.\n ' first_instr = self.instructions[0] if ((first_instr.opcode in ['inc', 'add', 'adc', 'sub', 'sbb']) and ('[' not in first_instr.op1)): arith_target = Instruction.get_operand_register_family(first_instr.op1) if (arith_target == 7): if ((first_instr.op2 is None) or ('[' not in first_instr.op2)): return True return False
9,165,429,799,068,683,000
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins with an additive operation on the stack pointer register. Used to move around in shellcode during COP exploits. Only restriction on the arithmetic operation is that the second operand is not a pointer.
src/static_analyzer/Gadget.py
is_COP_intrastack_pivot
michaelbrownuc/GadgetSetAnalyzer
python
def is_COP_intrastack_pivot(self): '\n :return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins\n with an additive operation on the stack pointer register. Used to move around in shellcode\n during COP exploits. Only restriction on the arithmetic operation is that the second operand\n is not a pointer.\n ' first_instr = self.instructions[0] if ((first_instr.opcode in ['inc', 'add', 'adc', 'sub', 'sbb']) and ('[' not in first_instr.op1)): arith_target = Instruction.get_operand_register_family(first_instr.op1) if (arith_target == 7): if ((first_instr.op2 is None) or ('[' not in first_instr.op2)): return True return False
def check_contains_leave(self): '\n :return void: Increases gadget\'s score if the gadget has an intermediate "leave" instruction.\n ' for i in range(1, (len(self.instructions) - 1)): if (self.instructions[i].opcode == 'leave'): self.score += 2.0 return
7,210,409,693,577,871,000
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
src/static_analyzer/Gadget.py
check_contains_leave
michaelbrownuc/GadgetSetAnalyzer
python
def check_contains_leave(self): '\n :return void: Increases gadget\'s score if the gadget has an intermediate "leave" instruction.\n ' for i in range(1, (len(self.instructions) - 1)): if (self.instructions[i].opcode == 'leave'): self.score += 2.0 return
def check_sp_target_of_operation(self): "\n :return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain\n operations on the stack pointer register family.\n " for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (Instruction.get_operand_register_family(cur_instr.op1) == 7): if (('xchg' in cur_instr.opcode) or ('mov' in cur_instr.opcode) or (cur_instr.opcode in ['lea'])): self.score += 4.0 elif (cur_instr.opcode in ['shl', 'shr', 'sar', 'sal', 'ror', 'rol', 'rcr', 'rcl']): self.score += 3.0 elif (cur_instr.opcode == 'pop'): self.score += 1.0 else: self.score += 2.0
-7,326,613,457,057,683,000
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain operations on the stack pointer register family.
src/static_analyzer/Gadget.py
check_sp_target_of_operation
michaelbrownuc/GadgetSetAnalyzer
python
def check_sp_target_of_operation(self): "\n :return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain\n operations on the stack pointer register family.\n " for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (Instruction.get_operand_register_family(cur_instr.op1) == 7): if (('xchg' in cur_instr.opcode) or ('mov' in cur_instr.opcode) or (cur_instr.opcode in ['lea'])): self.score += 4.0 elif (cur_instr.opcode in ['shl', 'shr', 'sar', 'sal', 'ror', 'rol', 'rcr', 'rcl']): self.score += 3.0 elif (cur_instr.opcode == 'pop'): self.score += 1.0 else: self.score += 2.0
def check_negative_sp_offsets(self): "\n :return void: Increases gadget's score if its cumulative register offsets are negative.\n " sp_offset = 0 for i in range(len(self.instructions)): cur_instr = self.instructions[i] if (cur_instr.opcode == 'push'): sp_offset -= 8 elif ((cur_instr.opcode == 'pop') and (cur_instr.op1 not in Instruction.register_families[7])): sp_offset += 8 elif ((cur_instr.opcode in ['add', 'adc']) and (cur_instr.op1 in Instruction.register_families[7]) and Instruction.is_constant(cur_instr.op2)): sp_offset += Instruction.get_operand_as_constant(cur_instr.op2) elif ((cur_instr.opcode in ['sub', 'sbb']) and (cur_instr.op1 in Instruction.register_families[7]) and Instruction.is_constant(cur_instr.op2)): sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2) elif ((cur_instr.opcode == 'inc') and (cur_instr.op1 in Instruction.register_families[7])): sp_offset += 1 elif ((cur_instr.opcode == 'dec') and (cur_instr.op1 in Instruction.register_families[7])): sp_offset -= 1 elif (cur_instr.opcode.startswith('ret') and (cur_instr.op1 is not None)): sp_offset += Instruction.get_operand_as_constant(cur_instr.op1) if (sp_offset < 0): self.score += 2.0
-8,229,238,631,788,484,000
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
src/static_analyzer/Gadget.py
check_negative_sp_offsets
michaelbrownuc/GadgetSetAnalyzer
python
def check_negative_sp_offsets(self): "\n \n " sp_offset = 0 for i in range(len(self.instructions)): cur_instr = self.instructions[i] if (cur_instr.opcode == 'push'): sp_offset -= 8 elif ((cur_instr.opcode == 'pop') and (cur_instr.op1 not in Instruction.register_families[7])): sp_offset += 8 elif ((cur_instr.opcode in ['add', 'adc']) and (cur_instr.op1 in Instruction.register_families[7]) and Instruction.is_constant(cur_instr.op2)): sp_offset += Instruction.get_operand_as_constant(cur_instr.op2) elif ((cur_instr.opcode in ['sub', 'sbb']) and (cur_instr.op1 in Instruction.register_families[7]) and Instruction.is_constant(cur_instr.op2)): sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2) elif ((cur_instr.opcode == 'inc') and (cur_instr.op1 in Instruction.register_families[7])): sp_offset += 1 elif ((cur_instr.opcode == 'dec') and (cur_instr.op1 in Instruction.register_families[7])): sp_offset -= 1 elif (cur_instr.opcode.startswith('ret') and (cur_instr.op1 is not None)): sp_offset += Instruction.get_operand_as_constant(cur_instr.op1) if (sp_offset < 0): self.score += 2.0
def check_contains_conditional_op(self): "\n :return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.\n " for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (cur_instr.opcode.startswith('j') and (cur_instr.opcode != 'jmp')): self.score += 3.0 elif (('cmov' in cur_instr.opcode) or ('cmpxchg' in cur_instr.opcode)): self.score += 2.0 elif ('set' in cur_instr.opcode): self.score += 1.0
-501,580,019,472,423,000
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
src/static_analyzer/Gadget.py
check_contains_conditional_op
michaelbrownuc/GadgetSetAnalyzer
python
def check_contains_conditional_op(self): "\n \n " for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (cur_instr.opcode.startswith('j') and (cur_instr.opcode != 'jmp')): self.score += 3.0 elif (('cmov' in cur_instr.opcode) or ('cmpxchg' in cur_instr.opcode)): self.score += 2.0 elif ('set' in cur_instr.opcode): self.score += 1.0
def check_register_ops(self): "\n :return void: Increases gadget's score if it contains operations on a value carrying or a bystander register\n " first_instr = self.instructions[0] if ((not first_instr.creates_value()) or ('xchg' in first_instr.opcode)): first_family = None else: first_family = Instruction.get_operand_register_family(first_instr.op1) for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if ((first_family is not None) and (first_family == Instruction.get_operand_register_family(cur_instr.op1))): if (cur_instr.opcode in ['shl', 'shr', 'sar', 'sal', 'ror', 'rol', 'rcr', 'rcl']): self.score += 1.5 else: self.score += 1.0 elif (('xchg' not in cur_instr.opcode) and (cur_instr.opcode != 'pop')): if ((cur_instr.op2 is not None) and (Instruction.get_operand_register_family(cur_instr.op2) is not None)): self.score += 1.0 else: self.score += 0.5
409,782,009,336,981,060
:return void: Increases gadget's score if it contains operations on a value-carrying register or a bystander register.
src/static_analyzer/Gadget.py
check_register_ops
michaelbrownuc/GadgetSetAnalyzer
python
def check_register_ops(self): "\n \n " first_instr = self.instructions[0] if ((not first_instr.creates_value()) or ('xchg' in first_instr.opcode)): first_family = None else: first_family = Instruction.get_operand_register_family(first_instr.op1) for i in range(1, (len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if ((first_family is not None) and (first_family == Instruction.get_operand_register_family(cur_instr.op1))): if (cur_instr.opcode in ['shl', 'shr', 'sar', 'sal', 'ror', 'rol', 'rcr', 'rcl']): self.score += 1.5 else: self.score += 1.0 elif (('xchg' not in cur_instr.opcode) and (cur_instr.opcode != 'pop')): if ((cur_instr.op2 is not None) and (Instruction.get_operand_register_family(cur_instr.op2) is not None)): self.score += 1.0 else: self.score += 0.5
def check_branch_target_of_operation(self): "\n :return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain\n operations on the indirect branch target register family.\n " last_instr = self.instructions[(len(self.instructions) - 1)] target_family = Instruction.get_operand_register_family(last_instr.op1) for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (Instruction.get_operand_register_family(cur_instr.op1) == target_family): if (cur_instr.opcode in ['shl', 'shr', 'sar', 'sal', 'ror', 'rol', 'rcr', 'rcl']): self.score += 3.0 else: self.score += 2.0
-2,168,993,783,908,520,400
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain operations on the indirect branch target register family.
src/static_analyzer/Gadget.py
check_branch_target_of_operation
michaelbrownuc/GadgetSetAnalyzer
python
def check_branch_target_of_operation(self): "\n :return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain\n operations on the indirect branch target register family.\n " last_instr = self.instructions[(len(self.instructions) - 1)] target_family = Instruction.get_operand_register_family(last_instr.op1) for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (Instruction.get_operand_register_family(cur_instr.op1) == target_family): if (cur_instr.opcode in ['shl', 'shr', 'sar', 'sal', 'ror', 'rol', 'rcr', 'rcl']): self.score += 3.0 else: self.score += 2.0
def check_memory_writes(self): "\n :return void: Increases gadget's score if the gadget has an instruction that writes to memory.\n " for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (('xchg' in cur_instr.opcode) and (('[' in cur_instr.op1) or ('[' in cur_instr.op2))): self.score += 1.0 elif ((cur_instr.op1 is not None) and ('[' in cur_instr.op1)): self.score += 1.0
1,998,233,147,490,157,800
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
src/static_analyzer/Gadget.py
check_memory_writes
michaelbrownuc/GadgetSetAnalyzer
python
def check_memory_writes(self): "\n \n " for i in range((len(self.instructions) - 1)): cur_instr = self.instructions[i] if (not cur_instr.creates_value()): continue if (('xchg' in cur_instr.opcode) and (('[' in cur_instr.op1) or ('[' in cur_instr.op2))): self.score += 1.0 elif ((cur_instr.op1 is not None) and ('[' in cur_instr.op1)): self.score += 1.0
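Taken together, the check_* methods above accumulate penalties into self.score, so a higher score suggests a gadget whose side effects make it harder to use directly. A speculative sketch of a scoring pass over one gadget; how the analyzer actually orchestrates these checks is an assumption, and the gadget string is made up:

gadget = Gadget("0x4011d5 : add rsp, 0x10 ; pop rbx ; ret")
for check in (gadget.check_contains_leave,
              gadget.check_sp_target_of_operation,
              gadget.check_negative_sp_offsets,
              gadget.check_contains_conditional_op,
              gadget.check_register_ops,
              gadget.check_memory_writes):
    check()                # each check may add to gadget.score
print(gadget.score)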
def proof_of_work(last_proof): "\n Simple Proof of Work Algorithm\n - Find a number p' such that hash(pp') contains 6 leading\n zeroes, where p is the previous p'\n - p is the previous proof, and p' is the new proof\n " print(f''' Search for proof initialized. ''') proof = 0 while (valid_proof(last_proof, proof) is False): proof += 1 print(f''' Search for proof complete, proof is {proof} ''') return proof
9,213,363,334,812,784,000
Simple Proof of Work Algorithm - Find a number p' such that hash(pp') contains 6 leading zeroes, where p is the previous p' - p is the previous proof, and p' is the new proof
client_mining_p/miner.py
proof_of_work
lambda-projects-lafriedel/Blockchain
python
def proof_of_work(last_proof): "\n Simple Proof of Work Algorithm\n - Find a number p' such that hash(pp') contains 6 leading\n zeroes, where p is the previous p'\n - p is the previous proof, and p' is the new proof\n " print(f' Search for proof initialized. ') proof = 0 while (valid_proof(last_proof, proof) is False): proof += 1 print(f' Search for proof complete, proof is {proof} ') return proof
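proof_of_work relies on a valid_proof helper that is not shown in this record. A minimal sketch of what it could look like, assuming the SHA-256 of the concatenated proofs must begin with six zero hex characters as the docstring describes (the exact string encoding is an assumption):

import hashlib

def valid_proof(last_proof, proof):
    # Hash the previous proof concatenated with the candidate proof and
    # require six leading zero characters, per proof_of_work's docstring.
    guess = f'{last_proof}{proof}'.encode()
    guess_hash = hashlib.sha256(guess).hexdigest()
    return guess_hash[:6] == '000000'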
def equal_up_to_global_phase(val: Any, other: Any, *, atol: Union[(int, float)]=1e-08) -> bool: 'Determine whether two objects are equal up to global phase.\n\n If `val` implements a `_equal_up_to_global_phase_` method then it is\n invoked and takes precedence over all other checks:\n - For complex primitive type the magnitudes of the values are compared.\n - For `val` and `other` both iterable of the same length, consecutive\n elements are compared recursively. Types of `val` and `other` does not\n necessarily needs to match each other. They just need to be iterable and\n have the same structure.\n - For all other types, fall back to `_approx_eq_`\n\n Args:\n val: Source object for approximate comparison.\n other: Target object for approximate comparison.\n atol: The minimum absolute tolerance. This places an upper bound on\n the differences in *magnitudes* of two compared complex numbers.\n\n Returns:\n True if objects are approximately equal up to phase, False otherwise.\n ' eq_up_to_phase_getter = getattr(val, '_equal_up_to_global_phase_', None) if (eq_up_to_phase_getter is not None): result = eq_up_to_phase_getter(other, atol) if (result is not NotImplemented): return result other_eq_up_to_phase_getter = getattr(other, '_equal_up_to_global_phase_', None) if (other_eq_up_to_phase_getter is not None): result = other_eq_up_to_phase_getter(val, atol) if (result is not NotImplemented): return result if (isinstance(val, Iterable) and isinstance(other, Iterable)): a = np.asarray(val) b = np.asarray(other) if ((a.dtype.kind in 'uifc') and (b.dtype.kind in 'uifc')): return linalg.allclose_up_to_global_phase(a, b, atol=atol) if (isinstance(val, numbers.Number) and isinstance(other, numbers.Number)): result = approx_eq(abs(val), abs(other), atol=atol) if (result is not NotImplemented): return result return approx_eq(val, other, atol=atol)
-9,119,513,904,675,405,000
Determine whether two objects are equal up to global phase. If `val` implements a `_equal_up_to_global_phase_` method then it is invoked and takes precedence over all other checks: - For complex primitive types the magnitudes of the values are compared. - For `val` and `other` both iterable of the same length, consecutive elements are compared recursively. The types of `val` and `other` do not necessarily need to match each other. They just need to be iterable and have the same structure. - For all other types, fall back to `_approx_eq_` Args: val: Source object for approximate comparison. other: Target object for approximate comparison. atol: The minimum absolute tolerance. This places an upper bound on the differences in *magnitudes* of two compared complex numbers. Returns: True if objects are approximately equal up to phase, False otherwise.
cirq-core/cirq/protocols/equal_up_to_global_phase_protocol.py
equal_up_to_global_phase
95-martin-orion/Cirq
python
def equal_up_to_global_phase(val: Any, other: Any, *, atol: Union[(int, float)]=1e-08) -> bool: 'Determine whether two objects are equal up to global phase.\n\n If `val` implements a `_equal_up_to_global_phase_` method then it is\n invoked and takes precedence over all other checks:\n - For complex primitive type the magnitudes of the values are compared.\n - For `val` and `other` both iterable of the same length, consecutive\n elements are compared recursively. Types of `val` and `other` does not\n necessarily needs to match each other. They just need to be iterable and\n have the same structure.\n - For all other types, fall back to `_approx_eq_`\n\n Args:\n val: Source object for approximate comparison.\n other: Target object for approximate comparison.\n atol: The minimum absolute tolerance. This places an upper bound on\n the differences in *magnitudes* of two compared complex numbers.\n\n Returns:\n True if objects are approximately equal up to phase, False otherwise.\n ' eq_up_to_phase_getter = getattr(val, '_equal_up_to_global_phase_', None) if (eq_up_to_phase_getter is not None): result = eq_up_to_phase_getter(other, atol) if (result is not NotImplemented): return result other_eq_up_to_phase_getter = getattr(other, '_equal_up_to_global_phase_', None) if (other_eq_up_to_phase_getter is not None): result = other_eq_up_to_phase_getter(val, atol) if (result is not NotImplemented): return result if (isinstance(val, Iterable) and isinstance(other, Iterable)): a = np.asarray(val) b = np.asarray(other) if ((a.dtype.kind in 'uifc') and (b.dtype.kind in 'uifc')): return linalg.allclose_up_to_global_phase(a, b, atol=atol) if (isinstance(val, numbers.Number) and isinstance(other, numbers.Number)): result = approx_eq(abs(val), abs(other), atol=atol) if (result is not NotImplemented): return result return approx_eq(val, other, atol=atol)
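A short usage sketch of the protocol entry point above as exposed by cirq; the second pair of arrays differs only by a global factor of 1j, the third does not:

import numpy as np
import cirq

cirq.equal_up_to_global_phase(1j, 1)                                  # True: equal magnitudes
cirq.equal_up_to_global_phase(np.array([1, 1j]), np.array([1j, -1]))  # True: second is 1j * first
cirq.equal_up_to_global_phase(np.array([1, 1j]), np.array([1, -1j]))  # False: no single phase relates them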
@doc_private def _equal_up_to_global_phase_(self, other: Any, *, atol: Union[(int, float)]) -> bool: 'Approximate comparator.\n\n Types implementing this protocol define their own logic for comparison\n with other types.\n\n Args:\n other: Target object for comparison of equality up to global phase.\n atol: The minimum absolute tolerance. See `np.isclose()`\n documentation for details.\n\n Returns:\n True if objects are equal up to a global phase, False otherwise.\n Returns NotImplemented when checking equality up to a global phase\n is not implemented for given types.\n '
556,117,564,477,804,200
Approximate comparator. Types implementing this protocol define their own logic for comparison with other types. Args: other: Target object for comparison of equality up to global phase. atol: The minimum absolute tolerance. See `np.isclose()` documentation for details. Returns: True if objects are equal up to a global phase, False otherwise. Returns NotImplemented when checking equality up to a global phase is not implemented for given types.
cirq-core/cirq/protocols/equal_up_to_global_phase_protocol.py
_equal_up_to_global_phase_
95-martin-orion/Cirq
python
@doc_private def _equal_up_to_global_phase_(self, other: Any, *, atol: Union[(int, float)]) -> bool: 'Approximate comparator.\n\n Types implementing this protocol define their own logic for comparison\n with other types.\n\n Args:\n other: Target object for comparison of equality up to global phase.\n atol: The minimum absolute tolerance. See `np.isclose()`\n documentation for details.\n\n Returns:\n True if objects are equal up to a global phase, False otherwise.\n Returns NotImplemented when checking equality up to a global phase\n is not implemented for given types.\n '
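A hedged sketch of a type opting into the protocol by defining the magic method documented above; the PhasedValue class and its magnitude-only comparison are invented purely for illustration:

class PhasedValue:
    """Toy wrapper around a complex amplitude (illustration only)."""

    def __init__(self, amplitude):
        self.amplitude = complex(amplitude)

    def _equal_up_to_global_phase_(self, other, *, atol):
        if not isinstance(other, PhasedValue):
            return NotImplemented  # defer to the protocol's other checks
        # Two amplitudes agree up to a global phase when their magnitudes agree.
        return abs(abs(self.amplitude) - abs(other.amplitude)) <= atol

# cirq.equal_up_to_global_phase(PhasedValue(1j), PhasedValue(-1)) -> True under this sketch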
@tf_export('train.load_checkpoint') def load_checkpoint(ckpt_dir_or_file): 'Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.\n\n If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,\n reader for the latest checkpoint is returned.\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint\n file.\n\n Returns:\n `CheckpointReader` object.\n\n Raises:\n ValueError: If `ckpt_dir_or_file` resolves to a directory with no\n checkpoints.\n ' filename = _get_checkpoint_filename(ckpt_dir_or_file) if (filename is None): raise ValueError(("Couldn't find 'checkpoint' file or checkpoints in given directory %s" % ckpt_dir_or_file)) return py_checkpoint_reader.NewCheckpointReader(filename)
3,253,334,963,162,104,000
Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`. If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints, reader for the latest checkpoint is returned. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint file. Returns: `CheckpointReader` object. Raises: ValueError: If `ckpt_dir_or_file` resolves to a directory with no checkpoints.
tensorflow/python/training/checkpoint_utils.py
load_checkpoint
KodeWorker/tensorflow
python
@tf_export('train.load_checkpoint') def load_checkpoint(ckpt_dir_or_file): 'Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.\n\n If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,\n reader for the latest checkpoint is returned.\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint\n file.\n\n Returns:\n `CheckpointReader` object.\n\n Raises:\n ValueError: If `ckpt_dir_or_file` resolves to a directory with no\n checkpoints.\n ' filename = _get_checkpoint_filename(ckpt_dir_or_file) if (filename is None): raise ValueError(("Couldn't find 'checkpoint' file or checkpoints in given directory %s" % ckpt_dir_or_file)) return py_checkpoint_reader.NewCheckpointReader(filename)
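A small usage sketch for load_checkpoint; the checkpoint directory is a placeholder, and the reader accessors used are the ones the helpers below rely on (get_variable_to_shape_map, get_tensor):

import tensorflow as tf

reader = tf.train.load_checkpoint('/tmp/model_dir')      # hypothetical checkpoint directory
shape_map = reader.get_variable_to_shape_map()           # {variable name: shape}
for name in sorted(shape_map):
    print(name, shape_map[name], reader.get_tensor(name).dtype)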
@tf_export('train.load_variable') def load_variable(ckpt_dir_or_file, name): 'Returns the tensor value of the given variable in the checkpoint.\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n name: Name of the variable to return.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n ' if name.endswith(':0'): name = name[:(- 2)] reader = load_checkpoint(ckpt_dir_or_file) return reader.get_tensor(name)
-7,616,513,250,938,454,000
Returns the tensor value of the given variable in the checkpoint. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. name: Name of the variable to return. Returns: A numpy `ndarray` with a copy of the value of this variable.
tensorflow/python/training/checkpoint_utils.py
load_variable
KodeWorker/tensorflow
python
@tf_export('train.load_variable') def load_variable(ckpt_dir_or_file, name): 'Returns the tensor value of the given variable in the checkpoint.\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n name: Name of the variable to return.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n ' if name.endswith(':0'): name = name[:(- 2)] reader = load_checkpoint(ckpt_dir_or_file) return reader.get_tensor(name)
@tf_export('train.list_variables') def list_variables(ckpt_dir_or_file): 'Returns list of all variables in the checkpoint.\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n\n Returns:\n List of tuples `(name, shape)`.\n ' reader = load_checkpoint(ckpt_dir_or_file) variable_map = reader.get_variable_to_shape_map() names = sorted(variable_map.keys()) result = [] for name in names: result.append((name, variable_map[name])) return result
1,467,950,224,971,931,600
Returns list of all variables in the checkpoint. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. Returns: List of tuples `(name, shape)`.
tensorflow/python/training/checkpoint_utils.py
list_variables
KodeWorker/tensorflow
python
@tf_export('train.list_variables') def list_variables(ckpt_dir_or_file): 'Returns list of all variables in the checkpoint.\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n\n Returns:\n List of tuples `(name, shape)`.\n ' reader = load_checkpoint(ckpt_dir_or_file) variable_map = reader.get_variable_to_shape_map() names = sorted(variable_map.keys()) result = [] for name in names: result.append((name, variable_map[name])) return result
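The two higher-level helpers above compose the same way; a sketch with a placeholder checkpoint path and a hypothetical variable name:

import tensorflow as tf

ckpt = '/tmp/model_dir'                                  # hypothetical checkpoint directory
for name, shape in tf.train.list_variables(ckpt):
    print(name, shape)
weights = tf.train.load_variable(ckpt, 'dense/kernel')   # hypothetical variable name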
def wait_for_new_checkpoint(checkpoint_dir, last_checkpoint=None, seconds_to_sleep=1, timeout=None): "Waits until a new checkpoint file is found.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n last_checkpoint: The last checkpoint path used or `None` if we're expecting\n a checkpoint for the first time.\n seconds_to_sleep: The number of seconds to sleep for before looking for a\n new checkpoint.\n timeout: The maximum number of seconds to wait. If left as `None`, then the\n process will wait indefinitely.\n\n Returns:\n a new checkpoint path, or None if the timeout was reached.\n " logging.info('Waiting for new checkpoint at %s', checkpoint_dir) stop_time = ((time.time() + timeout) if (timeout is not None) else None) while True: checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir) if ((checkpoint_path is None) or (checkpoint_path == last_checkpoint)): if ((stop_time is not None) and ((time.time() + seconds_to_sleep) > stop_time)): return None time.sleep(seconds_to_sleep) else: logging.info('Found new checkpoint at %s', checkpoint_path) return checkpoint_path
-1,605,284,766,611,941,000
Waits until a new checkpoint file is found. Args: checkpoint_dir: The directory in which checkpoints are saved. last_checkpoint: The last checkpoint path used or `None` if we're expecting a checkpoint for the first time. seconds_to_sleep: The number of seconds to sleep for before looking for a new checkpoint. timeout: The maximum number of seconds to wait. If left as `None`, then the process will wait indefinitely. Returns: a new checkpoint path, or None if the timeout was reached.
tensorflow/python/training/checkpoint_utils.py
wait_for_new_checkpoint
KodeWorker/tensorflow
python
def wait_for_new_checkpoint(checkpoint_dir, last_checkpoint=None, seconds_to_sleep=1, timeout=None): "Waits until a new checkpoint file is found.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n last_checkpoint: The last checkpoint path used or `None` if we're expecting\n a checkpoint for the first time.\n seconds_to_sleep: The number of seconds to sleep for before looking for a\n new checkpoint.\n timeout: The maximum number of seconds to wait. If left as `None`, then the\n process will wait indefinitely.\n\n Returns:\n a new checkpoint path, or None if the timeout was reached.\n " logging.info('Waiting for new checkpoint at %s', checkpoint_dir) stop_time = ((time.time() + timeout) if (timeout is not None) else None) while True: checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir) if ((checkpoint_path is None) or (checkpoint_path == last_checkpoint)): if ((stop_time is not None) and ((time.time() + seconds_to_sleep) > stop_time)): return None time.sleep(seconds_to_sleep) else: logging.info('Found new checkpoint at %s', checkpoint_path) return checkpoint_path
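`wait_for_new_checkpoint` is not re-exported under `tf.train` in this file, so the sketch below imports it from the module path listed above; private module paths like this can move between releases, and the directory is a placeholder.

```python
from tensorflow.python.training import checkpoint_utils

ckpt_dir = "/tmp/my_model_ckpts"  # assumed path being written to by a trainer

# Block for at most 60 seconds, polling every 5 seconds; returns None on timeout.
path = checkpoint_utils.wait_for_new_checkpoint(
    ckpt_dir, last_checkpoint=None, seconds_to_sleep=5, timeout=60)
if path is None:
    print("No new checkpoint appeared within the timeout.")
else:
    print("Training produced", path)
```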
@tf_export('train.checkpoints_iterator') def checkpoints_iterator(checkpoint_dir, min_interval_secs=0, timeout=None, timeout_fn=None): 'Continuously yield new checkpoint files as they appear.\n\n The iterator only checks for new checkpoints when control flow has been\n reverted to it. This means it can miss checkpoints if your code takes longer\n to run between iterations than `min_interval_secs` or the interval at which\n new checkpoints are written.\n\n The `timeout` argument is the maximum number of seconds to block waiting for\n a new checkpoint. It is used in combination with the `timeout_fn` as\n follows:\n\n * If the timeout expires and no `timeout_fn` was specified, the iterator\n stops yielding.\n * If a `timeout_fn` was specified, that function is called and if it returns\n a true boolean value the iterator stops yielding.\n * If the function returns a false boolean value then the iterator resumes the\n wait for new checkpoints. At this point the timeout logic applies again.\n\n This behavior gives control to callers on what to do if checkpoints do not\n come fast enough or stop being generated. For example, if callers have a way\n to detect that the training has stopped and know that no new checkpoints\n will be generated, they can provide a `timeout_fn` that returns `True` when\n the training has stopped. If they know that the training is still going on\n they return `False` instead.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n min_interval_secs: The minimum number of seconds between yielding\n checkpoints.\n timeout: The maximum number of seconds to wait between checkpoints. If left\n as `None`, then the process will wait indefinitely.\n timeout_fn: Optional function to call after a timeout. If the function\n returns True, then it means that no new checkpoints will be generated and\n the iterator will exit. The function is called with no arguments.\n\n Yields:\n String paths to latest checkpoint files as they arrive.\n ' checkpoint_path = None while True: new_checkpoint_path = wait_for_new_checkpoint(checkpoint_dir, checkpoint_path, timeout=timeout) if (new_checkpoint_path is None): if (not timeout_fn): logging.info('Timed-out waiting for a checkpoint.') return if timeout_fn(): return else: continue start = time.time() checkpoint_path = new_checkpoint_path (yield checkpoint_path) time_to_next_eval = ((start + min_interval_secs) - time.time()) if (time_to_next_eval > 0): time.sleep(time_to_next_eval)
8,677,676,642,965,667,000
Continuously yield new checkpoint files as they appear. The iterator only checks for new checkpoints when control flow has been reverted to it. This means it can miss checkpoints if your code takes longer to run between iterations than `min_interval_secs` or the interval at which new checkpoints are written. The `timeout` argument is the maximum number of seconds to block waiting for a new checkpoint. It is used in combination with the `timeout_fn` as follows: * If the timeout expires and no `timeout_fn` was specified, the iterator stops yielding. * If a `timeout_fn` was specified, that function is called and if it returns a true boolean value the iterator stops yielding. * If the function returns a false boolean value then the iterator resumes the wait for new checkpoints. At this point the timeout logic applies again. This behavior gives control to callers on what to do if checkpoints do not come fast enough or stop being generated. For example, if callers have a way to detect that the training has stopped and know that no new checkpoints will be generated, they can provide a `timeout_fn` that returns `True` when the training has stopped. If they know that the training is still going on they return `False` instead. Args: checkpoint_dir: The directory in which checkpoints are saved. min_interval_secs: The minimum number of seconds between yielding checkpoints. timeout: The maximum number of seconds to wait between checkpoints. If left as `None`, then the process will wait indefinitely. timeout_fn: Optional function to call after a timeout. If the function returns True, then it means that no new checkpoints will be generated and the iterator will exit. The function is called with no arguments. Yields: String paths to latest checkpoint files as they arrive.
tensorflow/python/training/checkpoint_utils.py
checkpoints_iterator
KodeWorker/tensorflow
python
@tf_export('train.checkpoints_iterator') def checkpoints_iterator(checkpoint_dir, min_interval_secs=0, timeout=None, timeout_fn=None): 'Continuously yield new checkpoint files as they appear.\n\n The iterator only checks for new checkpoints when control flow has been\n reverted to it. This means it can miss checkpoints if your code takes longer\n to run between iterations than `min_interval_secs` or the interval at which\n new checkpoints are written.\n\n The `timeout` argument is the maximum number of seconds to block waiting for\n a new checkpoint. It is used in combination with the `timeout_fn` as\n follows:\n\n * If the timeout expires and no `timeout_fn` was specified, the iterator\n stops yielding.\n * If a `timeout_fn` was specified, that function is called and if it returns\n a true boolean value the iterator stops yielding.\n * If the function returns a false boolean value then the iterator resumes the\n wait for new checkpoints. At this point the timeout logic applies again.\n\n This behavior gives control to callers on what to do if checkpoints do not\n come fast enough or stop being generated. For example, if callers have a way\n to detect that the training has stopped and know that no new checkpoints\n will be generated, they can provide a `timeout_fn` that returns `True` when\n the training has stopped. If they know that the training is still going on\n they return `False` instead.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n min_interval_secs: The minimum number of seconds between yielding\n checkpoints.\n timeout: The maximum number of seconds to wait between checkpoints. If left\n as `None`, then the process will wait indefinitely.\n timeout_fn: Optional function to call after a timeout. If the function\n returns True, then it means that no new checkpoints will be generated and\n the iterator will exit. The function is called with no arguments.\n\n Yields:\n String paths to latest checkpoint files as they arrive.\n ' checkpoint_path = None while True: new_checkpoint_path = wait_for_new_checkpoint(checkpoint_dir, checkpoint_path, timeout=timeout) if (new_checkpoint_path is None): if (not timeout_fn): logging.info('Timed-out waiting for a checkpoint.') return if timeout_fn(): return else: continue start = time.time() checkpoint_path = new_checkpoint_path (yield checkpoint_path) time_to_next_eval = ((start + min_interval_secs) - time.time()) if (time_to_next_eval > 0): time.sleep(time_to_next_eval)
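An evaluation-loop sketch for `tf.train.checkpoints_iterator`; the sentinel file checked by `timeout_fn` is a hypothetical convention, not part of the API.

```python
import os
import tensorflow as tf

ckpt_dir = "/tmp/my_model_ckpts"                        # assumed path
stop_marker = os.path.join(ckpt_dir, "TRAINING_DONE")   # hypothetical sentinel file

def training_finished():
    # Returning True after a timeout ends the iterator, as described above.
    return os.path.exists(stop_marker)

for ckpt_path in tf.train.checkpoints_iterator(
        ckpt_dir, min_interval_secs=30, timeout=600, timeout_fn=training_finished):
    print("Evaluating", ckpt_path)
    # ... run evaluation against ckpt_path here ...
```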
@tf_export(v1=['train.init_from_checkpoint']) def init_from_checkpoint(ckpt_dir_or_file, assignment_map): "Replaces `tf.Variable` initializers so they load from a checkpoint file.\n\n Values are not loaded immediately, but when the initializer is run\n (typically by running a `tf.compat.v1.global_variables_initializer` op).\n\n Note: This overrides default initialization ops of specified variables and\n redefines dtype.\n\n Assignment map supports following syntax:\n\n * `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in\n current `scope_name` from `checkpoint_scope_name` with matching tensor\n names.\n * `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -\n will initialize `scope_name/variable_name` variable\n from `checkpoint_scope_name/some_other_variable`.\n * `'scope_variable_name': variable` - will initialize given `tf.Variable`\n object with tensor 'scope_variable_name' from the checkpoint.\n * `'scope_variable_name': list(variable)` - will initialize list of\n partitioned variables with tensor 'scope_variable_name' from the checkpoint.\n * `'/': 'scope_name/'` - will load all variables in current `scope_name` from\n checkpoint's root (e.g. no scope).\n\n Supports loading into partitioned variables, which are represented as\n `'<variable>/part_<part #>'`.\n\n Example:\n\n ```python\n\n # Say, '/tmp/model.ckpt' has the following tensors:\n # -- name='old_scope_1/var1', shape=[20, 2]\n # -- name='old_scope_1/var2', shape=[50, 4]\n # -- name='old_scope_2/var3', shape=[100, 100]\n\n # Create new model's variables\n with tf.compat.v1.variable_scope('new_scope_1'):\n var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],\n initializer=tf.compat.v1.zeros_initializer())\n with tf.compat.v1.variable_scope('new_scope_2'):\n var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],\n initializer=tf.compat.v1.zeros_initializer())\n # Partition into 5 variables along the first axis.\n var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],\n initializer=tf.compat.v1.zeros_initializer(),\n partitioner=lambda shape, dtype: [5, 1])\n\n # Initialize all variables in `new_scope_1` from `old_scope_1`.\n init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})\n\n # Use names to specify which variables to initialize from checkpoint.\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_1/var1': 'new_scope_1/var1',\n 'old_scope_1/var2': 'new_scope_2/var2'})\n\n # Or use tf.Variable objects to identify what to initialize.\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_1/var1': var1,\n 'old_scope_1/var2': var2})\n\n # Initialize partitioned variables using variable's name\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_2/var3': 'new_scope_2/var3'})\n\n # Or specify the list of tf.Variable objects.\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_2/var3': var3._get_variable_list()})\n\n ```\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n assignment_map: Dict, where keys are names of the variables in the\n checkpoint and values are current variables or names of current variables\n (in default graph).\n\n Raises:\n ValueError: If missing variables in current graph, or if missing\n checkpoints or tensors in checkpoints.\n " init_from_checkpoint_fn = (lambda _: _init_from_checkpoint(ckpt_dir_or_file, assignment_map)) if distribution_strategy_context.get_cross_replica_context(): init_from_checkpoint_fn(None) else: 
distribution_strategy_context.get_replica_context().merge_call(init_from_checkpoint_fn)
-8,538,280,477,431,354,000
Replaces `tf.Variable` initializers so they load from a checkpoint file. Values are not loaded immediately, but when the initializer is run (typically by running a `tf.compat.v1.global_variables_initializer` op). Note: This overrides default initialization ops of specified variables and redefines dtype. Assignment map supports following syntax: * `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in current `scope_name` from `checkpoint_scope_name` with matching tensor names. * `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` - will initialize `scope_name/variable_name` variable from `checkpoint_scope_name/some_other_variable`. * `'scope_variable_name': variable` - will initialize given `tf.Variable` object with tensor 'scope_variable_name' from the checkpoint. * `'scope_variable_name': list(variable)` - will initialize list of partitioned variables with tensor 'scope_variable_name' from the checkpoint. * `'/': 'scope_name/'` - will load all variables in current `scope_name` from checkpoint's root (e.g. no scope). Supports loading into partitioned variables, which are represented as `'<variable>/part_<part #>'`. Example: ```python # Say, '/tmp/model.ckpt' has the following tensors: # -- name='old_scope_1/var1', shape=[20, 2] # -- name='old_scope_1/var2', shape=[50, 4] # -- name='old_scope_2/var3', shape=[100, 100] # Create new model's variables with tf.compat.v1.variable_scope('new_scope_1'): var1 = tf.compat.v1.get_variable('var1', shape=[20, 2], initializer=tf.compat.v1.zeros_initializer()) with tf.compat.v1.variable_scope('new_scope_2'): var2 = tf.compat.v1.get_variable('var2', shape=[50, 4], initializer=tf.compat.v1.zeros_initializer()) # Partition into 5 variables along the first axis. var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100], initializer=tf.compat.v1.zeros_initializer(), partitioner=lambda shape, dtype: [5, 1]) # Initialize all variables in `new_scope_1` from `old_scope_1`. init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'}) # Use names to specify which variables to initialize from checkpoint. init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/var1': 'new_scope_1/var1', 'old_scope_1/var2': 'new_scope_2/var2'}) # Or use tf.Variable objects to identify what to initialize. init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/var1': var1, 'old_scope_1/var2': var2}) # Initialize partitioned variables using variable's name init_from_checkpoint('/tmp/model.ckpt', {'old_scope_2/var3': 'new_scope_2/var3'}) # Or specify the list of tf.Variable objects. init_from_checkpoint('/tmp/model.ckpt', {'old_scope_2/var3': var3._get_variable_list()}) ``` Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. assignment_map: Dict, where keys are names of the variables in the checkpoint and values are current variables or names of current variables (in default graph). Raises: ValueError: If missing variables in current graph, or if missing checkpoints or tensors in checkpoints.
tensorflow/python/training/checkpoint_utils.py
init_from_checkpoint
KodeWorker/tensorflow
python
@tf_export(v1=['train.init_from_checkpoint']) def init_from_checkpoint(ckpt_dir_or_file, assignment_map): "Replaces `tf.Variable` initializers so they load from a checkpoint file.\n\n Values are not loaded immediately, but when the initializer is run\n (typically by running a `tf.compat.v1.global_variables_initializer` op).\n\n Note: This overrides default initialization ops of specified variables and\n redefines dtype.\n\n Assignment map supports following syntax:\n\n * `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in\n current `scope_name` from `checkpoint_scope_name` with matching tensor\n names.\n * `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -\n will initialize `scope_name/variable_name` variable\n from `checkpoint_scope_name/some_other_variable`.\n * `'scope_variable_name': variable` - will initialize given `tf.Variable`\n object with tensor 'scope_variable_name' from the checkpoint.\n * `'scope_variable_name': list(variable)` - will initialize list of\n partitioned variables with tensor 'scope_variable_name' from the checkpoint.\n * `'/': 'scope_name/'` - will load all variables in current `scope_name` from\n checkpoint's root (e.g. no scope).\n\n Supports loading into partitioned variables, which are represented as\n `'<variable>/part_<part #>'`.\n\n Example:\n\n ```python\n\n # Say, '/tmp/model.ckpt' has the following tensors:\n # -- name='old_scope_1/var1', shape=[20, 2]\n # -- name='old_scope_1/var2', shape=[50, 4]\n # -- name='old_scope_2/var3', shape=[100, 100]\n\n # Create new model's variables\n with tf.compat.v1.variable_scope('new_scope_1'):\n var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],\n initializer=tf.compat.v1.zeros_initializer())\n with tf.compat.v1.variable_scope('new_scope_2'):\n var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],\n initializer=tf.compat.v1.zeros_initializer())\n # Partition into 5 variables along the first axis.\n var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],\n initializer=tf.compat.v1.zeros_initializer(),\n partitioner=lambda shape, dtype: [5, 1])\n\n # Initialize all variables in `new_scope_1` from `old_scope_1`.\n init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})\n\n # Use names to specify which variables to initialize from checkpoint.\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_1/var1': 'new_scope_1/var1',\n 'old_scope_1/var2': 'new_scope_2/var2'})\n\n # Or use tf.Variable objects to identify what to initialize.\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_1/var1': var1,\n 'old_scope_1/var2': var2})\n\n # Initialize partitioned variables using variable's name\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_2/var3': 'new_scope_2/var3'})\n\n # Or specify the list of tf.Variable objects.\n init_from_checkpoint('/tmp/model.ckpt',\n {'old_scope_2/var3': var3._get_variable_list()})\n\n ```\n\n Args:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n assignment_map: Dict, where keys are names of the variables in the\n checkpoint and values are current variables or names of current variables\n (in default graph).\n\n Raises:\n ValueError: If missing variables in current graph, or if missing\n checkpoints or tensors in checkpoints.\n " init_from_checkpoint_fn = (lambda _: _init_from_checkpoint(ckpt_dir_or_file, assignment_map)) if distribution_strategy_context.get_cross_replica_context(): init_from_checkpoint_fn(None) else: 
distribution_strategy_context.get_replica_context().merge_call(init_from_checkpoint_fn)
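A sketch that complements the docstring example above by building the `assignment_map` programmatically from `tf.train.list_variables`; it assumes the same `/tmp/model.ckpt` layout as the docstring and runs in TF1 graph mode.

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # init_from_checkpoint is a graph-mode (v1) API

ckpt = '/tmp/model.ckpt'  # assumed checkpoint, laid out as in the docstring above

# The destination variables must already exist in the default graph.
with tf.variable_scope('new_scope_1'):
    tf.get_variable('var1', shape=[20, 2], initializer=tf.zeros_initializer())

# Build a prefix-rewriting assignment map from whatever the checkpoint contains,
# keeping only entries that have a matching variable in the current graph.
graph_vars = {v.op.name for v in tf.global_variables()}
assignment_map = {}
for name, _ in tf.train.list_variables(ckpt):
    if name.startswith('old_scope_1/'):
        target = 'new_scope_1/' + name[len('old_scope_1/'):]
        if target in graph_vars:
            assignment_map[name] = target

tf.train.init_from_checkpoint(ckpt, assignment_map)

# Values are only loaded when the (now overridden) initializers actually run.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
```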
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map): 'See `init_from_checkpoint` for documentation.' ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file) reader = load_checkpoint(ckpt_dir_or_file) variable_map = reader.get_variable_to_shape_map() for (tensor_name_in_ckpt, current_var_or_name) in sorted(six.iteritems(assignment_map)): var = None if (_is_variable(current_var_or_name) or (isinstance(current_var_or_name, list) and all((_is_variable(v) for v in current_var_or_name)))): var = current_var_or_name else: store_vars = vs._get_default_variable_store()._vars var = store_vars.get(current_var_or_name, None) if (var is None): var = _collect_partitioned_variable(current_var_or_name, store_vars) if (var is not None): if (tensor_name_in_ckpt not in variable_map): raise ValueError(('Tensor %s is not found in %s checkpoint %s' % (tensor_name_in_ckpt, ckpt_dir_or_file, variable_map))) if _is_variable(var): if (not var.get_shape().is_compatible_with(variable_map[tensor_name_in_ckpt])): raise ValueError(("Shape of variable %s (%s) doesn't match with shape of tensor %s (%s) from checkpoint reader." % (var.name, str(var.get_shape()), tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])))) var_name = var.name else: var_name = ','.join([v.name for v in var]) _set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt) logging.debug('Initialize variable %s from checkpoint %s with %s', var_name, ckpt_dir_or_file, tensor_name_in_ckpt) else: scopes = '' if ('/' in current_var_or_name): scopes = current_var_or_name[:current_var_or_name.rindex('/')] if (not tensor_name_in_ckpt.endswith('/')): raise ValueError("Assignment map with scope only name {} should map to scope only {}. Should be 'scope/': 'other_scope/'.".format(scopes, tensor_name_in_ckpt)) scope_variables = set() for var_name in store_vars: if ((not scopes) or var_name.startswith((scopes + '/'))): if ('/part_' in var_name): var_name = var_name[:var_name.index('/part_')] scope_variables.add(var_name) for var_name in sorted(scope_variables): full_tensor_name = var_name[len(scopes):] if (current_var_or_name != '/'): full_tensor_name = full_tensor_name[1:] if (tensor_name_in_ckpt != '/'): full_tensor_name = (tensor_name_in_ckpt + full_tensor_name) if full_tensor_name.endswith('/'): full_tensor_name = full_tensor_name[:(- 1)] if (full_tensor_name not in variable_map): raise ValueError(('Tensor %s (%s in %s) is not found in %s checkpoint' % (full_tensor_name, var_name[(len(scopes) + 1):], tensor_name_in_ckpt, ckpt_dir_or_file))) var = store_vars.get(var_name, None) if (var is None): var = _collect_partitioned_variable(var_name, store_vars) _set_variable_or_list_initializer(var, ckpt_file, full_tensor_name) logging.debug('Initialize variable %s from checkpoint %s with %s', var_name, ckpt_dir_or_file, full_tensor_name)
-3,119,381,913,987,592,700
See `init_from_checkpoint` for documentation.
tensorflow/python/training/checkpoint_utils.py
_init_from_checkpoint
KodeWorker/tensorflow
python
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map): ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file) reader = load_checkpoint(ckpt_dir_or_file) variable_map = reader.get_variable_to_shape_map() for (tensor_name_in_ckpt, current_var_or_name) in sorted(six.iteritems(assignment_map)): var = None if (_is_variable(current_var_or_name) or (isinstance(current_var_or_name, list) and all((_is_variable(v) for v in current_var_or_name)))): var = current_var_or_name else: store_vars = vs._get_default_variable_store()._vars var = store_vars.get(current_var_or_name, None) if (var is None): var = _collect_partitioned_variable(current_var_or_name, store_vars) if (var is not None): if (tensor_name_in_ckpt not in variable_map): raise ValueError(('Tensor %s is not found in %s checkpoint %s' % (tensor_name_in_ckpt, ckpt_dir_or_file, variable_map))) if _is_variable(var): if (not var.get_shape().is_compatible_with(variable_map[tensor_name_in_ckpt])): raise ValueError(("Shape of variable %s (%s) doesn't match with shape of tensor %s (%s) from checkpoint reader." % (var.name, str(var.get_shape()), tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])))) var_name = var.name else: var_name = ','.join([v.name for v in var]) _set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt) logging.debug('Initialize variable %s from checkpoint %s with %s', var_name, ckpt_dir_or_file, tensor_name_in_ckpt) else: scopes = '' if ('/' in current_var_or_name): scopes = current_var_or_name[:current_var_or_name.rindex('/')] if (not tensor_name_in_ckpt.endswith('/')): raise ValueError("Assignment map with scope only name {} should map to scope only {}. Should be 'scope/': 'other_scope/'.".format(scopes, tensor_name_in_ckpt)) scope_variables = set() for var_name in store_vars: if ((not scopes) or var_name.startswith((scopes + '/'))): if ('/part_' in var_name): var_name = var_name[:var_name.index('/part_')] scope_variables.add(var_name) for var_name in sorted(scope_variables): full_tensor_name = var_name[len(scopes):] if (current_var_or_name != '/'): full_tensor_name = full_tensor_name[1:] if (tensor_name_in_ckpt != '/'): full_tensor_name = (tensor_name_in_ckpt + full_tensor_name) if full_tensor_name.endswith('/'): full_tensor_name = full_tensor_name[:(- 1)] if (full_tensor_name not in variable_map): raise ValueError(('Tensor %s (%s in %s) is not found in %s checkpoint' % (full_tensor_name, var_name[(len(scopes) + 1):], tensor_name_in_ckpt, ckpt_dir_or_file))) var = store_vars.get(var_name, None) if (var is None): var = _collect_partitioned_variable(var_name, store_vars) _set_variable_or_list_initializer(var, ckpt_file, full_tensor_name) logging.debug('Initialize variable %s from checkpoint %s with %s', var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file): 'Returns checkpoint filename given directory or specific checkpoint file.' if gfile.IsDirectory(ckpt_dir_or_file): return checkpoint_management.latest_checkpoint(ckpt_dir_or_file) return ckpt_dir_or_file
-5,948,685,012,336,749,000
Returns checkpoint filename given directory or specific checkpoint file.
tensorflow/python/training/checkpoint_utils.py
_get_checkpoint_filename
KodeWorker/tensorflow
python
def _get_checkpoint_filename(ckpt_dir_or_file): if gfile.IsDirectory(ckpt_dir_or_file): return checkpoint_management.latest_checkpoint(ckpt_dir_or_file) return ckpt_dir_or_file
def _set_checkpoint_initializer(variable, ckpt_file, tensor_name, slice_spec, name='checkpoint_initializer', write_version=saver_pb2.SaverDef.DIT): "Overrides given variable's initialization op.\n\n Sets variable initializer to assign op that initializes variable from tensor's\n value in the checkpoint.\n\n Args:\n variable: `tf.Variable` object.\n ckpt_file: string, full path of the checkpoint.\n tensor_name: Name of the tensor to load from the checkpoint.\n slice_spec: Slice specification for loading partitioned tensors.\n name: Name of the operation.\n " base_type = variable.dtype.base_dtype with ops.device(variable.device), ops.device('/cpu:0'): if ((write_version == saver_pb2.SaverDef.V1) or (write_version == saver_pb2.SaverDef.V2)): restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0] elif (write_version == saver_pb2.SaverDef.DIT): restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0] else: raise RuntimeError(('Unexpected write_version: ' + str(write_version))) names_to_saveables = saveable_object_util.op_list_to_dict([variable]) saveable_objects = [] for (name, op) in names_to_saveables.items(): for s in saveable_object_util.saveable_objects_for_op(op, name): saveable_objects.append(s) assert (len(saveable_objects) == 1) init_op = saveable_objects[0].restore([restore_op], restored_shapes=None) variable._initializer_op = init_op restore_op.set_shape(variable.shape) variable._initial_value = restore_op
7,078,638,621,091,424,000
Overrides given variable's initialization op. Sets variable initializer to assign op that initializes variable from tensor's value in the checkpoint. Args: variable: `tf.Variable` object. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. slice_spec: Slice specification for loading partitioned tensors. name: Name of the operation.
tensorflow/python/training/checkpoint_utils.py
_set_checkpoint_initializer
KodeWorker/tensorflow
python
def _set_checkpoint_initializer(variable, ckpt_file, tensor_name, slice_spec, name='checkpoint_initializer', write_version=saver_pb2.SaverDef.DIT): "Overrides given variable's initialization op.\n\n Sets variable initializer to assign op that initializes variable from tensor's\n value in the checkpoint.\n\n Args:\n variable: `tf.Variable` object.\n ckpt_file: string, full path of the checkpoint.\n tensor_name: Name of the tensor to load from the checkpoint.\n slice_spec: Slice specification for loading partitioned tensors.\n name: Name of the operation.\n " base_type = variable.dtype.base_dtype with ops.device(variable.device), ops.device('/cpu:0'): if ((write_version == saver_pb2.SaverDef.V1) or (write_version == saver_pb2.SaverDef.V2)): restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0] elif (write_version == saver_pb2.SaverDef.DIT): restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0] else: raise RuntimeError(('Unexpected write_version: ' + str(write_version))) names_to_saveables = saveable_object_util.op_list_to_dict([variable]) saveable_objects = [] for (name, op) in names_to_saveables.items(): for s in saveable_object_util.saveable_objects_for_op(op, name): saveable_objects.append(s) assert (len(saveable_objects) == 1) init_op = saveable_objects[0].restore([restore_op], restored_shapes=None) variable._initializer_op = init_op restore_op.set_shape(variable.shape) variable._initial_value = restore_op
def _set_variable_or_list_initializer(variable_or_list, ckpt_file, tensor_name): 'Overrides initialization op of given variable or list of variables.\n\n Calls `_set_checkpoint_initializer` for each variable in the given list of\n variables.\n\n Args:\n variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.\n ckpt_file: string, full path of the checkpoint.\n tensor_name: Name of the tensor to load from the checkpoint.\n\n Raises:\n ValueError: if all objects in `variable_or_list` are not partitions of the\n same large variable.\n ' if isinstance(variable_or_list, (list, tuple)): slice_name = None for v in variable_or_list: slice_info = v._save_slice_info if (slice_name is None): slice_name = slice_info.full_name elif (slice_name != slice_info.full_name): raise ValueError(('Slices must all be from the same tensor: %s != %s' % (slice_name, slice_info.full_name))) _set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec) else: _set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, '')
4,867,478,037,457,606,000
Overrides initialization op of given variable or list of variables. Calls `_set_checkpoint_initializer` for each variable in the given list of variables. Args: variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. Raises: ValueError: if all objects in `variable_or_list` are not partitions of the same large variable.
tensorflow/python/training/checkpoint_utils.py
_set_variable_or_list_initializer
KodeWorker/tensorflow
python
def _set_variable_or_list_initializer(variable_or_list, ckpt_file, tensor_name): 'Overrides initialization op of given variable or list of variables.\n\n Calls `_set_checkpoint_initializer` for each variable in the given list of\n variables.\n\n Args:\n variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.\n ckpt_file: string, full path of the checkpoint.\n tensor_name: Name of the tensor to load from the checkpoint.\n\n Raises:\n ValueError: if all objects in `variable_or_list` are not partitions of the\n same large variable.\n ' if isinstance(variable_or_list, (list, tuple)): slice_name = None for v in variable_or_list: slice_info = v._save_slice_info if (slice_name is None): slice_name = slice_info.full_name elif (slice_name != slice_info.full_name): raise ValueError(('Slices must all be from the same tensor: %s != %s' % (slice_name, slice_info.full_name))) _set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec) else: _set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, '')
def _collect_partitioned_variable(name, all_vars): 'Returns list of `tf.Variable` that comprise the partitioned variable.' if ((name + '/part_0') in all_vars): var = [] i = 0 while ((name + ('/part_%d' % i)) in all_vars): var.append(all_vars[(name + ('/part_%d' % i))]) i += 1 return var return None
-3,062,386,609,698,979,300
Returns list of `tf.Variable` that comprise the partitioned variable.
tensorflow/python/training/checkpoint_utils.py
_collect_partitioned_variable
KodeWorker/tensorflow
python
def _collect_partitioned_variable(name, all_vars): if ((name + '/part_0') in all_vars): var = [] i = 0 while ((name + ('/part_%d' % i)) in all_vars): var.append(all_vars[(name + ('/part_%d' % i))]) i += 1 return var return None
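A standalone sketch of the `/part_<i>` lookup convention that `_collect_partitioned_variable` relies on; plain strings stand in for `tf.Variable` objects, so nothing from TensorFlow is imported.

```python
def collect_parts(name, all_vars):
    # Mirrors _collect_partitioned_variable above: gather name/part_0, name/part_1, ...
    if name + "/part_0" not in all_vars:
        return None
    parts = []
    i = 0
    while name + "/part_%d" % i in all_vars:
        parts.append(all_vars[name + "/part_%d" % i])
        i += 1
    return parts

# Toy variable store keyed by name; strings stand in for tf.Variable objects.
store = {
    "scope/var3/part_0": "v3_0",
    "scope/var3/part_1": "v3_1",
    "scope/var3/part_2": "v3_2",
    "scope/other": "v_other",
}

print(collect_parts("scope/var3", store))   # ['v3_0', 'v3_1', 'v3_2']
print(collect_parts("scope/other", store))  # None (not partitioned)
```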
def add_const(self, const): '\n Add a constant to the environment, return its index.\n ' if isinstance(const, str): const = utils.intern(const) for (index, val) in enumerate(self.env.consts): if (val is const): break else: index = len(self.env.consts) self.env.consts.append(const) return index
3,270,935,112,231,871,000
Add a constant to the environment, return its index.
numba/core/pythonapi.py
add_const
DrTodd13/numba
python
def add_const(self, const): '\n \n ' if isinstance(const, str): const = utils.intern(const) for (index, val) in enumerate(self.env.consts): if (val is const): break else: index = len(self.env.consts) self.env.consts.append(const) return index
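A standalone sketch of the intern-then-dedup-by-identity behavior in `add_const`; `sys.intern` and a plain list stand in for numba's `utils.intern` and the environment's `consts`.

```python
import sys

consts = []  # stands in for env.consts

def add_const(const):
    # Intern strings so equal strings share one object, as add_const does above.
    if isinstance(const, str):
        const = sys.intern(const)
    # Identity-based lookup: a slot is reused only for the very same object.
    for index, val in enumerate(consts):
        if val is const:
            return index
    consts.append(const)
    return len(consts) - 1

print(add_const("alpha"))                      # 0
print(add_const("".join(["al", "pha"])))       # 0: interning maps the equal string to the same object

a, b = [1, 2], [1, 2]
print(add_const(a))                            # 1
print(add_const(a))                            # 1: same object reused
print(add_const(b))                            # 2: equal but distinct object gets a new slot
```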
def read_const(self, index): '\n Look up constant number *index* inside the environment body.\n A borrowed reference is returned.\n\n The returned LLVM value may have NULL value at runtime which indicates\n an error at runtime.\n ' assert (index < len(self.env.consts)) builder = self.pyapi.builder consts = self.env_body.consts ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True) with builder.if_else(cgutils.is_not_null(builder, consts)) as (br_not_null, br_null): with br_not_null: getitem = self.pyapi.list_getitem(consts, index) builder.store(getitem, ret) with br_null: self.pyapi.err_set_string('PyExc_RuntimeError', '`env.consts` is NULL in `read_const`') return builder.load(ret)
7,686,056,002,697,819,000
Look up constant number *index* inside the environment body. A borrowed reference is returned. The returned LLVM value may have NULL value at runtime which indicates an error at runtime.
numba/core/pythonapi.py
read_const
DrTodd13/numba
python
def read_const(self, index): '\n Look up constant number *index* inside the environment body.\n A borrowed reference is returned.\n\n The returned LLVM value may have NULL value at runtime which indicates\n an error at runtime.\n ' assert (index < len(self.env.consts)) builder = self.pyapi.builder consts = self.env_body.consts ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True) with builder.if_else(cgutils.is_not_null(builder, consts)) as (br_not_null, br_null): with br_not_null: getitem = self.pyapi.list_getitem(consts, index) builder.store(getitem, ret) with br_null: self.pyapi.err_set_string('PyExc_RuntimeError', '`env.consts` is NULL in `read_const`') return builder.load(ret)