Dataset schema (reconstructed from the flattened dataset-viewer header; each record below repeats these eight fields in this order):

    column                   type     observed range / values
    body                     string   26 to 98.2k characters
    body_hash                int64    -9,222,864,604,528,158,000 to 9,221,803,474B (viewer-abbreviated, "B" = billions)
    docstring                string   1 to 16.8k characters
    path                     string   5 to 230 characters
    name                     string   1 to 96 characters
    repository_name          string   7 to 89 characters
    lang                     string   1 class (all records: python)
    body_without_docstring   string   20 to 98.2k characters
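For orientation, here is a minimal sketch of how a corpus with this schema could be loaded and inspected with the Hugging Face `datasets` library. The Hub identifier is hypothetical (this dump does not name the dataset); only the column names come from the schema above.

```python
from datasets import load_dataset

# Hypothetical Hub id -- the real dataset name is not given in this dump.
ds = load_dataset("someuser/python-code-docstrings", split="train")

row = ds[0]
# Column names taken from the schema above.
print(row["repository_name"], row["path"], row["name"])
print(row["docstring"])
assert row["lang"] == "python"  # 'lang' is a single-class column in this dump
```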
@since('2.0.0') def setVarianceCol(self, value): '\n Sets the value of :py:attr:`varianceCol`.\n ' return self._set(varianceCol=value)
-1,104,017,226,281,381,500
Sets the value of :py:attr:`varianceCol`.
python/pyspark/ml/regression.py
setVarianceCol
AjithShetty2489/spark
python
@since('2.0.0') def setVarianceCol(self, value): '\n \n ' return self._set(varianceCol=value)
@since('3.0.0') def setVarianceCol(self, value): '\n Sets the value of :py:attr:`varianceCol`.\n ' return self._set(varianceCol=value)
-6,026,809,695,580,712,000
Sets the value of :py:attr:`varianceCol`.
python/pyspark/ml/regression.py
setVarianceCol
AjithShetty2489/spark
python
@since('3.0.0') def setVarianceCol(self, value): '\n \n ' return self._set(varianceCol=value)
@property @since('2.0.0') def featureImportances(self): '\n Estimate of the importance of each feature.\n\n This generalizes the idea of "Gini" importance to other losses,\n following the explanation of Gini importance from "Random Forests" documentation\n by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.\n\n This feature importance is calculated as follows:\n - importance(feature j) = sum (over nodes which split on feature j) of the gain,\n where gain is scaled by the number of instances passing through node\n - Normalize importances for tree to sum to 1.\n\n .. note:: Feature importance for single decision trees can have high variance due to\n correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`\n to determine feature importance instead.\n ' return self._call_java('featureImportances')
-6,933,464,203,652,408,000
Estimate of the importance of each feature. This generalizes the idea of "Gini" importance to other losses, following the explanation of Gini importance from "Random Forests" documentation by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn. This feature importance is calculated as follows: - importance(feature j) = sum (over nodes which split on feature j) of the gain, where gain is scaled by the number of instances passing through node - Normalize importances for tree to sum to 1. .. note:: Feature importance for single decision trees can have high variance due to correlated predictor variables. Consider using a :py:class:`RandomForestRegressor` to determine feature importance instead.
python/pyspark/ml/regression.py
featureImportances
AjithShetty2489/spark
python
@property @since('2.0.0') def featureImportances(self): '\n Estimate of the importance of each feature.\n\n This generalizes the idea of "Gini" importance to other losses,\n following the explanation of Gini importance from "Random Forests" documentation\n by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.\n\n This feature importance is calculated as follows:\n - importance(feature j) = sum (over nodes which split on feature j) of the gain,\n where gain is scaled by the number of instances passing through node\n - Normalize importances for tree to sum to 1.\n\n .. note:: Feature importance for single decision trees can have high variance due to\n correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`\n to determine feature importance instead.\n ' return self._call_java('featureImportances')
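As a usage illustration of the docstring above (not part of the dataset), the following sketch fits a small `DecisionTreeRegressor` on invented toy data and reads `featureImportances`:

```python
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import DecisionTreeRegressor

spark = SparkSession.builder.getOrCreate()
# Toy data, invented for the example.
df = spark.createDataFrame(
    [(0.0, Vectors.dense(0.0, 1.0)), (1.0, Vectors.dense(1.0, 0.0)),
     (2.0, Vectors.dense(2.0, 1.0)), (3.0, Vectors.dense(3.0, 0.0))],
    ["label", "features"])

dt_model = DecisionTreeRegressor(maxDepth=2).fit(df)
# Normalized per-feature gains; sums to 1. High variance for single trees,
# hence the note above recommending RandomForestRegressor.
print(dt_model.featureImportances)
```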
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy='auto', leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None)\n ' super(RandomForestRegressor, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.RandomForestRegressor', self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', subsamplingRate=1.0, numTrees=20, featureSubsetStrategy='auto', leafCol='', minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs)
6,518,399,712,782,969,000
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None)
python/pyspark/ml/regression.py
__init__
AjithShetty2489/spark
python
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy='auto', leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n \n ' super(RandomForestRegressor, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.RandomForestRegressor', self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', subsamplingRate=1.0, numTrees=20, featureSubsetStrategy='auto', leafCol='', minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs)
@keyword_only @since('1.4.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy='auto', leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None)\n Sets params for random forest regression.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
-4,650,487,209,188,145,000
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None) Sets params for random forest regression.
python/pyspark/ml/regression.py
setParams
AjithShetty2489/spark
python
@keyword_only @since('1.4.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy='auto', leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None)\n Sets params for random forest regression.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
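To show how the keyword-only constructor and `setParams` above interact, a short sketch (parameter values are arbitrary):

```python
from pyspark.ml.regression import RandomForestRegressor

# All constructor arguments are keyword-only, mirroring @keyword_only above.
rf = RandomForestRegressor(numTrees=20, featureSubsetStrategy='auto', seed=42)
# setParams routes the same keywords through self._set(**kwargs).
rf.setParams(numTrees=50, maxDepth=10, subsamplingRate=0.8)
print(rf.getOrDefault(rf.numTrees))  # 50
```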
def setMaxDepth(self, value): '\n Sets the value of :py:attr:`maxDepth`.\n ' return self._set(maxDepth=value)
-6,658,512,568,115,957,000
Sets the value of :py:attr:`maxDepth`.
python/pyspark/ml/regression.py
setMaxDepth
AjithShetty2489/spark
python
def setMaxDepth(self, value): '\n \n ' return self._set(maxDepth=value)
def setMaxBins(self, value): '\n Sets the value of :py:attr:`maxBins`.\n ' return self._set(maxBins=value)
-1,640,362,106,271,631,400
Sets the value of :py:attr:`maxBins`.
python/pyspark/ml/regression.py
setMaxBins
AjithShetty2489/spark
python
def setMaxBins(self, value): '\n \n ' return self._set(maxBins=value)
def setMinInstancesPerNode(self, value): '\n Sets the value of :py:attr:`minInstancesPerNode`.\n ' return self._set(minInstancesPerNode=value)
-2,656,768,942,740,913,000
Sets the value of :py:attr:`minInstancesPerNode`.
python/pyspark/ml/regression.py
setMinInstancesPerNode
AjithShetty2489/spark
python
def setMinInstancesPerNode(self, value): '\n \n ' return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value): '\n Sets the value of :py:attr:`minInfoGain`.\n ' return self._set(minInfoGain=value)
8,219,744,118,478,087,000
Sets the value of :py:attr:`minInfoGain`.
python/pyspark/ml/regression.py
setMinInfoGain
AjithShetty2489/spark
python
def setMinInfoGain(self, value): '\n \n ' return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value): '\n Sets the value of :py:attr:`maxMemoryInMB`.\n ' return self._set(maxMemoryInMB=value)
6,802,939,914,533,361,000
Sets the value of :py:attr:`maxMemoryInMB`.
python/pyspark/ml/regression.py
setMaxMemoryInMB
AjithShetty2489/spark
python
def setMaxMemoryInMB(self, value): '\n \n ' return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value): '\n Sets the value of :py:attr:`cacheNodeIds`.\n ' return self._set(cacheNodeIds=value)
-5,193,773,612,683,504,000
Sets the value of :py:attr:`cacheNodeIds`.
python/pyspark/ml/regression.py
setCacheNodeIds
AjithShetty2489/spark
python
def setCacheNodeIds(self, value): '\n \n ' return self._set(cacheNodeIds=value)
@since('1.4.0') def setImpurity(self, value): '\n Sets the value of :py:attr:`impurity`.\n ' return self._set(impurity=value)
5,925,454,725,552,672,000
Sets the value of :py:attr:`impurity`.
python/pyspark/ml/regression.py
setImpurity
AjithShetty2489/spark
python
@since('1.4.0') def setImpurity(self, value): '\n \n ' return self._set(impurity=value)
@since('1.4.0') def setNumTrees(self, value): '\n Sets the value of :py:attr:`numTrees`.\n ' return self._set(numTrees=value)
8,739,145,108,853,067,000
Sets the value of :py:attr:`numTrees`.
python/pyspark/ml/regression.py
setNumTrees
AjithShetty2489/spark
python
@since('1.4.0') def setNumTrees(self, value): '\n \n ' return self._set(numTrees=value)
@since('1.4.0') def setSubsamplingRate(self, value): '\n Sets the value of :py:attr:`subsamplingRate`.\n ' return self._set(subsamplingRate=value)
7,760,574,118,449,772,000
Sets the value of :py:attr:`subsamplingRate`.
python/pyspark/ml/regression.py
setSubsamplingRate
AjithShetty2489/spark
python
@since('1.4.0') def setSubsamplingRate(self, value): '\n \n ' return self._set(subsamplingRate=value)
@since('2.4.0') def setFeatureSubsetStrategy(self, value): '\n Sets the value of :py:attr:`featureSubsetStrategy`.\n ' return self._set(featureSubsetStrategy=value)
8,563,096,663,945,954,000
Sets the value of :py:attr:`featureSubsetStrategy`.
python/pyspark/ml/regression.py
setFeatureSubsetStrategy
AjithShetty2489/spark
python
@since('2.4.0') def setFeatureSubsetStrategy(self, value): '\n \n ' return self._set(featureSubsetStrategy=value)
def setCheckpointInterval(self, value): '\n Sets the value of :py:attr:`checkpointInterval`.\n ' return self._set(checkpointInterval=value)
9,195,478,207,231,309,000
Sets the value of :py:attr:`checkpointInterval`.
python/pyspark/ml/regression.py
setCheckpointInterval
AjithShetty2489/spark
python
def setCheckpointInterval(self, value): '\n \n ' return self._set(checkpointInterval=value)
def setSeed(self, value): '\n Sets the value of :py:attr:`seed`.\n ' return self._set(seed=value)
-88,293,150,966,480,180
Sets the value of :py:attr:`seed`.
python/pyspark/ml/regression.py
setSeed
AjithShetty2489/spark
python
def setSeed(self, value): '\n \n ' return self._set(seed=value)
@since('3.0.0') def setWeightCol(self, value): '\n Sets the value of :py:attr:`weightCol`.\n ' return self._set(weightCol=value)
3,791,292,180,445,544,000
Sets the value of :py:attr:`weightCol`.
python/pyspark/ml/regression.py
setWeightCol
AjithShetty2489/spark
python
@since('3.0.0') def setWeightCol(self, value): '\n \n ' return self._set(weightCol=value)
@since('3.0.0') def setMinWeightFractionPerNode(self, value): '\n Sets the value of :py:attr:`minWeightFractionPerNode`.\n ' return self._set(minWeightFractionPerNode=value)
5,709,196,588,527,269,000
Sets the value of :py:attr:`minWeightFractionPerNode`.
python/pyspark/ml/regression.py
setMinWeightFractionPerNode
AjithShetty2489/spark
python
@since('3.0.0') def setMinWeightFractionPerNode(self, value): '\n \n ' return self._set(minWeightFractionPerNode=value)
@property @since('2.0.0') def trees(self): 'Trees in this ensemble. Warning: These have null parent Estimators.' return [DecisionTreeRegressionModel(m) for m in list(self._call_java('trees'))]
7,781,632,944,006,031,000
Trees in this ensemble. Warning: These have null parent Estimators.
python/pyspark/ml/regression.py
trees
AjithShetty2489/spark
python
@property @since('2.0.0') def trees(self): return [DecisionTreeRegressionModel(m) for m in list(self._call_java('trees'))]
@property @since('2.0.0') def featureImportances(self): '\n Estimate of the importance of each feature.\n\n Each feature\'s importance is the average of its importance across all trees in the ensemble.\n The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.\n (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)\n and follows the implementation from scikit-learn.\n\n .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`\n ' return self._call_java('featureImportances')
253,380,475,036,441,120
Estimate of the importance of each feature. Each feature's importance is the average of its importance across all trees in the ensemble. The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) and follows the implementation from scikit-learn. .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
python/pyspark/ml/regression.py
featureImportances
AjithShetty2489/spark
python
@property @since('2.0.0') def featureImportances(self): '\n Estimate of the importance of each feature.\n\n Each feature\'s importance is the average of its importance across all trees in the ensemble.\n The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.\n (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)\n and follows the implementation from scikit-learn.\n\n .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`\n ' return self._call_java('featureImportances')
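Continuing the earlier sketch (same `spark` and `df`), the ensemble accessors above can be exercised like this; exact node counts depend on the toy data:

```python
from pyspark.ml.regression import RandomForestRegressor

rf_model = RandomForestRegressor(numTrees=3, seed=7).fit(df)
print(len(rf_model.trees))           # 3 DecisionTreeRegressionModel instances
for tree in rf_model.trees:          # sub-models carry null parent Estimators
    print(tree.numNodes, tree.depth)
print(rf_model.featureImportances)   # averaged over trees, normalized to sum to 1
```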
@since('1.4.0') def getLossType(self): '\n Gets the value of lossType or its default value.\n ' return self.getOrDefault(self.lossType)
6,726,951,567,021,210,000
Gets the value of lossType or its default value.
python/pyspark/ml/regression.py
getLossType
AjithShetty2489/spark
python
@since('1.4.0') def getLossType(self): '\n \n ' return self.getOrDefault(self.lossType)
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType='squared', maxIter=20, stepSize=0.1, seed=None, impurity='variance', featureSubsetStrategy='all', validationTol=0.01, validationIndicatorCol=None, leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,\n weightCol=None)\n ' super(GBTRegressor, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.GBTRegressor', self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType='squared', maxIter=20, stepSize=0.1, impurity='variance', featureSubsetStrategy='all', validationTol=0.01, leafCol='', minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs)
2,081,694,121,952,453,000
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None)
python/pyspark/ml/regression.py
__init__
AjithShetty2489/spark
python
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType='squared', maxIter=20, stepSize=0.1, seed=None, impurity='variance', featureSubsetStrategy='all', validationTol=0.01, validationIndicatorCol=None, leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,\n weightCol=None)\n ' super(GBTRegressor, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.GBTRegressor', self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType='squared', maxIter=20, stepSize=0.1, impurity='variance', featureSubsetStrategy='all', validationTol=0.01, leafCol='', minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs)
@keyword_only @since('1.4.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType='squared', maxIter=20, stepSize=0.1, seed=None, impurity='variance', featureSubsetStrategy='all', validationTol=0.01, validationIndicatorCol=None, leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None)\n Sets params for Gradient Boosted Tree Regression.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
5,634,973,182,862,086,000
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None) Sets params for Gradient Boosted Tree Regression.
python/pyspark/ml/regression.py
setParams
AjithShetty2489/spark
python
@keyword_only @since('1.4.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType='squared', maxIter=20, stepSize=0.1, seed=None, impurity='variance', featureSubsetStrategy='all', validationTol=0.01, validationIndicatorCol=None, leafCol='', minWeightFractionPerNode=0.0, weightCol=None): '\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None)\n Sets params for Gradient Boosted Tree Regression.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
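A corresponding sketch for the GBT estimator above, reusing `df` from the DecisionTreeRegressor example; hyperparameter values are arbitrary:

```python
from pyspark.ml.regression import GBTRegressor

gbt = GBTRegressor(maxIter=10, stepSize=0.1, lossType='squared', seed=42)
gbt.setParams(maxIter=20, subsamplingRate=0.8)
gbt_model = gbt.fit(df)      # df from the DecisionTreeRegressor sketch
print(len(gbt_model.trees))  # 20: one regression tree per boosting iteration
```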
@since('1.4.0') def setMaxDepth(self, value): '\n Sets the value of :py:attr:`maxDepth`.\n ' return self._set(maxDepth=value)
6,900,103,097,222,239,000
Sets the value of :py:attr:`maxDepth`.
python/pyspark/ml/regression.py
setMaxDepth
AjithShetty2489/spark
python
@since('1.4.0') def setMaxDepth(self, value): '\n \n ' return self._set(maxDepth=value)
@since('1.4.0') def setMaxBins(self, value): '\n Sets the value of :py:attr:`maxBins`.\n ' return self._set(maxBins=value)
-7,662,286,719,655,598,000
Sets the value of :py:attr:`maxBins`.
python/pyspark/ml/regression.py
setMaxBins
AjithShetty2489/spark
python
@since('1.4.0') def setMaxBins(self, value): '\n \n ' return self._set(maxBins=value)
@since('1.4.0') def setMinInstancesPerNode(self, value): '\n Sets the value of :py:attr:`minInstancesPerNode`.\n ' return self._set(minInstancesPerNode=value)
-8,071,360,048,086,069,000
Sets the value of :py:attr:`minInstancesPerNode`.
python/pyspark/ml/regression.py
setMinInstancesPerNode
AjithShetty2489/spark
python
@since('1.4.0') def setMinInstancesPerNode(self, value): '\n \n ' return self._set(minInstancesPerNode=value)
@since('1.4.0') def setMinInfoGain(self, value): '\n Sets the value of :py:attr:`minInfoGain`.\n ' return self._set(minInfoGain=value)
-2,189,222,529,958,267,000
Sets the value of :py:attr:`minInfoGain`.
python/pyspark/ml/regression.py
setMinInfoGain
AjithShetty2489/spark
python
@since('1.4.0') def setMinInfoGain(self, value): '\n \n ' return self._set(minInfoGain=value)
@since('1.4.0') def setMaxMemoryInMB(self, value): '\n Sets the value of :py:attr:`maxMemoryInMB`.\n ' return self._set(maxMemoryInMB=value)
-7,743,889,602,156,593,000
Sets the value of :py:attr:`maxMemoryInMB`.
python/pyspark/ml/regression.py
setMaxMemoryInMB
AjithShetty2489/spark
python
@since('1.4.0') def setMaxMemoryInMB(self, value): '\n \n ' return self._set(maxMemoryInMB=value)
@since('1.4.0') def setCacheNodeIds(self, value): '\n Sets the value of :py:attr:`cacheNodeIds`.\n ' return self._set(cacheNodeIds=value)
-7,957,309,380,185,966,000
Sets the value of :py:attr:`cacheNodeIds`.
python/pyspark/ml/regression.py
setCacheNodeIds
AjithShetty2489/spark
python
@since('1.4.0') def setCacheNodeIds(self, value): '\n \n ' return self._set(cacheNodeIds=value)
@since('1.4.0') def setImpurity(self, value): '\n Sets the value of :py:attr:`impurity`.\n ' return self._set(impurity=value)
5,925,454,725,552,672,000
Sets the value of :py:attr:`impurity`.
python/pyspark/ml/regression.py
setImpurity
AjithShetty2489/spark
python
@since('1.4.0') def setImpurity(self, value): '\n \n ' return self._set(impurity=value)
@since('1.4.0') def setLossType(self, value): '\n Sets the value of :py:attr:`lossType`.\n ' return self._set(lossType=value)
-8,809,049,382,203,914,000
Sets the value of :py:attr:`lossType`.
python/pyspark/ml/regression.py
setLossType
AjithShetty2489/spark
python
@since('1.4.0') def setLossType(self, value): '\n \n ' return self._set(lossType=value)
@since('1.4.0') def setSubsamplingRate(self, value): '\n Sets the value of :py:attr:`subsamplingRate`.\n ' return self._set(subsamplingRate=value)
7,760,574,118,449,772,000
Sets the value of :py:attr:`subsamplingRate`.
python/pyspark/ml/regression.py
setSubsamplingRate
AjithShetty2489/spark
python
@since('1.4.0') def setSubsamplingRate(self, value): '\n \n ' return self._set(subsamplingRate=value)
@since('2.4.0') def setFeatureSubsetStrategy(self, value): '\n Sets the value of :py:attr:`featureSubsetStrategy`.\n ' return self._set(featureSubsetStrategy=value)
8,563,096,663,945,954,000
Sets the value of :py:attr:`featureSubsetStrategy`.
python/pyspark/ml/regression.py
setFeatureSubsetStrategy
AjithShetty2489/spark
python
@since('2.4.0') def setFeatureSubsetStrategy(self, value): '\n \n ' return self._set(featureSubsetStrategy=value)
@since('3.0.0') def setValidationIndicatorCol(self, value): '\n Sets the value of :py:attr:`validationIndicatorCol`.\n ' return self._set(validationIndicatorCol=value)
-5,114,315,931,364,224,000
Sets the value of :py:attr:`validationIndicatorCol`.
python/pyspark/ml/regression.py
setValidationIndicatorCol
AjithShetty2489/spark
python
@since('3.0.0') def setValidationIndicatorCol(self, value): '\n \n ' return self._set(validationIndicatorCol=value)
@since('1.4.0') def setMaxIter(self, value): '\n Sets the value of :py:attr:`maxIter`.\n ' return self._set(maxIter=value)
-1,196,247,245,985,027,000
Sets the value of :py:attr:`maxIter`.
python/pyspark/ml/regression.py
setMaxIter
AjithShetty2489/spark
python
@since('1.4.0') def setMaxIter(self, value): '\n \n ' return self._set(maxIter=value)
@since('1.4.0') def setCheckpointInterval(self, value): '\n Sets the value of :py:attr:`checkpointInterval`.\n ' return self._set(checkpointInterval=value)
-7,454,580,376,492,684,000
Sets the value of :py:attr:`checkpointInterval`.
python/pyspark/ml/regression.py
setCheckpointInterval
AjithShetty2489/spark
python
@since('1.4.0') def setCheckpointInterval(self, value): '\n \n ' return self._set(checkpointInterval=value)
@since('1.4.0') def setSeed(self, value): '\n Sets the value of :py:attr:`seed`.\n ' return self._set(seed=value)
5,160,183,399,222,839,000
Sets the value of :py:attr:`seed`.
python/pyspark/ml/regression.py
setSeed
AjithShetty2489/spark
python
@since('1.4.0') def setSeed(self, value): '\n \n ' return self._set(seed=value)
@since('1.4.0') def setStepSize(self, value): '\n Sets the value of :py:attr:`stepSize`.\n ' return self._set(stepSize=value)
1,217,394,787,011,605,200
Sets the value of :py:attr:`stepSize`.
python/pyspark/ml/regression.py
setStepSize
AjithShetty2489/spark
python
@since('1.4.0') def setStepSize(self, value): '\n \n ' return self._set(stepSize=value)
@since('3.0.0') def setWeightCol(self, value): '\n Sets the value of :py:attr:`weightCol`.\n ' return self._set(weightCol=value)
3,791,292,180,445,544,000
Sets the value of :py:attr:`weightCol`.
python/pyspark/ml/regression.py
setWeightCol
AjithShetty2489/spark
python
@since('3.0.0') def setWeightCol(self, value): '\n \n ' return self._set(weightCol=value)
@since('3.0.0') def setMinWeightFractionPerNode(self, value): '\n Sets the value of :py:attr:`minWeightFractionPerNode`.\n ' return self._set(minWeightFractionPerNode=value)
5,709,196,588,527,269,000
Sets the value of :py:attr:`minWeightFractionPerNode`.
python/pyspark/ml/regression.py
setMinWeightFractionPerNode
AjithShetty2489/spark
python
@since('3.0.0') def setMinWeightFractionPerNode(self, value): '\n \n ' return self._set(minWeightFractionPerNode=value)
@property @since('2.0.0') def featureImportances(self): '\n Estimate of the importance of each feature.\n\n Each feature\'s importance is the average of its importance across all trees in the ensemble.\n The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.\n (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)\n and follows the implementation from scikit-learn.\n\n .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`\n ' return self._call_java('featureImportances')
253,380,475,036,441,120
Estimate of the importance of each feature. Each feature's importance is the average of its importance across all trees in the ensemble. The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) and follows the implementation from scikit-learn. .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
python/pyspark/ml/regression.py
featureImportances
AjithShetty2489/spark
python
@property @since('2.0.0') def featureImportances(self): '\n Estimate of the importance of each feature.\n\n Each feature\'s importance is the average of its importance across all trees in the ensemble.\n The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.\n (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)\n and follows the implementation from scikit-learn.\n\n .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`\n ' return self._call_java('featureImportances')
@property @since('2.0.0') def trees(self): 'Trees in this ensemble. Warning: These have null parent Estimators.' return [DecisionTreeRegressionModel(m) for m in list(self._call_java('trees'))]
7,781,632,944,006,031,000
Trees in this ensemble. Warning: These have null parent Estimators.
python/pyspark/ml/regression.py
trees
AjithShetty2489/spark
python
@property @since('2.0.0') def trees(self): return [DecisionTreeRegressionModel(m) for m in list(self._call_java('trees'))]
@since('2.4.0') def evaluateEachIteration(self, dataset, loss): '\n Method to compute error or loss for every iteration of gradient boosting.\n\n :param dataset:\n Test dataset to evaluate model on, where dataset is an\n instance of :py:class:`pyspark.sql.DataFrame`\n :param loss:\n The loss function used to compute error.\n Supported options: squared, absolute\n ' return self._call_java('evaluateEachIteration', dataset, loss)
8,534,903,255,737,567,000
Method to compute error or loss for every iteration of gradient boosting. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` :param loss: The loss function used to compute error. Supported options: squared, absolute
python/pyspark/ml/regression.py
evaluateEachIteration
AjithShetty2489/spark
python
@since('2.4.0') def evaluateEachIteration(self, dataset, loss): '\n Method to compute error or loss for every iteration of gradient boosting.\n\n :param dataset:\n Test dataset to evaluate model on, where dataset is an\n instance of :py:class:`pyspark.sql.DataFrame`\n :param loss:\n The loss function used to compute error.\n Supported options: squared, absolute\n ' return self._call_java('evaluateEachIteration', dataset, loss)
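Using `gbt_model` from the sketch above, `evaluateEachIteration` returns one loss value per boosting iteration, which is handy for picking an early-stopping point:

```python
# One loss value per iteration; on training data this is typically non-increasing.
losses = gbt_model.evaluateEachIteration(df, "squared")
print(losses[:5])
```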
@since('1.6.0') def getCensorCol(self): '\n Gets the value of censorCol or its default value.\n ' return self.getOrDefault(self.censorCol)
-2,230,830,563,015,044,000
Gets the value of censorCol or its default value.
python/pyspark/ml/regression.py
getCensorCol
AjithShetty2489/spark
python
@since('1.6.0') def getCensorCol(self): '\n \n ' return self.getOrDefault(self.censorCol)
@since('1.6.0') def getQuantileProbabilities(self): '\n Gets the value of quantileProbabilities or its default value.\n ' return self.getOrDefault(self.quantileProbabilities)
-207,356,761,093,793,340
Gets the value of quantileProbabilities or its default value.
python/pyspark/ml/regression.py
getQuantileProbabilities
AjithShetty2489/spark
python
@since('1.6.0') def getQuantileProbabilities(self): '\n \n ' return self.getOrDefault(self.quantileProbabilities)
@since('1.6.0') def getQuantilesCol(self): '\n Gets the value of quantilesCol or its default value.\n ' return self.getOrDefault(self.quantilesCol)
-6,848,420,129,026,341,000
Gets the value of quantilesCol or its default value.
python/pyspark/ml/regression.py
getQuantilesCol
AjithShetty2489/spark
python
@since('1.6.0') def getQuantilesCol(self): '\n \n ' return self.getOrDefault(self.quantilesCol)
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', fitIntercept=True, maxIter=100, tol=1e-06, censorCol='censor', quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): '\n __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], quantilesCol=None, aggregationDepth=2)\n ' super(AFTSurvivalRegression, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.AFTSurvivalRegression', self.uid) self._setDefault(censorCol='censor', quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], maxIter=100, tol=1e-06) kwargs = self._input_kwargs self.setParams(**kwargs)
-6,859,158,537,253,641,000
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], quantilesCol=None, aggregationDepth=2)
python/pyspark/ml/regression.py
__init__
AjithShetty2489/spark
python
@keyword_only def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', fitIntercept=True, maxIter=100, tol=1e-06, censorCol='censor', quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): '\n \n ' super(AFTSurvivalRegression, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.AFTSurvivalRegression', self.uid) self._setDefault(censorCol='censor', quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], maxIter=100, tol=1e-06) kwargs = self._input_kwargs self.setParams(**kwargs)
@keyword_only @since('1.6.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', fitIntercept=True, maxIter=100, tol=1e-06, censorCol='censor', quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): '\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], quantilesCol=None, aggregationDepth=2)\n ' kwargs = self._input_kwargs return self._set(**kwargs)
-8,722,679,978,714,797,000
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], quantilesCol=None, aggregationDepth=2)
python/pyspark/ml/regression.py
setParams
AjithShetty2489/spark
python
@keyword_only @since('1.6.0') def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', fitIntercept=True, maxIter=100, tol=1e-06, censorCol='censor', quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): '\n \n ' kwargs = self._input_kwargs return self._set(**kwargs)
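A sketch of the AFT workflow above on invented right-censored data (censor == 1.0 means the event was observed, 0.0 means censored), continuing with the same `spark` session:

```python
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import AFTSurvivalRegression

train = spark.createDataFrame([
    (1.218, 1.0, Vectors.dense(1.560)),
    (2.949, 0.0, Vectors.dense(0.346)),
    (3.627, 0.0, Vectors.dense(1.380)),
    (0.273, 1.0, Vectors.dense(0.520)),
    (4.199, 0.0, Vectors.dense(0.795))], ["label", "censor", "features"])

aft = AFTSurvivalRegression(quantileProbabilities=[0.3, 0.6],
                            quantilesCol="quantiles")
aft_model = aft.fit(train)
print(aft_model.coefficients, aft_model.intercept, aft_model.scale)
# One predicted quantile per probability in quantileProbabilities.
print(aft_model.predictQuantiles(Vectors.dense(6.3)))
```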
@since('1.6.0') def setCensorCol(self, value): '\n Sets the value of :py:attr:`censorCol`.\n ' return self._set(censorCol=value)
541,491,126,207,018,900
Sets the value of :py:attr:`censorCol`.
python/pyspark/ml/regression.py
setCensorCol
AjithShetty2489/spark
python
@since('1.6.0') def setCensorCol(self, value): '\n \n ' return self._set(censorCol=value)
@since('1.6.0') def setQuantileProbabilities(self, value): '\n Sets the value of :py:attr:`quantileProbabilities`.\n ' return self._set(quantileProbabilities=value)
-850,399,242,309,996,200
Sets the value of :py:attr:`quantileProbabilities`.
python/pyspark/ml/regression.py
setQuantileProbabilities
AjithShetty2489/spark
python
@since('1.6.0') def setQuantileProbabilities(self, value): '\n \n ' return self._set(quantileProbabilities=value)
@since('1.6.0') def setQuantilesCol(self, value): '\n Sets the value of :py:attr:`quantilesCol`.\n ' return self._set(quantilesCol=value)
-5,907,029,639,598,257,000
Sets the value of :py:attr:`quantilesCol`.
python/pyspark/ml/regression.py
setQuantilesCol
AjithShetty2489/spark
python
@since('1.6.0') def setQuantilesCol(self, value): '\n \n ' return self._set(quantilesCol=value)
@since('1.6.0') def setMaxIter(self, value): '\n Sets the value of :py:attr:`maxIter`.\n ' return self._set(maxIter=value)
-6,368,118,853,475,780,000
Sets the value of :py:attr:`maxIter`.
python/pyspark/ml/regression.py
setMaxIter
AjithShetty2489/spark
python
@since('1.6.0') def setMaxIter(self, value): '\n \n ' return self._set(maxIter=value)
@since('1.6.0') def setTol(self, value): '\n Sets the value of :py:attr:`tol`.\n ' return self._set(tol=value)
-7,171,505,432,429,106,000
Sets the value of :py:attr:`tol`.
python/pyspark/ml/regression.py
setTol
AjithShetty2489/spark
python
@since('1.6.0') def setTol(self, value): '\n \n ' return self._set(tol=value)
@since('1.6.0') def setFitIntercept(self, value): '\n Sets the value of :py:attr:`fitIntercept`.\n ' return self._set(fitIntercept=value)
1,145,056,169,683,105,800
Sets the value of :py:attr:`fitIntercept`.
python/pyspark/ml/regression.py
setFitIntercept
AjithShetty2489/spark
python
@since('1.6.0') def setFitIntercept(self, value): '\n \n ' return self._set(fitIntercept=value)
@since('2.1.0') def setAggregationDepth(self, value): '\n Sets the value of :py:attr:`aggregationDepth`.\n ' return self._set(aggregationDepth=value)
768,663,242,657,319,600
Sets the value of :py:attr:`aggregationDepth`.
python/pyspark/ml/regression.py
setAggregationDepth
AjithShetty2489/spark
python
@since('2.1.0') def setAggregationDepth(self, value): '\n \n ' return self._set(aggregationDepth=value)
@since('3.0.0') def setQuantileProbabilities(self, value): '\n Sets the value of :py:attr:`quantileProbabilities`.\n ' return self._set(quantileProbabilities=value)
3,744,453,599,272,685,000
Sets the value of :py:attr:`quantileProbabilities`.
python/pyspark/ml/regression.py
setQuantileProbabilities
AjithShetty2489/spark
python
@since('3.0.0') def setQuantileProbabilities(self, value): '\n \n ' return self._set(quantileProbabilities=value)
@since('3.0.0') def setQuantilesCol(self, value): '\n Sets the value of :py:attr:`quantilesCol`.\n ' return self._set(quantilesCol=value)
-8,202,487,490,823,432,000
Sets the value of :py:attr:`quantilesCol`.
python/pyspark/ml/regression.py
setQuantilesCol
AjithShetty2489/spark
python
@since('3.0.0') def setQuantilesCol(self, value): '\n \n ' return self._set(quantilesCol=value)
@property @since('2.0.0') def coefficients(self): '\n Model coefficients.\n ' return self._call_java('coefficients')
6,857,518,054,360,473,000
Model coefficients.
python/pyspark/ml/regression.py
coefficients
AjithShetty2489/spark
python
@property @since('2.0.0') def coefficients(self): '\n \n ' return self._call_java('coefficients')
@property @since('1.6.0') def intercept(self): '\n Model intercept.\n ' return self._call_java('intercept')
-3,560,865,619,730,612,700
Model intercept.
python/pyspark/ml/regression.py
intercept
AjithShetty2489/spark
python
@property @since('1.6.0') def intercept(self): '\n \n ' return self._call_java('intercept')
@property @since('1.6.0') def scale(self): '\n Model scale parameter.\n ' return self._call_java('scale')
-7,138,564,796,128,718,000
Model scale parameter.
python/pyspark/ml/regression.py
scale
AjithShetty2489/spark
python
@property @since('1.6.0') def scale(self): '\n \n ' return self._call_java('scale')
@since('2.0.0') def predictQuantiles(self, features): '\n Predicted Quantiles\n ' return self._call_java('predictQuantiles', features)
-2,929,868,469,692,212,000
Predicted Quantiles
python/pyspark/ml/regression.py
predictQuantiles
AjithShetty2489/spark
python
@since('2.0.0') def predictQuantiles(self, features): '\n \n ' return self._call_java('predictQuantiles', features)
@since('2.0.0') def getFamily(self): '\n Gets the value of family or its default value.\n ' return self.getOrDefault(self.family)
3,807,383,793,635,875,000
Gets the value of family or its default value.
python/pyspark/ml/regression.py
getFamily
AjithShetty2489/spark
python
@since('2.0.0') def getFamily(self): '\n \n ' return self.getOrDefault(self.family)
@since('2.0.0') def getLinkPredictionCol(self): '\n Gets the value of linkPredictionCol or its default value.\n ' return self.getOrDefault(self.linkPredictionCol)
4,153,613,732,417,093,600
Gets the value of linkPredictionCol or its default value.
python/pyspark/ml/regression.py
getLinkPredictionCol
AjithShetty2489/spark
python
@since('2.0.0') def getLinkPredictionCol(self): '\n \n ' return self.getOrDefault(self.linkPredictionCol)
@since('2.0.0') def getLink(self): '\n Gets the value of link or its default value.\n ' return self.getOrDefault(self.link)
3,719,389,862,898,408,400
Gets the value of link or its default value.
python/pyspark/ml/regression.py
getLink
AjithShetty2489/spark
python
@since('2.0.0') def getLink(self): '\n \n ' return self.getOrDefault(self.link)
@since('2.2.0') def getVariancePower(self): '\n Gets the value of variancePower or its default value.\n ' return self.getOrDefault(self.variancePower)
5,905,842,859,415,928,000
Gets the value of variancePower or its default value.
python/pyspark/ml/regression.py
getVariancePower
AjithShetty2489/spark
python
@since('2.2.0') def getVariancePower(self): '\n \n ' return self.getOrDefault(self.variancePower)
@since('2.2.0') def getLinkPower(self): '\n Gets the value of linkPower or its default value.\n ' return self.getOrDefault(self.linkPower)
917,688,027,843,338,800
Gets the value of linkPower or its default value.
python/pyspark/ml/regression.py
getLinkPower
AjithShetty2489/spark
python
@since('2.2.0') def getLinkPower(self): '\n \n ' return self.getOrDefault(self.linkPower)
@since('2.3.0') def getOffsetCol(self): '\n Gets the value of offsetCol or its default value.\n ' return self.getOrDefault(self.offsetCol)
-5,945,135,978,674,455,000
Gets the value of offsetCol or its default value.
python/pyspark/ml/regression.py
getOffsetCol
AjithShetty2489/spark
python
@since('2.3.0') def getOffsetCol(self): '\n \n ' return self.getOrDefault(self.offsetCol)
@keyword_only def __init__(self, labelCol='label', featuresCol='features', predictionCol='prediction', family='gaussian', link=None, fitIntercept=True, maxIter=25, tol=1e-06, regParam=0.0, weightCol=None, solver='irls', linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): '\n __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)\n ' super(GeneralizedLinearRegression, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.GeneralizedLinearRegression', self.uid) self._setDefault(family='gaussian', maxIter=25, tol=1e-06, regParam=0.0, solver='irls', variancePower=0.0, aggregationDepth=2) kwargs = self._input_kwargs self.setParams(**kwargs)
-2,628,348,636,192,205,300
__init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
python/pyspark/ml/regression.py
__init__
AjithShetty2489/spark
python
@keyword_only def __init__(self, labelCol='label', featuresCol='features', predictionCol='prediction', family='gaussian', link=None, fitIntercept=True, maxIter=25, tol=1e-06, regParam=0.0, weightCol=None, solver='irls', linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): '\n \n ' super(GeneralizedLinearRegression, self).__init__() self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.GeneralizedLinearRegression', self.uid) self._setDefault(family='gaussian', maxIter=25, tol=1e-06, regParam=0.0, solver='irls', variancePower=0.0, aggregationDepth=2) kwargs = self._input_kwargs self.setParams(**kwargs)
@keyword_only @since('2.0.0') def setParams(self, labelCol='label', featuresCol='features', predictionCol='prediction', family='gaussian', link=None, fitIntercept=True, maxIter=25, tol=1e-06, regParam=0.0, weightCol=None, solver='irls', linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): '\n setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)\n Sets params for generalized linear regression.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
8,200,085,354,386,206,000
setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2) Sets params for generalized linear regression.
python/pyspark/ml/regression.py
setParams
AjithShetty2489/spark
python
@keyword_only @since('2.0.0') def setParams(self, labelCol='label', featuresCol='features', predictionCol='prediction', family='gaussian', link=None, fitIntercept=True, maxIter=25, tol=1e-06, regParam=0.0, weightCol=None, solver='irls', linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): '\n setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)\n Sets params for generalized linear regression.\n ' kwargs = self._input_kwargs return self._set(**kwargs)
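A sketch of the GLR estimator above; the "gaussian" family with the "identity" link reduces to ordinary least squares (data invented for the example):

```python
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import GeneralizedLinearRegression

glr_train = spark.createDataFrame(
    [(1.0, Vectors.dense(0.0, 0.0)), (1.0, Vectors.dense(1.0, 2.0)),
     (2.0, Vectors.dense(0.0, 0.0)), (2.0, Vectors.dense(1.0, 1.0))],
    ["label", "features"])

glr = GeneralizedLinearRegression(family='gaussian', link='identity',
                                  maxIter=25, regParam=0.0)
glr_model = glr.fit(glr_train)
print(glr_model.coefficients, glr_model.intercept)
```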
@since('2.0.0') def setFamily(self, value): '\n Sets the value of :py:attr:`family`.\n ' return self._set(family=value)
1,164,448,383,522,171,100
Sets the value of :py:attr:`family`.
python/pyspark/ml/regression.py
setFamily
AjithShetty2489/spark
python
@since('2.0.0') def setFamily(self, value): '\n \n ' return self._set(family=value)
@since('2.0.0') def setLinkPredictionCol(self, value): '\n Sets the value of :py:attr:`linkPredictionCol`.\n ' return self._set(linkPredictionCol=value)
6,628,946,941,373,349,000
Sets the value of :py:attr:`linkPredictionCol`.
python/pyspark/ml/regression.py
setLinkPredictionCol
AjithShetty2489/spark
python
@since('2.0.0') def setLinkPredictionCol(self, value): '\n \n ' return self._set(linkPredictionCol=value)
@since('2.0.0') def setLink(self, value): '\n Sets the value of :py:attr:`link`.\n ' return self._set(link=value)
-7,532,497,425,619,843,000
Sets the value of :py:attr:`link`.
python/pyspark/ml/regression.py
setLink
AjithShetty2489/spark
python
@since('2.0.0') def setLink(self, value): '\n \n ' return self._set(link=value)
@since('2.2.0') def setVariancePower(self, value): '\n Sets the value of :py:attr:`variancePower`.\n ' return self._set(variancePower=value)
3,701,548,667,630,760,000
Sets the value of :py:attr:`variancePower`.
python/pyspark/ml/regression.py
setVariancePower
AjithShetty2489/spark
python
@since('2.2.0') def setVariancePower(self, value): '\n \n ' return self._set(variancePower=value)
@since('2.2.0') def setLinkPower(self, value): '\n Sets the value of :py:attr:`linkPower`.\n ' return self._set(linkPower=value)
7,172,790,407,754,197,000
Sets the value of :py:attr:`linkPower`.
python/pyspark/ml/regression.py
setLinkPower
AjithShetty2489/spark
python
@since('2.2.0') def setLinkPower(self, value): '\n \n ' return self._set(linkPower=value)
@since('2.3.0') def setOffsetCol(self, value): '\n Sets the value of :py:attr:`offsetCol`.\n ' return self._set(offsetCol=value)
-7,094,106,673,869,634,000
Sets the value of :py:attr:`offsetCol`.
python/pyspark/ml/regression.py
setOffsetCol
AjithShetty2489/spark
python
@since('2.3.0') def setOffsetCol(self, value): '\n \n ' return self._set(offsetCol=value)
@since('2.0.0') def setMaxIter(self, value): '\n Sets the value of :py:attr:`maxIter`.\n ' return self._set(maxIter=value)
-8,587,339,129,955,141,000
Sets the value of :py:attr:`maxIter`.
python/pyspark/ml/regression.py
setMaxIter
AjithShetty2489/spark
python
@since('2.0.0') def setMaxIter(self, value): '\n \n ' return self._set(maxIter=value)
@since('2.0.0') def setRegParam(self, value): '\n Sets the value of :py:attr:`regParam`.\n ' return self._set(regParam=value)
1,711,442,500,235,189,200
Sets the value of :py:attr:`regParam`.
python/pyspark/ml/regression.py
setRegParam
AjithShetty2489/spark
python
@since('2.0.0') def setRegParam(self, value): '\n \n ' return self._set(regParam=value)
@since('2.0.0') def setTol(self, value): '\n Sets the value of :py:attr:`tol`.\n ' return self._set(tol=value)
5,756,669,959,330,179,000
Sets the value of :py:attr:`tol`.
python/pyspark/ml/regression.py
setTol
AjithShetty2489/spark
python
@since('2.0.0') def setTol(self, value): '\n \n ' return self._set(tol=value)
@since('2.2.0') def setFitIntercept(self, value): '\n Sets the value of :py:attr:`fitIntercept`.\n ' return self._set(fitIntercept=value)
2,783,097,627,851,963,000
Sets the value of :py:attr:`fitIntercept`.
python/pyspark/ml/regression.py
setFitIntercept
AjithShetty2489/spark
python
@since('2.2.0') def setFitIntercept(self, value): '\n \n ' return self._set(fitIntercept=value)
@since('2.0.0') def setWeightCol(self, value): '\n Sets the value of :py:attr:`weightCol`.\n ' return self._set(weightCol=value)
-1,547,864,048,159,526,000
Sets the value of :py:attr:`weightCol`.
python/pyspark/ml/regression.py
setWeightCol
AjithShetty2489/spark
python
@since('2.0.0') def setWeightCol(self, value): '\n \n ' return self._set(weightCol=value)
@since('2.0.0') def setSolver(self, value): '\n Sets the value of :py:attr:`solver`.\n ' return self._set(solver=value)
-2,492,674,661,642,492,000
Sets the value of :py:attr:`solver`.
python/pyspark/ml/regression.py
setSolver
AjithShetty2489/spark
python
@since('2.0.0') def setSolver(self, value): '\n \n ' return self._set(solver=value)
@since('3.0.0') def setAggregationDepth(self, value): '\n Sets the value of :py:attr:`aggregationDepth`.\n ' return self._set(aggregationDepth=value)
4,437,374,844,223,364,600
Sets the value of :py:attr:`aggregationDepth`.
python/pyspark/ml/regression.py
setAggregationDepth
AjithShetty2489/spark
python
@since('3.0.0') def setAggregationDepth(self, value): '\n \n ' return self._set(aggregationDepth=value)
@since('3.0.0') def setLinkPredictionCol(self, value): '\n Sets the value of :py:attr:`linkPredictionCol`.\n ' return self._set(linkPredictionCol=value)
-914,956,450,454,516,500
Sets the value of :py:attr:`linkPredictionCol`.
python/pyspark/ml/regression.py
setLinkPredictionCol
AjithShetty2489/spark
python
@since('3.0.0') def setLinkPredictionCol(self, value): '\n \n ' return self._set(linkPredictionCol=value)
@property @since('2.0.0') def coefficients(self): '\n Model coefficients.\n ' return self._call_java('coefficients')
6,857,518,054,360,473,000
Model coefficients.
python/pyspark/ml/regression.py
coefficients
AjithShetty2489/spark
python
@property @since('2.0.0') def coefficients(self): '\n \n ' return self._call_java('coefficients')
@property @since('2.0.0') def intercept(self): '\n Model intercept.\n ' return self._call_java('intercept')
-5,173,635,402,826,854,000
Model intercept.
python/pyspark/ml/regression.py
intercept
AjithShetty2489/spark
python
@property @since('2.0.0') def intercept(self): '\n \n ' return self._call_java('intercept')
@property @since('2.0.0') def summary(self): '\n Gets summary (e.g. residuals, deviance, pValues) of model on\n training set. An exception is thrown if\n `trainingSummary is None`.\n ' if self.hasSummary: return GeneralizedLinearRegressionTrainingSummary(super(GeneralizedLinearRegressionModel, self).summary) else: raise RuntimeError(('No training summary available for this %s' % self.__class__.__name__))
580,146,496,962,842,500
Gets summary (e.g. residuals, deviance, pValues) of model on training set. An exception is thrown if `trainingSummary is None`.
python/pyspark/ml/regression.py
summary
AjithShetty2489/spark
python
@property @since('2.0.0') def summary(self): '\n Gets summary (e.g. residuals, deviance, pValues) of model on\n training set. An exception is thrown if\n `trainingSummary is None`.\n ' if self.hasSummary: return GeneralizedLinearRegressionTrainingSummary(super(GeneralizedLinearRegressionModel, self).summary) else: raise RuntimeError(('No training summary available for this %s' % self.__class__.__name__))
@since('2.0.0') def evaluate(self, dataset): '\n Evaluates the model on a test dataset.\n\n :param dataset:\n Test dataset to evaluate model on, where dataset is an\n instance of :py:class:`pyspark.sql.DataFrame`\n ' if (not isinstance(dataset, DataFrame)): raise ValueError(('dataset must be a DataFrame but got %s.' % type(dataset))) java_glr_summary = self._call_java('evaluate', dataset) return GeneralizedLinearRegressionSummary(java_glr_summary)
-7,544,132,382,661,572,000
Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame`
python/pyspark/ml/regression.py
evaluate
AjithShetty2489/spark
python
@since('2.0.0') def evaluate(self, dataset): '\n Evaluates the model on a test dataset.\n\n :param dataset:\n Test dataset to evaluate model on, where dataset is an\n instance of :py:class:`pyspark.sql.DataFrame`\n ' if (not isinstance(dataset, DataFrame)): raise ValueError(('dataset must be a DataFrame but got %s.' % type(dataset))) java_glr_summary = self._call_java('evaluate', dataset) return GeneralizedLinearRegressionSummary(java_glr_summary)
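The summary/evaluate pair above can be exercised like this (continuing the GLR sketch); `summary` raises if no training summary is attached, hence the `hasSummary` guard:

```python
# Training summary exists right after fit(); guard before touching it.
if glr_model.hasSummary:
    s = glr_model.summary
    print(s.deviance, s.nullDeviance, s.aic, s.dispersion)

# evaluate() recomputes the same statistics on any compatible DataFrame.
test_summary = glr_model.evaluate(glr_train)
print(test_summary.deviance)
```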
@property @since('2.0.0') def predictions(self): "\n Predictions output by the model's `transform` method.\n " return self._call_java('predictions')
-9,200,266,731,621,595,000
Predictions output by the model's `transform` method.
python/pyspark/ml/regression.py
predictions
AjithShetty2489/spark
python
@property @since('2.0.0') def predictions(self): "\n \n " return self._call_java('predictions')
@property @since('2.0.0') def predictionCol(self): "\n Field in :py:attr:`predictions` which gives the predicted value of each instance.\n This is set to a new column name if the original model's `predictionCol` is not set.\n " return self._call_java('predictionCol')
-6,291,787,637,265,208,000
Field in :py:attr:`predictions` which gives the predicted value of each instance. This is set to a new column name if the original model's `predictionCol` is not set.
python/pyspark/ml/regression.py
predictionCol
AjithShetty2489/spark
python
@property @since('2.0.0') def predictionCol(self): "\n Field in :py:attr:`predictions` which gives the predicted value of each instance.\n This is set to a new column name if the original model's `predictionCol` is not set.\n " return self._call_java('predictionCol')
@property @since('2.2.0') def numInstances(self): '\n Number of instances in DataFrame predictions.\n ' return self._call_java('numInstances')
447,667,802,768,058,500
Number of instances in DataFrame predictions.
python/pyspark/ml/regression.py
numInstances
AjithShetty2489/spark
python
@property @since('2.2.0') def numInstances(self): '\n \n ' return self._call_java('numInstances')
@property @since('2.0.0') def rank(self): '\n The numeric rank of the fitted linear model.\n ' return self._call_java('rank')
-3,806,673,583,552,185,000
The numeric rank of the fitted linear model.
python/pyspark/ml/regression.py
rank
AjithShetty2489/spark
python
@property @since('2.0.0') def rank(self): '\n \n ' return self._call_java('rank')
@property @since('2.0.0') def degreesOfFreedom(self): '\n Degrees of freedom.\n ' return self._call_java('degreesOfFreedom')
-4,423,156,539,142,590,000
Degrees of freedom.
python/pyspark/ml/regression.py
degreesOfFreedom
AjithShetty2489/spark
python
@property @since('2.0.0') def degreesOfFreedom(self): '\n \n ' return self._call_java('degreesOfFreedom')
@property @since('2.0.0') def residualDegreeOfFreedom(self): '\n The residual degrees of freedom.\n ' return self._call_java('residualDegreeOfFreedom')
3,038,285,719,347,285,500
The residual degrees of freedom.
python/pyspark/ml/regression.py
residualDegreeOfFreedom
AjithShetty2489/spark
python
@property @since('2.0.0') def residualDegreeOfFreedom(self): '\n \n ' return self._call_java('residualDegreeOfFreedom')
@property @since('2.0.0') def residualDegreeOfFreedomNull(self): '\n The residual degrees of freedom for the null model.\n ' return self._call_java('residualDegreeOfFreedomNull')
7,911,363,567,214,588,000
The residual degrees of freedom for the null model.
python/pyspark/ml/regression.py
residualDegreeOfFreedomNull
AjithShetty2489/spark
python
@property @since('2.0.0') def residualDegreeOfFreedomNull(self): '\n \n ' return self._call_java('residualDegreeOfFreedomNull')
@since('2.0.0') def residuals(self, residualsType='deviance'): '\n Get the residuals of the fitted model by type.\n\n :param residualsType: The type of residuals which should be returned.\n Supported options: deviance (default), pearson, working, and response.\n ' return self._call_java('residuals', residualsType)
4,521,332,300,123,337,000
Get the residuals of the fitted model by type. :param residualsType: The type of residuals which should be returned. Supported options: deviance (default), pearson, working, and response.
python/pyspark/ml/regression.py
residuals
AjithShetty2489/spark
python
@since('2.0.0') def residuals(self, residualsType='deviance'): '\n Get the residuals of the fitted model by type.\n\n :param residualsType: The type of residuals which should be returned.\n Supported options: deviance (default), pearson, working, and response.\n ' return self._call_java('residuals', residualsType)
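Residuals come back as a DataFrame, one row per training instance; the type argument defaults to "deviance" as documented above:

```python
glr_model.summary.residuals().show()           # deviance residuals (default)
glr_model.summary.residuals("pearson").show()  # pearson residuals
```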
@property @since('2.0.0') def nullDeviance(self): '\n The deviance for the null model.\n ' return self._call_java('nullDeviance')
-8,684,909,638,740,993,000
The deviance for the null model.
python/pyspark/ml/regression.py
nullDeviance
AjithShetty2489/spark
python
@property @since('2.0.0') def nullDeviance(self): '\n \n ' return self._call_java('nullDeviance')
@property @since('2.0.0') def deviance(self): '\n The deviance for the fitted model.\n ' return self._call_java('deviance')
-3,850,408,545,137,996,000
The deviance for the fitted model.
python/pyspark/ml/regression.py
deviance
AjithShetty2489/spark
python
@property @since('2.0.0') def deviance(self): '\n \n ' return self._call_java('deviance')
@property @since('2.0.0') def dispersion(self): '\n The dispersion of the fitted model.\n It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise\n estimated by the residual Pearson\'s Chi-Squared statistic (which is defined as\n sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.\n ' return self._call_java('dispersion')
6,445,401,432,828,434,000
The dispersion of the fitted model. It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise estimated by the residual Pearson's Chi-Squared statistic (which is defined as sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
python/pyspark/ml/regression.py
dispersion
AjithShetty2489/spark
python
@property @since('2.0.0') def dispersion(self): '\n The dispersion of the fitted model.\n It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise\n estimated by the residual Pearson\'s Chi-Squared statistic (which is defined as\n sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.\n ' return self._call_java('dispersion')
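Restated as a formula (nothing beyond the docstring above): for families other than binomial and poisson, the dispersion is estimated from the Pearson residuals r_{P,i} as

```latex
\hat{\phi} \;=\; \frac{\sum_i r_{P,i}^{2}}{\mathrm{df}_{\mathrm{residual}}},
\qquad \hat{\phi} \equiv 1 \text{ for the binomial and poisson families.}
```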
@property @since('2.0.0') def aic(self): '\n Akaike\'s "An Information Criterion"(AIC) for the fitted model.\n ' return self._call_java('aic')
8,133,851,070,450,337,000
Akaike's "An Information Criterion"(AIC) for the fitted model.
python/pyspark/ml/regression.py
aic
AjithShetty2489/spark
python
@property @since('2.0.0') def aic(self): '\n Akaike\'s "An Information Criterion"(AIC) for the fitted model.\n ' return self._call_java('aic')