Dataset columns:
body: string, length 26 to 98.2k
body_hash: int64, range -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, length 1 to 16.8k
path: string, length 5 to 230
name: string, length 1 to 96
repository_name: string, length 7 to 89
lang: string, 1 class
body_without_docstring: string, length 20 to 98.2k
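The rows that follow use this schema, one field per line. As a hedged sketch only (it assumes the rows have been exported as JSON Lines to a hypothetical file named functions.jsonl, which this dump does not specify), they could be consumed like so:

import json

# Iterate the hypothetical JSON Lines export; each record carries the columns listed above.
with open("functions.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        assert row["lang"] == "python"  # 'lang' has a single class in this dump
        print(row["repository_name"], row["path"], row["name"])
        # row["body"], row["docstring"], row["body_without_docstring"] hold the source text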
@property def A(self): 'scipy.sparse.csr_matrix: csr_matrix to be exponentiated.' return self._A
5,594,488,028,696,724,000
scipy.sparse.csr_matrix: csr_matrix to be exponentiated.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
A
markusschmitt/QuSpin
python
@property def A(self): return self._A
def set_a(self, a, dtype=None): 'Sets the value of the property `a`.\n\n Parameters\n ----------\n a : scalar\n new value of `a`.\n dtype : numpy.dtype, optional\n dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64)\n\n Examples\n --------\n\n .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n :linenos:\n :language: python\n :lines: 32-35\n \n ' if (_np.array(a).ndim == 0): self._a = a a_dtype_min = _np.min_scalar_type(self._a) if (dtype is None): self._dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float64) else: min_dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float32) if (not _np.can_cast(min_dtype, dtype)): raise ValueError('dtype not sufficient to represent a*A to at least float32 precision.') self._dtype = dtype tol = (_np.finfo(self._dtype).eps / 2) tol_dtype = _np.finfo(self._dtype).eps.dtype self._tol = _np.array(tol, dtype=tol_dtype) self._mu = _np.array(self._mu, dtype=self._dtype) self._calculate_partition() else: raise ValueError("expecting 'a' to be scalar.")
1,647,312,176,468,744,400
Sets the value of the property `a`. Parameters ---------- a : scalar new value of `a`. dtype : numpy.dtype, optional dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64) Examples -------- .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py :linenos: :language: python :lines: 32-35
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
set_a
markusschmitt/QuSpin
python
def set_a(self, a, dtype=None): 'Sets the value of the property `a`.\n\n Parameters\n ----------\n a : scalar\n new value of `a`.\n dtype : numpy.dtype, optional\n dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64)\n\n Examples\n --------\n\n .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n :linenos:\n :language: python\n :lines: 32-35\n \n ' if (_np.array(a).ndim == 0): self._a = a a_dtype_min = _np.min_scalar_type(self._a) if (dtype is None): self._dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float64) else: min_dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float32) if (not _np.can_cast(min_dtype, dtype)): raise ValueError('dtype not sufficient to represent a*A to at least float32 precision.') self._dtype = dtype tol = (_np.finfo(self._dtype).eps / 2) tol_dtype = _np.finfo(self._dtype).eps.dtype self._tol = _np.array(tol, dtype=tol_dtype) self._mu = _np.array(self._mu, dtype=self._dtype) self._calculate_partition() else: raise ValueError("expecting 'a' to be scalar.")
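The dtype rule documented in the set_a row above, result_type(A.dtype, min_scalar_type(a), float64), can be reproduced directly with NumPy. A minimal sketch, assuming an illustrative float32 matrix dtype and a complex scalar a (both invented here):

import numpy as np

A_dtype = np.dtype(np.float32)  # assumed dtype of the matrix A
a = 1j                          # assumed complex scalar prefactor
# Default working dtype per the docstring: result_type(A.dtype, min_scalar_type(a), float64)
dtype = np.result_type(A_dtype, np.min_scalar_type(a), np.float64)
print(dtype)                    # complex128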
def dot(self, v, work_array=None, overwrite_v=False): 'Calculates the action of :math:`\\mathrm{e}^{aA}` on a vector :math:`v`. \n\n Examples\n --------\n\n .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n :linenos:\n :language: python\n :lines: 37-\n\n Parameters\n -----------\n v : contiguous numpy.ndarray\n array to apply :math:`\\mathrm{e}^{aA}` on.\n work_array : contiguous numpy.ndarray, optional\n array of `shape = (2*len(v),)` which is used as work_array space for the underlying c-code. This saves extra memory allocation for function operations.\n overwrite_v : bool\n if set to `True`, the data in `v` is overwritten by the function. This saves extra memory allocation for the results.\n\n Returns\n --------\n numpy.ndarray\n result of :math:`\\mathrm{e}^{aA}v`. \n\n If `overwrite_v = True` the dunction returns `v` with the data overwritten, otherwise the result is stored in a new array. \n\n ' v = _np.asarray(v) if (v.ndim != 1): raise ValueError('array must have ndim of 1.') if (v.shape[0] != self._A.shape[1]): raise ValueError('dimension mismatch {}, {}'.format(self._A.shape, v.shape)) v_dtype = _np.result_type(self._dtype, v.dtype) if overwrite_v: if (v_dtype != v.dtype): raise ValueError('if overwrite_v is True, the input array must match correct output dtype for matrix multiplication.') if (not v.flags['CARRAY']): raise TypeError('input array must a contiguous and writable.') if (v.ndim != 1): raise ValueError('array must have ndim of 1.') else: v = v.astype(v_dtype, order='C', copy=True) if (work_array is None): work_array = _np.zeros(((2 * self._A.shape[0]),), dtype=v.dtype) else: work_array = _np.ascontiguousarray(work_array) if (work_array.shape != ((2 * self._A.shape[0]),)): raise ValueError('work_array array must be an array of shape (2*v.shape[0],) with same dtype as v.') if (work_array.dtype != v_dtype): raise ValueError('work_array must be array of dtype which matches the result of the matrix-vector multiplication.') a = _np.array(self._a, dtype=v_dtype) mu = _np.array(self._mu, dtype=v_dtype) tol = _np.array(self._tol, dtype=mu.real.dtype) _wrapper_expm_multiply(self._A.indptr, self._A.indices, self._A.data, self._s, self._m_star, a, tol, mu, v, work_array) return v
3,259,529,137,262,955,000
Calculates the action of :math:`\mathrm{e}^{aA}` on a vector :math:`v`. Examples -------- .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py :linenos: :language: python :lines: 37- Parameters ----------- v : contiguous numpy.ndarray array to apply :math:`\mathrm{e}^{aA}` on. work_array : contiguous numpy.ndarray, optional array of `shape = (2*len(v),)` which is used as work_array space for the underlying c-code. This saves extra memory allocation for function operations. overwrite_v : bool if set to `True`, the data in `v` is overwritten by the function. This saves extra memory allocation for the results. Returns -------- numpy.ndarray result of :math:`\mathrm{e}^{aA}v`. If `overwrite_v = True` the function returns `v` with the data overwritten, otherwise the result is stored in a new array.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
dot
markusschmitt/QuSpin
python
def dot(self, v, work_array=None, overwrite_v=False): 'Calculates the action of :math:`\\mathrm{e}^{aA}` on a vector :math:`v`. \n\n Examples\n --------\n\n .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n :linenos:\n :language: python\n :lines: 37-\n\n Parameters\n -----------\n v : contiguous numpy.ndarray\n array to apply :math:`\\mathrm{e}^{aA}` on.\n work_array : contiguous numpy.ndarray, optional\n array of `shape = (2*len(v),)` which is used as work_array space for the underlying c-code. This saves extra memory allocation for function operations.\n overwrite_v : bool\n if set to `True`, the data in `v` is overwritten by the function. This saves extra memory allocation for the results.\n\n Returns\n --------\n numpy.ndarray\n result of :math:`\\mathrm{e}^{aA}v`. \n\n If `overwrite_v = True` the dunction returns `v` with the data overwritten, otherwise the result is stored in a new array. \n\n ' v = _np.asarray(v) if (v.ndim != 1): raise ValueError('array must have ndim of 1.') if (v.shape[0] != self._A.shape[1]): raise ValueError('dimension mismatch {}, {}'.format(self._A.shape, v.shape)) v_dtype = _np.result_type(self._dtype, v.dtype) if overwrite_v: if (v_dtype != v.dtype): raise ValueError('if overwrite_v is True, the input array must match correct output dtype for matrix multiplication.') if (not v.flags['CARRAY']): raise TypeError('input array must a contiguous and writable.') if (v.ndim != 1): raise ValueError('array must have ndim of 1.') else: v = v.astype(v_dtype, order='C', copy=True) if (work_array is None): work_array = _np.zeros(((2 * self._A.shape[0]),), dtype=v.dtype) else: work_array = _np.ascontiguousarray(work_array) if (work_array.shape != ((2 * self._A.shape[0]),)): raise ValueError('work_array array must be an array of shape (2*v.shape[0],) with same dtype as v.') if (work_array.dtype != v_dtype): raise ValueError('work_array must be array of dtype which matches the result of the matrix-vector multiplication.') a = _np.array(self._a, dtype=v_dtype) mu = _np.array(self._mu, dtype=v_dtype) tol = _np.array(self._tol, dtype=mu.real.dtype) _wrapper_expm_multiply(self._A.indptr, self._A.indices, self._A.data, self._s, self._m_star, a, tol, mu, v, work_array) return v
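The dot row above applies e^{aA} to a vector v. A minimal sketch of the same quantity using SciPy's serial expm_multiply rather than QuSpin's parallel implementation (the matrix size, density, and the value of a are arbitrary choices for illustration):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import expm_multiply

A = sp.random(200, 200, density=0.05, format="csr")  # arbitrary sparse matrix
v = np.random.rand(200)                              # vector to act on
a = -0.1j                                            # scalar prefactor in e^{aA}

w = expm_multiply(a * A, v)  # result of e^{aA} v, computed serially by SciPy
print(w.shape)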
def __init__(self, A, A_1_norm, a, mu, dtype, ell=2): '\n Provide the operator and some norm-related information.\n\n Parameters\n -----------\n A : linear operator\n The operator of interest.\n A_1_norm : float\n The exact 1-norm of A.\n ell : int, optional\n A technical parameter controlling norm estimation quality.\n\n ' self._A = A self._a = a self._mu = mu self._dtype = dtype self._A_1_norm = A_1_norm self._ell = ell self._d = {}
-3,342,848,802,935,478,300
Provide the operator and some norm-related information. Parameters ----------- A : linear operator The operator of interest. A_1_norm : float The exact 1-norm of A. ell : int, optional A technical parameter controlling norm estimation quality.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
__init__
markusschmitt/QuSpin
python
def __init__(self, A, A_1_norm, a, mu, dtype, ell=2): '\n Provide the operator and some norm-related information.\n\n Parameters\n -----------\n A : linear operator\n The operator of interest.\n A_1_norm : float\n The exact 1-norm of A.\n ell : int, optional\n A technical parameter controlling norm estimation quality.\n\n ' self._A = A self._a = a self._mu = mu self._dtype = dtype self._A_1_norm = A_1_norm self._ell = ell self._d = {}
def onenorm(self): '\n Compute the exact 1-norm.\n ' return (_np.abs(self._a) * self._A_1_norm)
3,180,204,854,046,551,600
Compute the exact 1-norm.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
onenorm
markusschmitt/QuSpin
python
def onenorm(self): '\n \n ' return (_np.abs(self._a) * self._A_1_norm)
def d(self, p): '\n Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.\n ' if (p not in self._d): matvec = (lambda v: (self._a * (self._A.dot(v) - (self._mu * v)))) rmatvec = (lambda v: (_np.conj(self._a) * (self._A.H.dot(v) - (_np.conj(self._mu) * v)))) LO = LinearOperator(self._A.shape, dtype=self._dtype, matvec=matvec, rmatvec=rmatvec) est = onenormest((LO ** p)) self._d[p] = (est ** (1.0 / p)) return self._d[p]
3,573,397,589,313,746,400
Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
d
markusschmitt/QuSpin
python
def d(self, p): '\n \n ' if (p not in self._d): matvec = (lambda v: (self._a * (self._A.dot(v) - (self._mu * v)))) rmatvec = (lambda v: (_np.conj(self._a) * (self._A.H.dot(v) - (_np.conj(self._mu) * v)))) LO = LinearOperator(self._A.shape, dtype=self._dtype, matvec=matvec, rmatvec=rmatvec) est = onenormest((LO ** p)) self._d[p] = (est ** (1.0 / p)) return self._d[p]
def alpha(self, p): '\n Lazily compute max(d(p), d(p+1)).\n ' return max(self.d(p), self.d((p + 1)))
-7,602,815,063,169,623,000
Lazily compute max(d(p), d(p+1)).
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
alpha
markusschmitt/QuSpin
python
def alpha(self, p): '\n \n ' return max(self.d(p), self.d((p + 1)))
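The d and alpha rows above lazily estimate d_p = ||B^p||_1^(1/p) for the shifted, scaled operator B = a(A - mu*I). A self-contained sketch of that estimate with SciPy's onenormest, mirroring the LinearOperator construction in the d row (the matrix, a, and mu are invented for illustration):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import LinearOperator, onenormest

A = sp.random(300, 300, density=0.02, format="csr")  # illustrative sparse matrix
a, mu = 0.5, A.diagonal().mean()                     # illustrative scalar and shift

matvec = lambda v: a * (A.dot(v) - mu * v)                              # B @ v
rmatvec = lambda v: np.conj(a) * (A.T.conj().dot(v) - np.conj(mu) * v)  # B.H @ v
LO = LinearOperator(A.shape, dtype=np.float64, matvec=matvec, rmatvec=rmatvec)

p = 2
d_p = onenormest(LO ** p) ** (1.0 / p)  # lazy estimate of ||B^p||_1^(1/p)
print(d_p)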
def _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any: '\n Performs a reduction on a Spark DataFrame, the functions being known SQL aggregate functions.\n ' assert isinstance(sdf, SparkDataFrame) sdf0 = sdf.agg(*aggs) lst = sdf0.limit(2).toPandas() assert (len(lst) == 1), (sdf, lst) row = lst.iloc[0] lst2 = list(row) assert (len(lst2) == len(aggs)), (row, lst2) return lst2
7,922,910,722,984,314,000
Performs a reduction on a Spark DataFrame, the functions being known SQL aggregate functions.
python/pyspark/pandas/frame.py
_reduce_spark_multi
Flyangz/spark
python
def _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any: '\n \n ' assert isinstance(sdf, SparkDataFrame) sdf0 = sdf.agg(*aggs) lst = sdf0.limit(2).toPandas() assert (len(lst) == 1), (sdf, lst) row = lst.iloc[0] lst2 = list(row) assert (len(lst2) == len(aggs)), (row, lst2) return lst2
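The _reduce_spark_multi row above leans on the fact that a Spark .agg() call over several aggregate Columns yields exactly one row, one value per aggregate. A hedged usage sketch (it assumes a running Spark session and uses invented toy data):

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([(1, 4.0), (2, 5.0), (3, 6.0)], ["a", "b"])

aggs = [F.sum("a"), F.max("b"), F.count("a")]
row = sdf.agg(*aggs).limit(2).toPandas().iloc[0]  # a single row, one value per aggregate
print(list(row))                                  # [6, 6.0, 3]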
@property def _pssers(self) -> Dict[(Label, 'Series')]: 'Return a dict of column label -> Series which anchors `self`.' from pyspark.pandas.series import Series if (not hasattr(self, '_psseries')): object.__setattr__(self, '_psseries', {label: Series(data=self, index=label) for label in self._internal.column_labels}) else: psseries = cast(Dict[(Label, Series)], self._psseries) assert (len(self._internal.column_labels) == len(psseries)), (len(self._internal.column_labels), len(psseries)) if any(((self is not psser._psdf) for psser in psseries.values())): self._psseries = {label: (psseries[label] if (self is psseries[label]._psdf) else Series(data=self, index=label)) for label in self._internal.column_labels} return self._psseries
1,271,166,069,773,295,400
Return a dict of column label -> Series which anchors `self`.
python/pyspark/pandas/frame.py
_pssers
Flyangz/spark
python
@property def _pssers(self) -> Dict[(Label, 'Series')]: from pyspark.pandas.series import Series if (not hasattr(self, '_psseries')): object.__setattr__(self, '_psseries', {label: Series(data=self, index=label) for label in self._internal.column_labels}) else: psseries = cast(Dict[(Label, Series)], self._psseries) assert (len(self._internal.column_labels) == len(psseries)), (len(self._internal.column_labels), len(psseries)) if any(((self is not psser._psdf) for psser in psseries.values())): self._psseries = {label: (psseries[label] if (self is psseries[label]._psdf) else Series(data=self, index=label)) for label in self._internal.column_labels} return self._psseries
def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool=True) -> None: '\n Update InternalFrame with the given one.\n\n If the column_label is changed or the new InternalFrame is not the same `anchor`,\n disconnect the link to the Series and create a new one.\n\n If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored\n and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy,\n updating the underlying Spark DataFrame which need to combine a different Spark DataFrame.\n\n :param internal: the new InternalFrame\n :param requires_same_anchor: whether checking the same anchor\n ' from pyspark.pandas.series import Series if hasattr(self, '_psseries'): psseries = {} for (old_label, new_label) in zip_longest(self._internal.column_labels, internal.column_labels): if (old_label is not None): psser = self._pssers[old_label] renamed = (old_label != new_label) not_same_anchor = (requires_same_anchor and (not same_anchor(internal, psser))) if (renamed or not_same_anchor): psdf: DataFrame = DataFrame(self._internal.select_column(old_label)) psser._update_anchor(psdf) psser = None else: psser = None if (new_label is not None): if (psser is None): psser = Series(data=self, index=new_label) psseries[new_label] = psser self._psseries = psseries self._internal_frame = internal if hasattr(self, '_repr_pandas_cache'): del self._repr_pandas_cache
694,187,229,648,033,900
Update InternalFrame with the given one. If the column_label is changed or the new InternalFrame is not the same `anchor`, disconnect the link to the Series and create a new one. If `requires_same_anchor` is `False`, the same-anchor check is skipped and the InternalFrame is updated unconditionally, e.g., when replacing the internal with the resolved_copy, or when updating the underlying Spark DataFrame, which needs to combine a different Spark DataFrame. :param internal: the new InternalFrame :param requires_same_anchor: whether to check for the same anchor
python/pyspark/pandas/frame.py
_update_internal_frame
Flyangz/spark
python
def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool=True) -> None: '\n Update InternalFrame with the given one.\n\n If the column_label is changed or the new InternalFrame is not the same `anchor`,\n disconnect the link to the Series and create a new one.\n\n If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored\n and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy,\n updating the underlying Spark DataFrame which need to combine a different Spark DataFrame.\n\n :param internal: the new InternalFrame\n :param requires_same_anchor: whether checking the same anchor\n ' from pyspark.pandas.series import Series if hasattr(self, '_psseries'): psseries = {} for (old_label, new_label) in zip_longest(self._internal.column_labels, internal.column_labels): if (old_label is not None): psser = self._pssers[old_label] renamed = (old_label != new_label) not_same_anchor = (requires_same_anchor and (not same_anchor(internal, psser))) if (renamed or not_same_anchor): psdf: DataFrame = DataFrame(self._internal.select_column(old_label)) psser._update_anchor(psdf) psser = None else: psser = None if (new_label is not None): if (psser is None): psser = Series(data=self, index=new_label) psseries[new_label] = psser self._psseries = psseries self._internal_frame = internal if hasattr(self, '_repr_pandas_cache'): del self._repr_pandas_cache
@property def ndim(self) -> int: "\n Return an int representing the number of array dimensions.\n\n return 2 for DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', None],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n NaN 7 8\n >>> df.ndim\n 2\n " return 2
-5,889,926,738,711,093,000
Return an int representing the number of array dimensions. return 2 for DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', None], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 NaN 7 8 >>> df.ndim 2
python/pyspark/pandas/frame.py
ndim
Flyangz/spark
python
@property def ndim(self) -> int: "\n Return an int representing the number of array dimensions.\n\n return 2 for DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', None],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n NaN 7 8\n >>> df.ndim\n 2\n " return 2
@property def axes(self) -> List: "\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]\n " return [self.index, self.columns]
3,087,804,529,254,136,300
Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]
python/pyspark/pandas/frame.py
axes
Flyangz/spark
python
@property def axes(self) -> List: "\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]\n " return [self.index, self.columns]
def _reduce_for_stat_function(self, sfun: Callable[(['Series'], Column)], name: str, axis: Optional[Axis]=None, numeric_only: bool=True, **kwargs: Any) -> 'Series': "\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n Parameters\n ----------\n sfun : either an 1-arg function that takes a Column and returns a Column, or\n a 2-arg function that takes a Column and its DataType and returns a Column.\n axis: used only for sanity check because series only support index axis.\n name : original pandas API name.\n axis : axis to apply. 0 or 1, or 'index' or 'columns.\n numeric_only : bool, default True\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter\n currently.\n " from pyspark.pandas.series import Series, first_series axis = validate_axis(axis) if (axis == 0): min_count = kwargs.get('min_count', 0) exprs = [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] new_column_labels = [] for label in self._internal.column_labels: psser = self._psser_for(label) is_numeric_or_boolean = isinstance(psser.spark.data_type, (NumericType, BooleanType)) keep_column = ((not numeric_only) or is_numeric_or_boolean) if keep_column: scol = sfun(psser) if (min_count > 0): scol = F.when((Frame._count_expr(psser) >= min_count), scol) exprs.append(scol.alias(name_like_string(label))) new_column_labels.append(label) if (len(exprs) == 1): return Series([]) sdf = self._internal.spark_frame.select(*exprs) with ps.option_context('compute.max_rows', 1): internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names) return first_series(DataFrame(internal).transpose()) else: limit = get_option('compute.shortcut_limit') pdf = self.head((limit + 1))._to_internal_pandas() pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs) if (len(pdf) <= limit): return Series(pser) @pandas_udf(returnType=as_spark_type(pser.dtype.type)) def calculate_columns_axis(*cols: pd.Series) -> pd.Series: return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only, **kwargs) column_name = verify_temp_column_name(self._internal.spark_frame.select(self._internal.index_spark_columns), '__calculate_columns_axis__') sdf = self._internal.spark_frame.select((self._internal.index_spark_columns + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)])) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in self._internal.index_spark_column_names], index_names=self._internal.index_names, index_fields=self._internal.index_fields) return first_series(DataFrame(internal)).rename(pser.name)
-5,742,163,017,799,007,000
Applies sfun to each column and returns a pd.Series where the number of rows equals the number of columns. Parameters ---------- sfun : either a 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. axis : used only for sanity check because Series only supports the index axis. name : original pandas API name. axis : axis to apply. 0 or 1, or 'index' or 'columns'. numeric_only : bool, default True Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter currently.
python/pyspark/pandas/frame.py
_reduce_for_stat_function
Flyangz/spark
python
def _reduce_for_stat_function(self, sfun: Callable[(['Series'], Column)], name: str, axis: Optional[Axis]=None, numeric_only: bool=True, **kwargs: Any) -> 'Series': "\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n Parameters\n ----------\n sfun : either an 1-arg function that takes a Column and returns a Column, or\n a 2-arg function that takes a Column and its DataType and returns a Column.\n axis: used only for sanity check because series only support index axis.\n name : original pandas API name.\n axis : axis to apply. 0 or 1, or 'index' or 'columns.\n numeric_only : bool, default True\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter\n currently.\n " from pyspark.pandas.series import Series, first_series axis = validate_axis(axis) if (axis == 0): min_count = kwargs.get('min_count', 0) exprs = [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] new_column_labels = [] for label in self._internal.column_labels: psser = self._psser_for(label) is_numeric_or_boolean = isinstance(psser.spark.data_type, (NumericType, BooleanType)) keep_column = ((not numeric_only) or is_numeric_or_boolean) if keep_column: scol = sfun(psser) if (min_count > 0): scol = F.when((Frame._count_expr(psser) >= min_count), scol) exprs.append(scol.alias(name_like_string(label))) new_column_labels.append(label) if (len(exprs) == 1): return Series([]) sdf = self._internal.spark_frame.select(*exprs) with ps.option_context('compute.max_rows', 1): internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names) return first_series(DataFrame(internal).transpose()) else: limit = get_option('compute.shortcut_limit') pdf = self.head((limit + 1))._to_internal_pandas() pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs) if (len(pdf) <= limit): return Series(pser) @pandas_udf(returnType=as_spark_type(pser.dtype.type)) def calculate_columns_axis(*cols: pd.Series) -> pd.Series: return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only, **kwargs) column_name = verify_temp_column_name(self._internal.spark_frame.select(self._internal.index_spark_columns), '__calculate_columns_axis__') sdf = self._internal.spark_frame.select((self._internal.index_spark_columns + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)])) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in self._internal.index_spark_column_names], index_names=self._internal.index_names, index_fields=self._internal.index_fields) return first_series(DataFrame(internal)).rename(pser.name)
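The _reduce_for_stat_function row above is the internal machinery behind the public column-wise reductions of pandas-on-Spark. A hedged usage sketch of that public surface (it assumes a running Spark session; the data is invented):

import pyspark.pandas as ps

psdf = ps.DataFrame({"a": [1, 2, 3], "b": [1.0, None, 3.0]})
print(psdf.count())  # per-column non-null counts, returned as a Series
print(psdf.sum())    # per-column sums over axis 0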
def _psser_for(self, label: Label) -> 'Series': "\n Create Series with a proper column label.\n\n The given label must be verified to exist in `InternalFrame.column_labels`.\n\n For example, in some method, self is like:\n\n >>> self = ps.range(3)\n\n `self._psser_for(label)` can be used with `InternalFrame.column_labels`:\n\n >>> self._psser_for(self._internal.column_labels[0])\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n\n `self._psser_for(label)` must not be used directly with user inputs.\n In that case, `self[label]` should be used instead, which checks the label exists or not:\n\n >>> self['id']\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n " return self._pssers[label]
1,867,862,387,058,158,300
Create Series with a proper column label. The given label must be verified to exist in `InternalFrame.column_labels`. For example, in some method, self is like: >>> self = ps.range(3) `self._psser_for(label)` can be used with `InternalFrame.column_labels`: >>> self._psser_for(self._internal.column_labels[0]) 0 0 1 1 2 2 Name: id, dtype: int64 `self._psser_for(label)` must not be used directly with user inputs. In that case, `self[label]` should be used instead, which checks the label exists or not: >>> self['id'] 0 0 1 1 2 2 Name: id, dtype: int64
python/pyspark/pandas/frame.py
_psser_for
Flyangz/spark
python
def _psser_for(self, label: Label) -> 'Series': "\n Create Series with a proper column label.\n\n The given label must be verified to exist in `InternalFrame.column_labels`.\n\n For example, in some method, self is like:\n\n >>> self = ps.range(3)\n\n `self._psser_for(label)` can be used with `InternalFrame.column_labels`:\n\n >>> self._psser_for(self._internal.column_labels[0])\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n\n `self._psser_for(label)` must not be used directly with user inputs.\n In that case, `self[label]` should be used instead, which checks the label exists or not:\n\n >>> self['id']\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n " return self._pssers[label]
def eq(self, other: Any) -> 'DataFrame': "\n Compare if the current value is equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.eq(1)\n a b\n a True True\n b False False\n c False True\n d False False\n " return (self == other)
-2,790,439,695,348,336,600
Compare if the current value is equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.eq(1) a b a True True b False False c False True d False False
python/pyspark/pandas/frame.py
eq
Flyangz/spark
python
def eq(self, other: Any) -> 'DataFrame': "\n Compare if the current value is equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.eq(1)\n a b\n a True True\n b False False\n c False True\n d False False\n " return (self == other)
def gt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.gt(2)\n a b\n a False False\n b False False\n c True False\n d True False\n " return (self > other)
-7,195,802,182,166,305,000
Compare if the current value is greater than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.gt(2) a b a False False b False False c True False d True False
python/pyspark/pandas/frame.py
gt
Flyangz/spark
python
def gt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.gt(2)\n a b\n a False False\n b False False\n c True False\n d True False\n " return (self > other)
def ge(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ge(1)\n a b\n a True True\n b True False\n c True True\n d True False\n " return (self >= other)
-5,542,816,325,634,264,000
Compare if the current value is greater than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ge(1) a b a True True b True False c True True d True False
python/pyspark/pandas/frame.py
ge
Flyangz/spark
python
def ge(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ge(1)\n a b\n a True True\n b True False\n c True True\n d True False\n " return (self >= other)
def lt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.lt(1)\n a b\n a False False\n b False False\n c False False\n d False False\n " return (self < other)
-6,288,953,088,293,913,000
Compare if the current value is less than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.lt(1) a b a False False b False False c False False d False False
python/pyspark/pandas/frame.py
lt
Flyangz/spark
python
def lt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.lt(1)\n a b\n a False False\n b False False\n c False False\n d False False\n " return (self < other)
def le(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.le(2)\n a b\n a True True\n b True False\n c False True\n d False False\n " return (self <= other)
-6,213,523,783,659,392,000
Compare if the current value is less than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.le(2) a b a True True b True False c False True d False False
python/pyspark/pandas/frame.py
le
Flyangz/spark
python
def le(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.le(2)\n a b\n a True True\n b True False\n c False True\n d False False\n " return (self <= other)
def ne(self, other: Any) -> 'DataFrame': "\n Compare if the current value is not equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ne(1)\n a b\n a False False\n b True True\n c True False\n d True True\n " return (self != other)
8,309,935,139,510,682,000
Compare if the current value is not equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ne(1) a b a False False b True True c True False d True True
python/pyspark/pandas/frame.py
ne
Flyangz/spark
python
def ne(self, other: Any) -> 'DataFrame': "\n Compare if the current value is not equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ne(1)\n a b\n a False False\n b True True\n c True False\n d True True\n " return (self != other)
def applymap(self, func: Callable[([Any], Any)]) -> 'DataFrame': '\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> np.int32:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> def str_len(x) -> int:\n ... return len(str(x))\n >>> df.applymap(str_len)\n 0 1\n 0 3 4\n 1 5 5\n\n >>> def power(x) -> float:\n ... return x ** 2\n >>> df.applymap(power)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.applymap(lambda x: x ** 2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n ' return self._apply_series_op((lambda psser: psser.apply(func)))
2,872,845,046,706,062,000
Apply a function to a DataFrame elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 pandas-on-Spark uses the return type hint and does not try to infer the type. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> def str_len(x) -> int: ... return len(str(x)) >>> df.applymap(str_len) 0 1 0 3 4 1 5 5 >>> def power(x) -> float: ... return x ** 2 >>> df.applymap(power) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 You can omit the type hint and let pandas-on-Spark infer its type. >>> df.applymap(lambda x: x ** 2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489
python/pyspark/pandas/frame.py
applymap
Flyangz/spark
python
def applymap(self, func: Callable[([Any], Any)]) -> 'DataFrame': '\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> np.int32:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> def str_len(x) -> int:\n ... return len(str(x))\n >>> df.applymap(str_len)\n 0 1\n 0 3 4\n 1 5 5\n\n >>> def power(x) -> float:\n ... return x ** 2\n >>> df.applymap(power)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.applymap(lambda x: x ** 2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n ' return self._apply_series_op((lambda psser: psser.apply(func)))
def aggregate(self, func: Union[(List[str], Dict[(Name, List[str])])]) -> 'DataFrame': 'Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func : dict or a list\n a dict mapping from column name (string) to\n aggregate functions (list of strings).\n If a list is given, the aggregation is performed against\n all columns.\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Invoke function on DataFrame.\n DataFrame.transform : Only perform transforming type operations.\n DataFrame.groupby : Perform operations over groups.\n Series.aggregate : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... columns=[\'A\', \'B\', \'C\'])\n\n >>> df\n A B C\n 0 1.0 2.0 3.0\n 1 4.0 5.0 6.0\n 2 7.0 8.0 9.0\n 3 NaN NaN NaN\n\n Aggregate these functions over the rows.\n\n >>> df.agg([\'sum\', \'min\'])[[\'A\', \'B\', \'C\']].sort_index()\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n Different aggregations per column.\n\n >>> df.agg({\'A\' : [\'sum\', \'min\'], \'B\' : [\'min\', \'max\']})[[\'A\', \'B\']].sort_index()\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n\n For multi-index columns:\n\n >>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])\n >>> df.agg([\'sum\', \'min\'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index()\n X Y\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n >>> aggregated = df.agg({("X", "A") : [\'sum\', \'min\'], ("X", "B") : [\'min\', \'max\']})\n >>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n ' from pyspark.pandas.groupby import GroupBy if isinstance(func, list): if all((isinstance(f, str) for f in func)): func = dict([(column, func) for column in self.columns]) else: raise ValueError('If the given function is a list, it should only contains function names as strings.') if ((not isinstance(func, dict)) or (not all(((is_name_like_value(key) and (isinstance(value, str) or (isinstance(value, list) and all((isinstance(v, str) for v in value))))) for (key, value) in func.items())))): raise ValueError('aggs must be a dict mapping from column name to aggregate functions (string or list of strings).') with option_context('compute.default_index_type', 'distributed'): psdf: DataFrame = DataFrame(GroupBy._spark_groupby(self, func)) return psdf.stack().droplevel(0)[list(func.keys())]
-858,337,314,020,279,400
Aggregate using one or more operations over the specified axis. Parameters ---------- func : dict or a list a dict mapping from column name (string) to aggregate functions (list of strings). If a list is given, the aggregation is performed against all columns. Returns ------- DataFrame Notes ----- `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Invoke function on DataFrame. DataFrame.transform : Only perform transforming type operations. DataFrame.groupby : Perform operations over groups. Series.aggregate : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) >>> df A B C 0 1.0 2.0 3.0 1 4.0 5.0 6.0 2 7.0 8.0 9.0 3 NaN NaN NaN Aggregate these functions over the rows. >>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index() A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index() A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN For multi-index columns: >>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) >>> df.agg(['sum', 'min'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index() X Y A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 >>> aggregated = df.agg({("X", "A") : ['sum', 'min'], ("X", "B") : ['min', 'max']}) >>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE X A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN
python/pyspark/pandas/frame.py
aggregate
Flyangz/spark
python
def aggregate(self, func: Union[(List[str], Dict[(Name, List[str])])]) -> 'DataFrame': 'Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func : dict or a list\n a dict mapping from column name (string) to\n aggregate functions (list of strings).\n If a list is given, the aggregation is performed against\n all columns.\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Invoke function on DataFrame.\n DataFrame.transform : Only perform transforming type operations.\n DataFrame.groupby : Perform operations over groups.\n Series.aggregate : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... columns=[\'A\', \'B\', \'C\'])\n\n >>> df\n A B C\n 0 1.0 2.0 3.0\n 1 4.0 5.0 6.0\n 2 7.0 8.0 9.0\n 3 NaN NaN NaN\n\n Aggregate these functions over the rows.\n\n >>> df.agg([\'sum\', \'min\'])[[\'A\', \'B\', \'C\']].sort_index()\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n Different aggregations per column.\n\n >>> df.agg({\'A\' : [\'sum\', \'min\'], \'B\' : [\'min\', \'max\']})[[\'A\', \'B\']].sort_index()\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n\n For multi-index columns:\n\n >>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])\n >>> df.agg([\'sum\', \'min\'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index()\n X Y\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n >>> aggregated = df.agg({("X", "A") : [\'sum\', \'min\'], ("X", "B") : [\'min\', \'max\']})\n >>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n ' from pyspark.pandas.groupby import GroupBy if isinstance(func, list): if all((isinstance(f, str) for f in func)): func = dict([(column, func) for column in self.columns]) else: raise ValueError('If the given function is a list, it should only contains function names as strings.') if ((not isinstance(func, dict)) or (not all(((is_name_like_value(key) and (isinstance(value, str) or (isinstance(value, list) and all((isinstance(v, str) for v in value))))) for (key, value) in func.items())))): raise ValueError('aggs must be a dict mapping from column name to aggregate functions (string or list of strings).') with option_context('compute.default_index_type', 'distributed'): psdf: DataFrame = DataFrame(GroupBy._spark_groupby(self, func)) return psdf.stack().droplevel(0)[list(func.keys())]
def corr(self, method: str='pearson') -> 'DataFrame': "\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank correlation\n\n Returns\n -------\n y : DataFrame\n\n See Also\n --------\n Series.corr\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr('pearson')\n dogs cats\n dogs 1.000000 -0.851064\n cats -0.851064 1.000000\n\n >>> df.corr('spearman')\n dogs cats\n dogs 1.000000 -0.948683\n cats -0.948683 1.000000\n\n Notes\n -----\n There are behavior differences between pandas-on-Spark and pandas.\n\n * the `method` argument only accepts 'pearson', 'spearman'\n * the data should not contain NaNs. pandas-on-Spark will return an error.\n * pandas-on-Spark doesn't support the following argument(s).\n\n * `min_periods` argument is not supported\n " return cast(DataFrame, ps.from_pandas(corr(self, method)))
8,847,163,846,708,294,000
Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : DataFrame See Also -------- Series.corr Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between pandas-on-Spark and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. pandas-on-Spark will return an error. * pandas-on-Spark doesn't support the following argument(s). * `min_periods` argument is not supported
python/pyspark/pandas/frame.py
corr
Flyangz/spark
python
def corr(self, method: str='pearson') -> 'DataFrame': "\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank correlation\n\n Returns\n -------\n y : DataFrame\n\n See Also\n --------\n Series.corr\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr('pearson')\n dogs cats\n dogs 1.000000 -0.851064\n cats -0.851064 1.000000\n\n >>> df.corr('spearman')\n dogs cats\n dogs 1.000000 -0.948683\n cats -0.948683 1.000000\n\n Notes\n -----\n There are behavior differences between pandas-on-Spark and pandas.\n\n * the `method` argument only accepts 'pearson', 'spearman'\n * the data should not contain NaNs. pandas-on-Spark will return an error.\n * pandas-on-Spark doesn't support the following argument(s).\n\n * `min_periods` argument is not supported\n " return cast(DataFrame, ps.from_pandas(corr(self, method)))
def iteritems(self) -> Iterator[Tuple[(Name, 'Series')]]: "\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'],\n ... columns=['species', 'population'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n\n >>> for label, content in df.iteritems():\n ... print('label:', label)\n ... print('content:', content.to_string())\n ...\n label: species\n content: panda bear\n polar bear\n koala marsupial\n label: population\n content: panda 1864\n polar 22000\n koala 80000\n " return (((label if (len(label) > 1) else label[0]), self._psser_for(label)) for label in self._internal.column_labels)
5,349,945,611,935,761,000
Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. Examples -------- >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000
python/pyspark/pandas/frame.py
iteritems
Flyangz/spark
python
def iteritems(self) -> Iterator[Tuple[(Name, 'Series')]]: "\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'],\n ... columns=['species', 'population'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n\n >>> for label, content in df.iteritems():\n ... print('label:', label)\n ... print('content:', content.to_string())\n ...\n label: species\n content: panda bear\n polar bear\n koala marsupial\n label: population\n content: panda 1864\n polar 22000\n koala 80000\n " return (((label if (len(label) > 1) else label[0]), self._psser_for(label)) for label in self._internal.column_labels)
def iterrows(self) -> Iterator[Tuple[(Name, pd.Series)]]: "\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : pandas.Series\n The data of the row as a Series.\n\n it : generator\n A generator that iterates over the rows of the frame.\n\n Notes\n -----\n\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). For example,\n\n >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n " columns = self.columns internal_index_columns = self._internal.index_spark_column_names internal_data_columns = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[(Name, Any)]: k = (row[internal_index_columns[0]] if (len(internal_index_columns) == 1) else tuple((row[c] for c in internal_index_columns))) v = [row[c] for c in internal_data_columns] return (k, v) for (k, v) in map(extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()): s = pd.Series(v, index=columns, name=k) (yield (k, s))
1,428,964,907,911,900,200
Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : pandas.Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect.
python/pyspark/pandas/frame.py
iterrows
Flyangz/spark
python
def iterrows(self) -> Iterator[Tuple[(Name, pd.Series)]]: "\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : pandas.Series\n The data of the row as a Series.\n\n it : generator\n A generator that iterates over the rows of the frame.\n\n Notes\n -----\n\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). For example,\n\n >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n " columns = self.columns internal_index_columns = self._internal.index_spark_column_names internal_data_columns = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[(Name, Any)]: k = (row[internal_index_columns[0]] if (len(internal_index_columns) == 1) else tuple((row[c] for c in internal_index_columns))) v = [row[c] for c in internal_data_columns] return (k, v) for (k, v) in map(extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()): s = pd.Series(v, index=columns, name=k) (yield (k, s))
def itertuples(self, index: bool=True, name: Optional[str]='PandasOnSpark') -> Iterator[Tuple]: '\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default "PandasOnSpark"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = ps.DataFrame({\'num_legs\': [4, 2], \'num_wings\': [0, 2]},\n ... index=[\'dog\', \'hawk\'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n\n >>> for row in df.itertuples():\n ... print(row)\n ...\n PandasOnSpark(Index=\'dog\', num_legs=4, num_wings=0)\n PandasOnSpark(Index=\'hawk\', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n PandasOnSpark(num_legs=4, num_wings=0)\n PandasOnSpark(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name=\'Animal\'):\n ... print(row)\n ...\n Animal(Index=\'dog\', num_legs=4, num_wings=0)\n Animal(Index=\'hawk\', num_legs=2, num_wings=2)\n ' fields = list(self.columns) if index: fields.insert(0, 'Index') index_spark_column_names = self._internal.index_spark_column_names data_spark_column_names = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[(Name, Any)]: k = (row[index_spark_column_names[0]] if (len(index_spark_column_names) == 1) else tuple((row[c] for c in index_spark_column_names))) v = [row[c] for c in data_spark_column_names] return (k, v) can_return_named_tuples = ((sys.version_info >= (3, 7)) or ((len(self.columns) + index) < 255)) if ((name is not None) and can_return_named_tuples): itertuple = namedtuple(name, fields, rename=True) for (k, v) in map(extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()): (yield itertuple._make((([k] if index else []) + list(v)))) else: for (k, v) in map(extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()): (yield tuple((([k] if index else []) + list(v))))
-2,867,164,090,168,643,600
Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "PandasOnSpark" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. On python versions < 3.7 regular tuples are returned for DataFrames with a large number of columns (>254). Examples -------- >>> df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... PandasOnSpark(Index='dog', num_legs=4, num_wings=0) PandasOnSpark(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... PandasOnSpark(num_legs=4, num_wings=0) PandasOnSpark(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2)
python/pyspark/pandas/frame.py
itertuples
Flyangz/spark
python
def itertuples(self, index: bool=True, name: Optional[str]='PandasOnSpark') -> Iterator[Tuple]: '\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default "PandasOnSpark"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = ps.DataFrame({\'num_legs\': [4, 2], \'num_wings\': [0, 2]},\n ... index=[\'dog\', \'hawk\'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n\n >>> for row in df.itertuples():\n ... print(row)\n ...\n PandasOnSpark(Index=\'dog\', num_legs=4, num_wings=0)\n PandasOnSpark(Index=\'hawk\', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n PandasOnSpark(num_legs=4, num_wings=0)\n PandasOnSpark(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name=\'Animal\'):\n ... print(row)\n ...\n Animal(Index=\'dog\', num_legs=4, num_wings=0)\n Animal(Index=\'hawk\', num_legs=2, num_wings=2)\n ' fields = list(self.columns) if index: fields.insert(0, 'Index') index_spark_column_names = self._internal.index_spark_column_names data_spark_column_names = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[(Name, Any)]: k = (row[index_spark_column_names[0]] if (len(index_spark_column_names) == 1) else tuple((row[c] for c in index_spark_column_names))) v = [row[c] for c in data_spark_column_names] return (k, v) can_return_named_tuples = ((sys.version_info >= (3, 7)) or ((len(self.columns) + index) < 255)) if ((name is not None) and can_return_named_tuples): itertuple = namedtuple(name, fields, rename=True) for (k, v) in map(extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()): (yield itertuple._make((([k] if index else []) + list(v)))) else: for (k, v) in map(extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()): (yield tuple((([k] if index else []) + list(v))))
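The doctests above cover the default name and index=False; a short sketch of the name=None fallback mentioned in the signature (assuming the same ps import and an active Spark session) would look roughly like this:

# Sketch: passing name=None makes itertuples yield ordinary tuples
# instead of namedtuples, with no field names attached.
import pyspark.pandas as ps

df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, index=['dog', 'hawk'])
for row in df.itertuples(name=None):
    print(row)  # e.g. ('dog', 4, 0)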
def items(self) -> Iterator[Tuple[(Name, 'Series')]]: 'This is an alias of ``iteritems``.' return self.iteritems()
6,259,565,771,771,294,000
This is an alias of ``iteritems``.
python/pyspark/pandas/frame.py
items
Flyangz/spark
python
def items(self) -> Iterator[Tuple[(Name, 'Series')]]: return self.iteritems()
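Since the docstring only states that items is an alias of iteritems, a brief sketch of what the iteration yields, assuming an active Spark session and hypothetical column names:

# Sketch: DataFrame.items yields (column label, Series) pairs.
import pyspark.pandas as ps

df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'],
                   'population': [1864, 22000, 80000]})
for label, content in df.items():
    print(label, content.dtype)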
def to_clipboard(self, excel: bool=True, sep: Optional[str]=None, **kwargs: Any) -> None: "\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n See Also\n --------\n read_clipboard : Read text from clipboard.\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n\n This function also works for Series:\n\n >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # 0, 1\n ... # 1, 2\n ... # 2, 3\n ... # 3, 4\n ... # 4, 5\n ... # 5, 6\n ... # 6, 7\n " args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
5,270,269,083,777,499,000
Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none See Also -------- read_clipboard : Read text from clipboard. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 This function also works for Series: >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... # 6, 7
python/pyspark/pandas/frame.py
to_clipboard
Flyangz/spark
python
def to_clipboard(self, excel: bool=True, sep: Optional[str]=None, **kwargs: Any) -> None: "\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n See Also\n --------\n read_clipboard : Read text from clipboard.\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n\n This function also works for Series:\n\n >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # 0, 1\n ... # 1, 2\n ... # 2, 3\n ... # 3, 4\n ... # 4, 5\n ... # 5, 6\n ... # 6, 7\n " args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str)]]=None, sparsify: Optional[bool]=None, index_names: bool=True, justify: Optional[str]=None, max_rows: Optional[int]=None, max_cols: Optional[int]=None, show_dimensions: bool=False, decimal: str='.', bold_rows: bool=True, classes: Optional[Union[(str, list, tuple)]]=None, escape: bool=True, notebook: bool=False, border: Optional[int]=None, table_id: Optional[str]=None, render_links: bool=False) -> Optional[str]: "\n Render a DataFrame as an HTML table.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. Default ``pd.options.html.border``.\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links (only works with pandas 0.24+).\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_string : Convert DataFrame to a string.\n " args = locals() if (max_rows is not None): psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args)
7,113,171,304,014,801,000
Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string.
python/pyspark/pandas/frame.py
to_html
Flyangz/spark
python
def to_html(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str)]]=None, sparsify: Optional[bool]=None, index_names: bool=True, justify: Optional[str]=None, max_rows: Optional[int]=None, max_cols: Optional[int]=None, show_dimensions: bool=False, decimal: str='.', bold_rows: bool=True, classes: Optional[Union[(str, list, tuple)]]=None, escape: bool=True, notebook: bool=False, border: Optional[int]=None, table_id: Optional[str]=None, render_links: bool=False) -> Optional[str]: "\n Render a DataFrame as an HTML table.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. Default ``pd.options.html.border``.\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links (only works with pandas 0.24+).\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_string : Convert DataFrame to a string.\n " args = locals() if (max_rows is not None): psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args)
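The to_html docstring above carries no worked example, so here is a minimal sketch, assuming an active Spark session and illustrative column names; to_html returns a string when buf is None:

# Sketch: render a small frame as an HTML table string.
import pyspark.pandas as ps

df = ps.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
html = df.to_html(index=False)       # full table as an HTML string
head_only = df.to_html(max_rows=2)   # only the head of the frame is collected and rendered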
def to_string(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str)]]=None, sparsify: Optional[bool]=None, index_names: bool=True, justify: Optional[str]=None, max_rows: Optional[int]=None, max_cols: Optional[int]=None, show_dimensions: bool=False, decimal: str='.', line_width: Optional[int]=None) -> Optional[str]: "\n Render a DataFrame to a console-friendly tabular output.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n line_width : int, optional\n Width to wrap a line in characters.\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> print(df.to_string(max_rows=2))\n col1 col2\n 0 1 4\n 1 2 5\n " args = locals() if (max_rows is not None): psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args)
-5,788,305,338,753,440,000
Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5
python/pyspark/pandas/frame.py
to_string
Flyangz/spark
python
def to_string(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str)]]=None, sparsify: Optional[bool]=None, index_names: bool=True, justify: Optional[str]=None, max_rows: Optional[int]=None, max_cols: Optional[int]=None, show_dimensions: bool=False, decimal: str='.', line_width: Optional[int]=None) -> Optional[str]: "\n Render a DataFrame to a console-friendly tabular output.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n line_width : int, optional\n Width to wrap a line in characters.\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> print(df.to_string(max_rows=2))\n col1 col2\n 0 1 4\n 1 2 5\n " args = locals() if (max_rows is not None): psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args)
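The doctests in the to_string record show the returned string; a small sketch of the buf parameter (assuming an active Spark session; the buffer name is illustrative) shows the write-to-buffer path instead:

# Sketch: write the text representation into a buffer rather than returning it.
import io
import pyspark.pandas as ps

df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]})
buffer = io.StringIO()
df.to_string(buf=buffer, max_rows=2)  # returns None; the text lands in the buffer
print(buffer.getvalue())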
def to_dict(self, orient: str='dict', into: Type=dict) -> Union[(List, Mapping)]: "\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... index=['row1', 'row2'],\n ... columns=['col1', 'col2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n\n >>> df_dict = df.to_dict()\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]\n\n You can specify the return orientation.\n\n >>> df_dict = df.to_dict('series')\n >>> sorted(df_dict.items())\n [('col1', row1 1\n row2 2\n Name: col1, dtype: int64), ('col2', row1 0.50\n row2 0.75\n Name: col2, dtype: float64)]\n\n >>> df_dict = df.to_dict('split')\n >>> sorted(df_dict.items()) # doctest: +ELLIPSIS\n [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]\n\n >>> df_dict = df.to_dict('records')\n >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS\n [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]\n\n >>> df_dict = df.to_dict('index')\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS\n [defaultdict(<class 'list'>, {'col..., 'col...}), defaultdict(<class 'list'>, {'col..., 'col...})]\n " args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
1,214,464,030,283,405,000
Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), defaultdict(<class 'list'>, {'col..., 'col...})]
python/pyspark/pandas/frame.py
to_dict
Flyangz/spark
python
def to_dict(self, orient: str='dict', into: Type=dict) -> Union[(List, Mapping)]: "\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... index=['row1', 'row2'],\n ... columns=['col1', 'col2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n\n >>> df_dict = df.to_dict()\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]\n\n You can specify the return orientation.\n\n >>> df_dict = df.to_dict('series')\n >>> sorted(df_dict.items())\n [('col1', row1 1\n row2 2\n Name: col1, dtype: int64), ('col2', row1 0.50\n row2 0.75\n Name: col2, dtype: float64)]\n\n >>> df_dict = df.to_dict('split')\n >>> sorted(df_dict.items()) # doctest: +ELLIPSIS\n [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]\n\n >>> df_dict = df.to_dict('records')\n >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS\n [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]\n\n >>> df_dict = df.to_dict('index')\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS\n [defaultdict(<class 'list'>, {'col..., 'col...}), defaultdict(<class 'list'>, {'col..., 'col...})]\n " args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
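One orientation listed in the to_dict docstring, 'list', is not exercised by its doctests; a brief sketch under the same assumptions (active Spark session, ps import):

# Sketch: orient='list' maps each column label to a plain Python list of values.
import pyspark.pandas as ps

df = ps.DataFrame({'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['row1', 'row2'])
df.to_dict(orient='list')   # {'col1': [1, 2], 'col2': [0.5, 0.75]}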
def to_latex(self, buf: Optional[IO[str]]=None, columns: Optional[List[Name]]=None, col_space: Optional[int]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str)]]=None, sparsify: Optional[bool]=None, index_names: bool=True, bold_rows: bool=False, column_format: Optional[str]=None, longtable: Optional[bool]=None, escape: Optional[bool]=None, encoding: Optional[str]=None, decimal: str='.', multicolumn: Optional[bool]=None, multicolumn_format: Optional[str]=None, multirow: Optional[bool]=None) -> Optional[str]: "\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice this into a LaTeX\n document. Requires usepackage{booktabs}.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, consider alternative formats.\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given, it is assumed to be aliases\n for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default ‘NaN’\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns’ elements by position or name. The result of\n each function must be a unicode string. List must be of length equal to the number of\n columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print every multiindex key at\n each row. By default, the value will be read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By\n default, ‘l’ will be used for all columns except columns of numbers, which default\n to ‘r’.\n longtable : bool, optional\n By default, the value will be read from the pandas config module. Use a longtable\n environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX\n preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config module. When set to False\n prevents from escaping latex special characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file, defaults to ‘ascii’ on\n Python 2 and ‘utf-8’ on Python 3.\n decimal : str, default ‘.’\n Character recognized as decimal separator, e.g. ‘,’ in Europe.\n multicolumn : bool, default True\n Use multicolumn to enhance MultiIndex columns. The default will be read from the config\n module.\n multicolumn_format : str, default ‘l’\n The alignment for multicolumns, similar to column_format The default will be read from\n the config module.\n multirow : bool, default False\n Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your\n LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read from the pandas config\n module.\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n\n Examples\n --------\n >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']},\n ... columns=['name', 'mask', 'weapon'])\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n <BLANKLINE>\n " args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
1,324,847,439,956,079,400
Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3. decimal : str, default ‘.’ Character recognized as decimal separator, e.g. ‘,’ in Europe. multicolumn : bool, default True Use multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default ‘l’ The alignment for multicolumns, similar to column_format The default will be read from the config module. multirow : bool, default False Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. Returns ------- str or None If buf is None, returns the resulting LateX format as a string. Otherwise returns None. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}, ... columns=['name', 'mask', 'weapon']) >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE \begin{tabular}{lll} \toprule name & mask & weapon \\ \midrule Raphael & red & sai \\ Donatello & purple & bo staff \\ \bottomrule \end{tabular} <BLANKLINE>
python/pyspark/pandas/frame.py
to_latex
Flyangz/spark
python
def to_latex(self, buf: Optional[IO[str]]=None, columns: Optional[List[Name]]=None, col_space: Optional[int]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str)]]=None, sparsify: Optional[bool]=None, index_names: bool=True, bold_rows: bool=False, column_format: Optional[str]=None, longtable: Optional[bool]=None, escape: Optional[bool]=None, encoding: Optional[str]=None, decimal: str='.', multicolumn: Optional[bool]=None, multicolumn_format: Optional[str]=None, multirow: Optional[bool]=None) -> Optional[str]: "\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice this into a LaTeX\n document. Requires usepackage{booktabs}.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, consider alternative formats.\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given, it is assumed to be aliases\n for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default ‘NaN’\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns’ elements by position or name. The result of\n each function must be a unicode string. List must be of length equal to the number of\n columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print every multiindex key at\n each row. By default, the value will be read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By\n default, ‘l’ will be used for all columns except columns of numbers, which default\n to ‘r’.\n longtable : bool, optional\n By default, the value will be read from the pandas config module. Use a longtable\n environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX\n preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config module. When set to False\n prevents from escaping latex special characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file, defaults to ‘ascii’ on\n Python 2 and ‘utf-8’ on Python 3.\n decimal : str, default ‘.’\n Character recognized as decimal separator, e.g. ‘,’ in Europe.\n multicolumn : bool, default True\n Use multicolumn to enhance MultiIndex columns. The default will be read from the config\n module.\n multicolumn_format : str, default ‘l’\n The alignment for multicolumns, similar to column_format The default will be read from\n the config module.\n multirow : bool, default False\n Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your\n LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read from the pandas config\n module.\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n\n Examples\n --------\n >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']},\n ... columns=['name', 'mask', 'weapon'])\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n <BLANKLINE>\n " args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
def transpose(self) -> 'DataFrame': "\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).transpose()\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the coerced dtype. For instance, if int and float have\n to be placed in same column, it becomes float. If type coercion is not\n possible, it fails.\n\n Also, note that the values in index should be unique because they become\n unique column names.\n\n In addition, if Spark 2.3 is used, the types should always be exactly same.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2'])\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP\n >>> df1_transposed # doctest: +SKIP\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes # doctest: +SKIP\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'score': [9.5, 8],\n ... 'kids': [0, 0],\n ... 'age': [12, 22]}\n >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age'])\n >>> df2\n score kids age\n 0 9.5 0 12\n 1 8.0 0 22\n\n >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP\n >>> df2_transposed # doctest: +SKIP\n 0 1\n age 12.0 22.0\n kids 0.0 0.0\n score 9.5 8.0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the coerced dtype:\n\n >>> df2.dtypes\n score float64\n kids int64\n age int64\n dtype: object\n\n >>> df2_transposed.dtypes # doctest: +SKIP\n 0 float64\n 1 float64\n dtype: object\n " max_compute_count = get_option('compute.max_rows') if (max_compute_count is not None): pdf = self.head((max_compute_count + 1))._to_internal_pandas() if (len(pdf) > max_compute_count): raise ValueError("Current DataFrame has more then the given limit {0} rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than {0} rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive.".format(max_compute_count)) return DataFrame(pdf.transpose()) pairs = F.explode(F.array(*[F.struct(*[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for (i, col) in enumerate(label)], *[self._internal.spark_column_for(label).alias('value')]) for label in self._internal.column_labels])) exploded_df = self._internal.spark_frame.withColumn('pairs', pairs).select([F.to_json(F.struct(F.array(*[scol for scol in self._internal.index_spark_columns]).alias('a'))).alias('index'), F.col('pairs.*')]) internal_index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)] pivoted_df = exploded_df.groupBy(internal_index_columns).pivot('index') transposed_df = pivoted_df.agg(F.first(F.col('value'))) new_data_columns = list(filter((lambda x: (x not in internal_index_columns)), transposed_df.columns)) column_labels = [(None if ((len(label) == 1) and (label[0] is None)) else label) for label in (tuple(json.loads(col)['a']) for col in new_data_columns)] internal = InternalFrame(spark_frame=transposed_df, index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns], index_names=self._internal.column_label_names, column_labels=column_labels, data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns], column_label_names=self._internal.index_names) return DataFrame(internal)
5,282,941,633,021,687,000
Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from pyspark.pandas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ps.DataFrame({'a': range(1001)}).transpose() Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Returns ------- DataFrame The transposed DataFrame. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the coerced dtype. For instance, if int and float have to be placed in same column, it becomes float. If type coercion is not possible, it fails. Also, note that the values in index should be unique because they become unique column names. In addition, if Spark 2.3 is used, the types should always be exactly same. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2']) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP >>> df1_transposed # doctest: +SKIP 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes # doctest: +SKIP 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'score': [9.5, 8], ... 'kids': [0, 0], ... 'age': [12, 22]} >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age']) >>> df2 score kids age 0 9.5 0 12 1 8.0 0 22 >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP >>> df2_transposed # doctest: +SKIP 0 1 age 12.0 22.0 kids 0.0 0.0 score 9.5 8.0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the coerced dtype: >>> df2.dtypes score float64 kids int64 age int64 dtype: object >>> df2_transposed.dtypes # doctest: +SKIP 0 float64 1 float64 dtype: object
python/pyspark/pandas/frame.py
transpose
Flyangz/spark
python
def transpose(self) -> 'DataFrame': "\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).transpose()\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the coerced dtype. For instance, if int and float have\n to be placed in same column, it becomes float. If type coercion is not\n possible, it fails.\n\n Also, note that the values in index should be unique because they become\n unique column names.\n\n In addition, if Spark 2.3 is used, the types should always be exactly same.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2'])\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP\n >>> df1_transposed # doctest: +SKIP\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes # doctest: +SKIP\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'score': [9.5, 8],\n ... 'kids': [0, 0],\n ... 'age': [12, 22]}\n >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age'])\n >>> df2\n score kids age\n 0 9.5 0 12\n 1 8.0 0 22\n\n >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP\n >>> df2_transposed # doctest: +SKIP\n 0 1\n age 12.0 22.0\n kids 0.0 0.0\n score 9.5 8.0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the coerced dtype:\n\n >>> df2.dtypes\n score float64\n kids int64\n age int64\n dtype: object\n\n >>> df2_transposed.dtypes # doctest: +SKIP\n 0 float64\n 1 float64\n dtype: object\n " max_compute_count = get_option('compute.max_rows') if (max_compute_count is not None): pdf = self.head((max_compute_count + 1))._to_internal_pandas() if (len(pdf) > max_compute_count): raise ValueError("Current DataFrame has more then the given limit {0} rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than {0} rows. 
Note that, before changing the 'compute.max_rows', this operation is considerably expensive.".format(max_compute_count)) return DataFrame(pdf.transpose()) pairs = F.explode(F.array(*[F.struct(*[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for (i, col) in enumerate(label)], *[self._internal.spark_column_for(label).alias('value')]) for label in self._internal.column_labels])) exploded_df = self._internal.spark_frame.withColumn('pairs', pairs).select([F.to_json(F.struct(F.array(*[scol for scol in self._internal.index_spark_columns]).alias('a'))).alias('index'), F.col('pairs.*')]) internal_index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)] pivoted_df = exploded_df.groupBy(internal_index_columns).pivot('index') transposed_df = pivoted_df.agg(F.first(F.col('value'))) new_data_columns = list(filter((lambda x: (x not in internal_index_columns)), transposed_df.columns)) column_labels = [(None if ((len(label) == 1) and (label[0] is None)) else label) for label in (tuple(json.loads(col)['a']) for col in new_data_columns)] internal = InternalFrame(spark_frame=transposed_df, index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns], index_names=self._internal.column_label_names, column_labels=column_labels, data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns], column_label_names=self._internal.index_names) return DataFrame(internal)
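A minimal sketch for the transpose record above, assuming an active Spark environment; the frame stays far below the 'compute.max_rows' guard described in the docstring, and sort_index() is used only to make the distributed output deterministic.

# Hedged sketch: transpose a small frame via the .T accessor.
import pyspark.pandas as ps
from pyspark.pandas.config import option_context

df1 = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
df1_transposed = df1.T.sort_index()   # .T is the accessor for transpose()
print(df1_transposed)

# Raising the guard is possible but, as the docstring warns, the operation is expensive:
with option_context('compute.max_rows', 2000):
    _ = df1.transpose()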
def apply(self, func: Callable, axis: Axis=0, args: Sequence[Any]=(), **kwds: Any) -> Union[('Series', 'DataFrame', 'Index')]: '\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame\'s index (``axis=0``) or the DataFrame\'s columns\n (``axis=1``).\n\n See also `Transform and apply a function\n <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.\n\n .. note:: when `axis` is 0 or \'index\', the `func` is unable to access\n to the whole input series. pandas-on-Spark internally splits the input series into\n multiple batches and calls `func` with each batch multiple times. Therefore, operations\n such as global aggregations are impossible. See the example below.\n\n >>> # This case does not return the length of whole series but of the batch internally\n ... # used.\n ... def length(s) -> int:\n ... return len(s)\n ...\n >>> df = ps.DataFrame({\'A\': range(1000)})\n >>> df.apply(length, axis=0) # doctest: +SKIP\n 0 83\n 1 83\n 2 83\n ...\n 10 83\n 11 83\n dtype: int32\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify the return type as `Series` or scalar value in ``func``,\n for instance, as below:\n\n >>> def square(s) -> ps.Series[np.int32]:\n ... return s ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n In case when axis is 1, it requires to specify `DataFrame` or scalar value\n with type hints as below:\n\n >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]:\n ... return x + 1\n\n If the return type is specified as `DataFrame`, the output column names become\n `c0, c1, c2 ... cn`. These names are positionally mapped to the returned\n DataFrame in ``func``.\n\n To specify the column names, you can assign them in a pandas friendly style as below:\n\n >>> def plus_one(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]:\n ... return x + 1\n\n >>> pdf = pd.DataFrame({\'a\': [1, 2, 3], \'b\': [3, 4, 5]})\n >>> def plus_one(x) -> ps.DataFrame[\n ... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]:\n ... return x + 1\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Axis along which the function is applied:\n\n * 0 or \'index\': apply function to each column.\n * 1 or \'columns\': apply function to each row.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap : For elementwise operations.\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.transform : Only perform transforming type operations.\n Series.apply : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[4, 9]] * 3, columns=[\'A\', \'B\'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> def sqrt(x) -> ps.Series[float]:\n ... 
return np.sqrt(x)\n ...\n >>> df.apply(sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n When `axis` is 1 or \'columns\', it applies the function for each row.\n\n >>> def summation(x) -> np.int64:\n ... return np.sum(x)\n ...\n >>> df.apply(summation, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Likewise, you can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n >>> df.apply(max, axis=1)\n 0 9\n 1 9\n 2 9\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n In order to specify the types when `axis` is \'1\', it should use DataFrame[...]\n annotation. In this case, the column names are automatically generated.\n\n >>> def identify(x) -> ps.DataFrame[(\'index\', int), [(\'A\', np.int64), (\'B\', np.int64)]]:\n ... return x\n ...\n >>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE\n A B\n index\n 0 4 9\n 1 4 9\n 2 4 9\n\n You can also specify extra arguments.\n\n >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]:\n ... return a + b + c\n ...\n >>> df.apply(plus_two, axis=1, args=(1,), c=3)\n c0 c1\n 0 8 13\n 1 8 13\n 2 8 13\n ' from pyspark.pandas.groupby import GroupBy from pyspark.pandas.series import first_series if (not isinstance(func, types.FunctionType)): assert callable(func), 'the first argument should be a callable function.' f = func func = (lambda *args, **kwargs: f(*args, **kwargs)) axis = validate_axis(axis) should_return_series = False spec = inspect.getfullargspec(func) return_sig = spec.annotations.get('return', None) should_infer_schema = (return_sig is None) should_retain_index = should_infer_schema def apply_func(pdf: pd.DataFrame) -> pd.DataFrame: pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) if isinstance(pdf_or_pser, pd.Series): return pdf_or_pser.to_frame() else: return pdf_or_pser self_applied: DataFrame = DataFrame(self._internal.resolved_copy) column_labels: Optional[List[Label]] = None if should_infer_schema: log_advice('If the type hints is not specified for `apply`, it is expensive to infer the data type internally.') limit = get_option('compute.shortcut_limit') pdf = self_applied.head((limit + 1))._to_internal_pandas() applied = pdf.apply(func, axis=axis, args=args, **kwds) psser_or_psdf = ps.from_pandas(applied) if (len(pdf) <= limit): return psser_or_psdf psdf = psser_or_psdf if isinstance(psser_or_psdf, ps.Series): should_return_series = True psdf = psser_or_psdf._psdf index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields] data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields] return_schema = StructType([field.struct_field for field in (index_fields + data_fields)]) output_func = GroupBy._make_pandas_df_builder_func(self_applied, apply_func, return_schema, retain_index=should_retain_index) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas((lambda iterator: map(output_func, iterator)), schema=return_schema) internal = psdf._internal.with_new_sdf(spark_frame=sdf, index_fields=index_fields, data_fields=data_fields) else: return_type = infer_return_type(func) require_index_axis = isinstance(return_type, SeriesType) require_column_axis = isinstance(return_type, DataFrameType) index_fields = 
None if require_index_axis: if (axis != 0): raise TypeError(("The given function should specify a scalar or a series as its type hints when axis is 0 or 'index'; however, the return type was %s" % return_sig)) dtype = cast(SeriesType, return_type).dtype spark_type = cast(SeriesType, return_type).spark_type data_fields = [InternalField(dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)) for name in self_applied.columns] return_schema = StructType([field.struct_field for field in data_fields]) elif require_column_axis: if (axis != 1): raise TypeError(("The given function should specify a scalar or a frame as its type hints when axis is 1 or 'column'; however, the return type was %s" % return_sig)) index_fields = cast(DataFrameType, return_type).index_fields should_retain_index = (len(index_fields) > 0) data_fields = cast(DataFrameType, return_type).data_fields return_schema = cast(DataFrameType, return_type).spark_type else: should_return_series = True spark_type = cast(ScalarType, return_type).spark_type dtype = cast(ScalarType, return_type).dtype data_fields = [InternalField(dtype=dtype, struct_field=StructField(name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type))] return_schema = StructType([field.struct_field for field in data_fields]) column_labels = [None] output_func = GroupBy._make_pandas_df_builder_func(self_applied, apply_func, return_schema, retain_index=should_retain_index) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas((lambda iterator: map(output_func, iterator)), schema=return_schema) index_spark_columns = None index_names: Optional[List[Optional[Tuple[(Any, ...)]]]] = None if should_retain_index: index_spark_columns = [scol_for(sdf, index_field.struct_field.name) for index_field in index_fields] if (not any([SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name) for index_field in index_fields])): index_names = [(index_field.struct_field.name,) for index_field in index_fields] internal = InternalFrame(spark_frame=sdf, index_names=index_names, index_spark_columns=index_spark_columns, index_fields=index_fields, data_fields=data_fields, column_labels=column_labels) result: DataFrame = DataFrame(internal) if should_return_series: return first_series(result) else: return result
2,361,285,583,190,661,600
Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: when `axis` is 0 or 'index', the `func` is unable to access to the whole input series. pandas-on-Spark internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole series but of the batch internally ... # used. ... def length(s) -> int: ... return len(s) ... >>> df = ps.DataFrame({'A': range(1000)}) >>> df.apply(length, axis=0) # doctest: +SKIP 0 83 1 83 2 83 ... 10 83 11 83 dtype: int32 .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify the return type as `Series` or scalar value in ``func``, for instance, as below: >>> def square(s) -> ps.Series[np.int32]: ... return s ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. In case when axis is 1, it requires to specify `DataFrame` or scalar value with type hints as below: >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]: ... return x + 1 If the return type is specified as `DataFrame`, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. To specify the column names, you can assign them in a pandas friendly style as below: >>> def plus_one(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]: ... return x + 1 >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}) >>> def plus_one(x) -> ps.DataFrame[ ... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]: ... return x + 1 Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap : For elementwise operations. DataFrame.aggregate : Only perform aggregating type operations. DataFrame.transform : Only perform transforming type operations. Series.apply : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> def sqrt(x) -> ps.Series[float]: ... return np.sqrt(x) ... >>> df.apply(sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 You can omit the type hint and let pandas-on-Spark infer its type. >>> df.apply(np.sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 When `axis` is 1 or 'columns', it applies the function for each row. >>> def summation(x) -> np.int64: ... return np.sum(x) ... >>> df.apply(summation, axis=1) 0 13 1 13 2 13 dtype: int64 Likewise, you can omit the type hint and let pandas-on-Spark infer its type. 
>>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 >>> df.apply(max, axis=1) 0 9 1 9 2 9 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object In order to specify the types when `axis` is '1', it should use DataFrame[...] annotation. In this case, the column names are automatically generated. >>> def identify(x) -> ps.DataFrame[('index', int), [('A', np.int64), ('B', np.int64)]]: ... return x ... >>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE A B index 0 4 9 1 4 9 2 4 9 You can also specify extra arguments. >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]: ... return a + b + c ... >>> df.apply(plus_two, axis=1, args=(1,), c=3) c0 c1 0 8 13 1 8 13 2 8 13
python/pyspark/pandas/frame.py
apply
Flyangz/spark
python
def apply(self, func: Callable, axis: Axis=0, args: Sequence[Any]=(), **kwds: Any) -> Union[('Series', 'DataFrame', 'Index')]: '\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame\'s index (``axis=0``) or the DataFrame\'s columns\n (``axis=1``).\n\n See also `Transform and apply a function\n <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.\n\n .. note:: when `axis` is 0 or \'index\', the `func` is unable to access\n to the whole input series. pandas-on-Spark internally splits the input series into\n multiple batches and calls `func` with each batch multiple times. Therefore, operations\n such as global aggregations are impossible. See the example below.\n\n >>> # This case does not return the length of whole series but of the batch internally\n ... # used.\n ... def length(s) -> int:\n ... return len(s)\n ...\n >>> df = ps.DataFrame({\'A\': range(1000)})\n >>> df.apply(length, axis=0) # doctest: +SKIP\n 0 83\n 1 83\n 2 83\n ...\n 10 83\n 11 83\n dtype: int32\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify the return type as `Series` or scalar value in ``func``,\n for instance, as below:\n\n >>> def square(s) -> ps.Series[np.int32]:\n ... return s ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n In case when axis is 1, it requires to specify `DataFrame` or scalar value\n with type hints as below:\n\n >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]:\n ... return x + 1\n\n If the return type is specified as `DataFrame`, the output column names become\n `c0, c1, c2 ... cn`. These names are positionally mapped to the returned\n DataFrame in ``func``.\n\n To specify the column names, you can assign them in a pandas friendly style as below:\n\n >>> def plus_one(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]:\n ... return x + 1\n\n >>> pdf = pd.DataFrame({\'a\': [1, 2, 3], \'b\': [3, 4, 5]})\n >>> def plus_one(x) -> ps.DataFrame[\n ... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]:\n ... return x + 1\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Axis along which the function is applied:\n\n * 0 or \'index\': apply function to each column.\n * 1 or \'columns\': apply function to each row.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap : For elementwise operations.\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.transform : Only perform transforming type operations.\n Series.apply : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[4, 9]] * 3, columns=[\'A\', \'B\'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> def sqrt(x) -> ps.Series[float]:\n ... 
return np.sqrt(x)\n ...\n >>> df.apply(sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n When `axis` is 1 or \'columns\', it applies the function for each row.\n\n >>> def summation(x) -> np.int64:\n ... return np.sum(x)\n ...\n >>> df.apply(summation, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Likewise, you can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n >>> df.apply(max, axis=1)\n 0 9\n 1 9\n 2 9\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n In order to specify the types when `axis` is \'1\', it should use DataFrame[...]\n annotation. In this case, the column names are automatically generated.\n\n >>> def identify(x) -> ps.DataFrame[(\'index\', int), [(\'A\', np.int64), (\'B\', np.int64)]]:\n ... return x\n ...\n >>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE\n A B\n index\n 0 4 9\n 1 4 9\n 2 4 9\n\n You can also specify extra arguments.\n\n >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]:\n ... return a + b + c\n ...\n >>> df.apply(plus_two, axis=1, args=(1,), c=3)\n c0 c1\n 0 8 13\n 1 8 13\n 2 8 13\n ' from pyspark.pandas.groupby import GroupBy from pyspark.pandas.series import first_series if (not isinstance(func, types.FunctionType)): assert callable(func), 'the first argument should be a callable function.' f = func func = (lambda *args, **kwargs: f(*args, **kwargs)) axis = validate_axis(axis) should_return_series = False spec = inspect.getfullargspec(func) return_sig = spec.annotations.get('return', None) should_infer_schema = (return_sig is None) should_retain_index = should_infer_schema def apply_func(pdf: pd.DataFrame) -> pd.DataFrame: pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) if isinstance(pdf_or_pser, pd.Series): return pdf_or_pser.to_frame() else: return pdf_or_pser self_applied: DataFrame = DataFrame(self._internal.resolved_copy) column_labels: Optional[List[Label]] = None if should_infer_schema: log_advice('If the type hints is not specified for `apply`, it is expensive to infer the data type internally.') limit = get_option('compute.shortcut_limit') pdf = self_applied.head((limit + 1))._to_internal_pandas() applied = pdf.apply(func, axis=axis, args=args, **kwds) psser_or_psdf = ps.from_pandas(applied) if (len(pdf) <= limit): return psser_or_psdf psdf = psser_or_psdf if isinstance(psser_or_psdf, ps.Series): should_return_series = True psdf = psser_or_psdf._psdf index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields] data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields] return_schema = StructType([field.struct_field for field in (index_fields + data_fields)]) output_func = GroupBy._make_pandas_df_builder_func(self_applied, apply_func, return_schema, retain_index=should_retain_index) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas((lambda iterator: map(output_func, iterator)), schema=return_schema) internal = psdf._internal.with_new_sdf(spark_frame=sdf, index_fields=index_fields, data_fields=data_fields) else: return_type = infer_return_type(func) require_index_axis = isinstance(return_type, SeriesType) require_column_axis = isinstance(return_type, DataFrameType) index_fields = 
None if require_index_axis: if (axis != 0): raise TypeError(("The given function should specify a scalar or a series as its type hints when axis is 0 or 'index'; however, the return type was %s" % return_sig)) dtype = cast(SeriesType, return_type).dtype spark_type = cast(SeriesType, return_type).spark_type data_fields = [InternalField(dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)) for name in self_applied.columns] return_schema = StructType([field.struct_field for field in data_fields]) elif require_column_axis: if (axis != 1): raise TypeError(("The given function should specify a scalar or a frame as its type hints when axis is 1 or 'column'; however, the return type was %s" % return_sig)) index_fields = cast(DataFrameType, return_type).index_fields should_retain_index = (len(index_fields) > 0) data_fields = cast(DataFrameType, return_type).data_fields return_schema = cast(DataFrameType, return_type).spark_type else: should_return_series = True spark_type = cast(ScalarType, return_type).spark_type dtype = cast(ScalarType, return_type).dtype data_fields = [InternalField(dtype=dtype, struct_field=StructField(name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type))] return_schema = StructType([field.struct_field for field in data_fields]) column_labels = [None] output_func = GroupBy._make_pandas_df_builder_func(self_applied, apply_func, return_schema, retain_index=should_retain_index) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas((lambda iterator: map(output_func, iterator)), schema=return_schema) index_spark_columns = None index_names: Optional[List[Optional[Tuple[(Any, ...)]]]] = None if should_retain_index: index_spark_columns = [scol_for(sdf, index_field.struct_field.name) for index_field in index_fields] if (not any([SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name) for index_field in index_fields])): index_names = [(index_field.struct_field.name,) for index_field in index_fields] internal = InternalFrame(spark_frame=sdf, index_names=index_names, index_spark_columns=index_spark_columns, index_fields=index_fields, data_fields=data_fields, column_labels=column_labels) result: DataFrame = DataFrame(internal) if should_return_series: return first_series(result) else: return result
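A minimal sketch for the apply record above, assuming numpy and an active Spark session; it shows one column-wise call with a Series return-type hint (which skips the schema-inference pass noted in the docstring) and one row-wise call that lets pandas-on-Spark infer the type, both mirroring the record's doctests.

# Hedged sketch: column-wise and row-wise DataFrame.apply.
import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B'])

def sqrt(x) -> ps.Series[float]:   # return-type hint: no inference pass needed
    return np.sqrt(x)

print(df.apply(sqrt, axis=0))      # axis=0: each column is passed to func as a Series

print(df.apply(np.sum, axis=1))    # axis=1: scalar per row, return type inferred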
def transform(self, func: Callable[(..., 'Series')], axis: Axis=0, *args: Any, **kwargs: Any) -> 'DataFrame': "\n Call ``func`` on self producing a Series with transformed values\n and that has the same length as its input.\n\n See also `Transform and apply a function\n <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n .. note:: the series within ``func`` is actually multiple pandas series as the\n segments of the whole pandas-on-Spark series; therefore, the length of each series\n is not guaranteed. As an example, an aggregation against each series\n does work as a global aggregation but an aggregation of each segment. See\n below:\n\n >>> def func(x) -> ps.Series[np.int32]:\n ... return x + sum(x)\n\n Parameters\n ----------\n func : function\n Function to use for transforming the data. It must work when pandas Series\n is passed.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n *args\n Positional arguments to pass to func.\n **kwargs\n Keyword arguments to pass to func.\n\n Returns\n -------\n DataFrame\n A DataFrame that must have the same length as self.\n\n Raises\n ------\n Exception : If the returned DataFrame has a different length than self.\n\n See Also\n --------\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.apply : Invoke function on DataFrame.\n Series.transform : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n >>> df.transform(square)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.transform(lambda x: x ** 2)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n For multi-index columns:\n\n >>> df.columns = [('X', 'A'), ('X', 'B')]\n >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n You can also specify extra arguments.\n\n >>> def calculation(x, y, z) -> ps.Series[int]:\n ... return x ** y + z\n >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 20 21\n 1 21 1044\n 2 1044 59069\n " if (not isinstance(func, types.FunctionType)): assert callable(func), 'the first argument should be a callable function.' 
f = func func = (lambda *args, **kwargs: f(*args, **kwargs)) axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') spec = inspect.getfullargspec(func) return_sig = spec.annotations.get('return', None) should_infer_schema = (return_sig is None) if should_infer_schema: log_advice('If the type hints is not specified for `transform`, it is expensive to infer the data type internally.') limit = get_option('compute.shortcut_limit') pdf = self.head((limit + 1))._to_internal_pandas() transformed = pdf.transform(func, axis, *args, **kwargs) psdf: DataFrame = DataFrame(transformed) if (len(pdf) <= limit): return psdf applied = [] data_fields = [] for (input_label, output_label) in zip(self._internal.column_labels, psdf._internal.column_labels): psser = self._psser_for(input_label) field = psdf._internal.field_for(output_label).normalize_spark_type() data_fields.append(field) return_schema = field.spark_type applied.append(psser.pandas_on_spark._transform_batch(func=(lambda c: func(c, *args, **kwargs)), return_type=SeriesType(field.dtype, return_schema))) internal = self._internal.with_new_columns(applied, data_fields=data_fields) return DataFrame(internal) else: return self._apply_series_op((lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs)))
1,455,522,935,585,915,100
Call ``func`` on self producing a Series with transformed values and that has the same length as its input. See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> ps.Series[np.int32]: ... return x ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. .. note:: the series within ``func`` is actually multiple pandas series as the segments of the whole pandas-on-Spark series; therefore, the length of each series is not guaranteed. As an example, an aggregation against each series does work as a global aggregation but an aggregation of each segment. See below: >>> def func(x) -> ps.Series[np.int32]: ... return x + sum(x) Parameters ---------- func : function Function to use for transforming the data. It must work when pandas Series is passed. axis : int, default 0 or 'index' Can only be set to 0 at the moment. *args Positional arguments to pass to func. **kwargs Keyword arguments to pass to func. Returns ------- DataFrame A DataFrame that must have the same length as self. Raises ------ Exception : If the returned DataFrame has a different length than self. See Also -------- DataFrame.aggregate : Only perform aggregating type operations. DataFrame.apply : Invoke function on DataFrame. Series.transform : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B']) >>> df A B 0 0 1 1 1 2 2 2 3 >>> def square(x) -> ps.Series[np.int32]: ... return x ** 2 >>> df.transform(square) A B 0 0 1 1 1 4 2 4 9 You can omit the type hint and let pandas-on-Spark infer its type. >>> df.transform(lambda x: x ** 2) A B 0 0 1 1 1 4 2 4 9 For multi-index columns: >>> df.columns = [('X', 'A'), ('X', 'B')] >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 4 2 4 9 >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 2 2 2 3 You can also specify extra arguments. >>> def calculation(x, y, z) -> ps.Series[int]: ... return x ** y + z >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE X A B 0 20 21 1 21 1044 2 1044 59069
python/pyspark/pandas/frame.py
transform
Flyangz/spark
python
def transform(self, func: Callable[(..., 'Series')], axis: Axis=0, *args: Any, **kwargs: Any) -> 'DataFrame': "\n Call ``func`` on self producing a Series with transformed values\n and that has the same length as its input.\n\n See also `Transform and apply a function\n <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n .. note:: the series within ``func`` is actually multiple pandas series as the\n segments of the whole pandas-on-Spark series; therefore, the length of each series\n is not guaranteed. As an example, an aggregation against each series\n does work as a global aggregation but an aggregation of each segment. See\n below:\n\n >>> def func(x) -> ps.Series[np.int32]:\n ... return x + sum(x)\n\n Parameters\n ----------\n func : function\n Function to use for transforming the data. It must work when pandas Series\n is passed.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n *args\n Positional arguments to pass to func.\n **kwargs\n Keyword arguments to pass to func.\n\n Returns\n -------\n DataFrame\n A DataFrame that must have the same length as self.\n\n Raises\n ------\n Exception : If the returned DataFrame has a different length than self.\n\n See Also\n --------\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.apply : Invoke function on DataFrame.\n Series.transform : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n >>> df.transform(square)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.transform(lambda x: x ** 2)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n For multi-index columns:\n\n >>> df.columns = [('X', 'A'), ('X', 'B')]\n >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n You can also specify extra arguments.\n\n >>> def calculation(x, y, z) -> ps.Series[int]:\n ... return x ** y + z\n >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 20 21\n 1 21 1044\n 2 1044 59069\n " if (not isinstance(func, types.FunctionType)): assert callable(func), 'the first argument should be a callable function.' 
f = func func = (lambda *args, **kwargs: f(*args, **kwargs)) axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') spec = inspect.getfullargspec(func) return_sig = spec.annotations.get('return', None) should_infer_schema = (return_sig is None) if should_infer_schema: log_advice('If the type hints is not specified for `transform`, it is expensive to infer the data type internally.') limit = get_option('compute.shortcut_limit') pdf = self.head((limit + 1))._to_internal_pandas() transformed = pdf.transform(func, axis, *args, **kwargs) psdf: DataFrame = DataFrame(transformed) if (len(pdf) <= limit): return psdf applied = [] data_fields = [] for (input_label, output_label) in zip(self._internal.column_labels, psdf._internal.column_labels): psser = self._psser_for(input_label) field = psdf._internal.field_for(output_label).normalize_spark_type() data_fields.append(field) return_schema = field.spark_type applied.append(psser.pandas_on_spark._transform_batch(func=(lambda c: func(c, *args, **kwargs)), return_type=SeriesType(field.dtype, return_schema))) internal = self._internal.with_new_columns(applied, data_fields=data_fields) return DataFrame(internal) else: return self._apply_series_op((lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs)))
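A minimal sketch for the transform record above, assuming numpy and an active Spark session; it applies an element-wise transformation with and without a return-type hint, following the record's doctests.

# Hedged sketch: DataFrame.transform with a hinted and an unhinted function.
import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])

def square(x) -> ps.Series[np.int32]:   # hinted: avoids the schema-inference pass
    return x ** 2

print(df.transform(square))
print(df.transform(lambda x: x ** 2))   # unhinted: type inferred from a sampled batch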
def pop(self, item: Name) -> 'DataFrame': "\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n\n Also support for MultiIndex\n\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df\n a b\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('a')\n name class\n 0 falcon bird\n 1 parrot bird\n 2 lion mammal\n 3 monkey mammal\n\n >>> df\n b\n max_speed\n 0 389.0\n 1 24.0\n 2 80.5\n 3 NaN\n " result = self[item] self._update_internal_frame(self.drop(columns=item)._internal) return result
-7,348,325,957,458,014,000
Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Label of column to be popped. Returns ------- Series Examples -------- >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN Also support for MultiIndex >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df a b name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('a') name class 0 falcon bird 1 parrot bird 2 lion mammal 3 monkey mammal >>> df b max_speed 0 389.0 1 24.0 2 80.5 3 NaN
python/pyspark/pandas/frame.py
pop
Flyangz/spark
python
def pop(self, item: Name) -> 'DataFrame': "\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n\n Also support for MultiIndex\n\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df\n a b\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('a')\n name class\n 0 falcon bird\n 1 parrot bird\n 2 lion mammal\n 3 monkey mammal\n\n >>> df\n b\n max_speed\n 0 389.0\n 1 24.0\n 2 80.5\n 3 NaN\n " result = self[item] self._update_internal_frame(self.drop(columns=item)._internal) return result
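A minimal sketch for the pop record above, assuming numpy and an active Spark session; popping a single column returns it as a Series and drops it from the frame in place, as the record's doctest shows.

# Hedged sketch: DataFrame.pop returns the column and removes it from the frame.
import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame([('falcon', 'bird', 389.0),
                   ('parrot', 'bird', 24.0),
                   ('lion', 'mammal', 80.5),
                   ('monkey', 'mammal', np.nan)],
                  columns=('name', 'class', 'max_speed'))
popped = df.pop('class')   # 'class' comes back as a Series ...
print(popped)
print(df)                  # ... and is no longer a column of df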
def xs(self, key: Name, axis: Axis=0, level: Optional[int]=None) -> DataFrameOrSeries: "\n Return cross-section from the DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : 0 or 'index', default 0\n Axis to retrieve cross-section on.\n currently only support 0 or 'index'\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n\n Returns\n -------\n DataFrame or Series\n Cross-section from the original DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = ps.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n locomotion\n walks 4 0\n\n >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE\n num_legs 4\n num_wings 0\n Name: (mammal, dog, walks), dtype: int64\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n " from pyspark.pandas.series import first_series if (not is_name_like_value(key)): raise TypeError("'key' should be a scalar value or tuple that contains scalar values") if ((level is not None) and is_name_like_tuple(key)): raise KeyError(key) axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') if (not is_name_like_tuple(key)): key = (key,) if (len(key) > self._internal.index_level): raise KeyError('Key length ({}) exceeds index depth ({})'.format(len(key), self._internal.index_level)) if (level is None): level = 0 rows = [(self._internal.index_spark_columns[lvl] == index) for (lvl, index) in enumerate(key, level)] internal = self._internal.with_filter(reduce((lambda x, y: (x & y)), rows)) if (len(key) == self._internal.index_level): psdf: DataFrame = DataFrame(internal) pdf = psdf.head(2)._to_internal_pandas() if (len(pdf) == 0): raise KeyError(key) elif (len(pdf) > 1): return psdf else: return first_series(DataFrame(pdf.transpose())) else: index_spark_columns = (internal.index_spark_columns[:level] + internal.index_spark_columns[(level + len(key)):]) index_names = (internal.index_names[:level] + internal.index_names[(level + len(key)):]) index_fields = (internal.index_fields[:level] + internal.index_fields[(level + len(key)):]) internal = internal.copy(index_spark_columns=index_spark_columns, index_names=index_names, 
index_fields=index_fields).resolved_copy return DataFrame(internal)
8,990,384,505,172,742,000
Return cross-section from the DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : 0 or 'index', default 0 Axis to retrieve cross-section on. currently only support 0 or 'index' level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. Returns ------- DataFrame or Series Cross-section from the original DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = ps.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings locomotion walks 4 0 >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class locomotion mammal walks 4 0
python/pyspark/pandas/frame.py
xs
Flyangz/spark
python
def xs(self, key: Name, axis: Axis=0, level: Optional[int]=None) -> DataFrameOrSeries: "\n Return cross-section from the DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : 0 or 'index', default 0\n Axis to retrieve cross-section on.\n currently only support 0 or 'index'\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n\n Returns\n -------\n DataFrame or Series\n Cross-section from the original DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = ps.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n locomotion\n walks 4 0\n\n >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE\n num_legs 4\n num_wings 0\n Name: (mammal, dog, walks), dtype: int64\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n " from pyspark.pandas.series import first_series if (not is_name_like_value(key)): raise TypeError("'key' should be a scalar value or tuple that contains scalar values") if ((level is not None) and is_name_like_tuple(key)): raise KeyError(key) axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') if (not is_name_like_tuple(key)): key = (key,) if (len(key) > self._internal.index_level): raise KeyError('Key length ({}) exceeds index depth ({})'.format(len(key), self._internal.index_level)) if (level is None): level = 0 rows = [(self._internal.index_spark_columns[lvl] == index) for (lvl, index) in enumerate(key, level)] internal = self._internal.with_filter(reduce((lambda x, y: (x & y)), rows)) if (len(key) == self._internal.index_level): psdf: DataFrame = DataFrame(internal) pdf = psdf.head(2)._to_internal_pandas() if (len(pdf) == 0): raise KeyError(key) elif (len(pdf) > 1): return psdf else: return first_series(DataFrame(pdf.transpose())) else: index_spark_columns = (internal.index_spark_columns[:level] + internal.index_spark_columns[(level + len(key)):]) index_names = (internal.index_names[:level] + internal.index_names[(level + len(key)):]) index_fields = (internal.index_fields[:level] + internal.index_fields[(level + len(key)):]) internal = internal.copy(index_spark_columns=index_spark_columns, index_names=index_names, 
index_fields=index_fields).resolved_copy return DataFrame(internal)
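A minimal sketch for the xs record above, assuming an active Spark session; it takes cross-sections of a three-level MultiIndex at the outermost level, with a full key, and at an explicit level, as in the record's doctests.

# Hedged sketch: DataFrame.xs on a MultiIndex.
import pyspark.pandas as ps

d = {'num_legs': [4, 4, 2, 2],
     'num_wings': [0, 0, 2, 2],
     'class': ['mammal', 'mammal', 'mammal', 'bird'],
     'animal': ['cat', 'dog', 'bat', 'penguin'],
     'locomotion': ['walks', 'walks', 'flies', 'walks']}
df = ps.DataFrame(data=d).set_index(['class', 'animal', 'locomotion'])

print(df.xs('mammal'))                    # match on the outer level, which is then dropped
print(df.xs(('mammal', 'dog', 'walks')))  # full key: the result comes back as a Series
print(df.xs('cat', level=1))              # select on the second index level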
def between_time(self, start_time: Union[(datetime.time, str)], end_time: Union[(datetime.time, str)], include_start: bool=True, include_end: bool=True, axis: Axis=0) -> 'DataFrame': "\n Select values between particular times of the day (example: 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n Returns\n -------\n DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> psdf.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> psdf.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n " axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('between_time currently only works for axis=0') if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError('Index must be DatetimeIndex') psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, '__index_name__') return_types = ([psdf.index.dtype] + list(psdf.dtypes)) def pandas_between_time(pdf) -> ps.DataFrame[return_types]: return pdf.between_time(start_time, end_time, include_start, include_end).reset_index() with option_context('compute.default_index_type', 'distributed'): psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time) return DataFrame(self._internal.copy(spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:]))
-4,282,171,030,924,375,000
Select values between particular times of the day (example: 9:00-9:30 AM). By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. include_start : bool, default True Whether the start time needs to be included in the result. include_end : bool, default True Whether the end time needs to be included in the result. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. Returns ------- DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx) >>> psdf A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> psdf.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> psdf.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4
python/pyspark/pandas/frame.py
between_time
Flyangz/spark
python
def between_time(self, start_time: Union[(datetime.time, str)], end_time: Union[(datetime.time, str)], include_start: bool=True, include_end: bool=True, axis: Axis=0) -> 'DataFrame': "\n Select values between particular times of the day (example: 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n Returns\n -------\n DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> psdf.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> psdf.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n " axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('between_time currently only works for axis=0') if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError('Index must be DatetimeIndex') psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, '__index_name__') return_types = ([psdf.index.dtype] + list(psdf.dtypes)) def pandas_between_time(pdf) -> ps.DataFrame[return_types]: return pdf.between_time(start_time, end_time, include_start, include_end).reset_index() with option_context('compute.default_index_type', 'distributed'): psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time) return DataFrame(self._internal.copy(spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:]))
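A standalone sketch of between_time that mirrors the doctest above; it assumes a Spark session and that both pandas and pyspark.pandas are available.
import pandas as pd
import pyspark.pandas as ps

# Four rows spaced 1 day and 20 minutes apart, so the time-of-day drifts.
idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)

print(psdf.between_time('0:15', '0:45'))  # rows whose time falls inside the window
print(psdf.between_time('0:45', '0:15'))  # reversed bounds select the complement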
def at_time(self, time: Union[(datetime.time, str)], asof: bool=False, axis: Axis=0) -> 'DataFrame': "\n Select values at particular time of day (example: 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> psdf.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n " if asof: raise NotImplementedError("'asof' argument is not supported") axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('at_time currently only works for axis=0') if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError('Index must be DatetimeIndex') psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, '__index_name__') return_types = ([psdf.index.dtype] + list(psdf.dtypes)) def pandas_at_time(pdf) -> ps.DataFrame[return_types]: return pdf.at_time(time, asof, axis).reset_index() with option_context('compute.default_index_type', 'distributed'): psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time) return DataFrame(self._internal.copy(spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:]))
4,169,738,953,941,191,000
Select values at particular time of day (example: 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx) >>> psdf A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> psdf.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4
python/pyspark/pandas/frame.py
at_time
Flyangz/spark
python
def at_time(self, time: Union[(datetime.time, str)], asof: bool=False, axis: Axis=0) -> 'DataFrame': "\n Select values at particular time of day (example: 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> psdf.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n " if asof: raise NotImplementedError("'asof' argument is not supported") axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('at_time currently only works for axis=0') if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError('Index must be DatetimeIndex') psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, '__index_name__') return_types = ([psdf.index.dtype] + list(psdf.dtypes)) def pandas_at_time(pdf) -> ps.DataFrame[return_types]: return pdf.at_time(time, asof, axis).reset_index() with option_context('compute.default_index_type', 'distributed'): psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time) return DataFrame(self._internal.copy(spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:]))
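Likewise for at_time, a short runnable sketch following its doctest; the same Spark-session assumption applies.
import pandas as pd
import pyspark.pandas as ps

idx = pd.date_range('2018-04-09', periods=4, freq='12H')
psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)

# Keep only the rows stamped exactly at 12:00.
print(psdf.at_time('12:00'))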
def where(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan, axis: Axis=None) -> 'DataFrame': '\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is True, keep the original value. Where False,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is False are replaced with corresponding value from other.\n axis : int, default None\n Can only be set to 0 at the moment for compatibility with pandas.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({\'A\': [0, 1, 2, 3, 4], \'B\':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({\'A\': [0, -1, -2, -3, -4], \'B\':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.where(df1 > 0).sort_index()\n A B\n 0 NaN 100.0\n 1 1.0 200.0\n 2 2.0 300.0\n 3 3.0 400.0\n 4 4.0 500.0\n\n >>> df1.where(df1 > 1, 10).sort_index()\n A B\n 0 10 100\n 1 10 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df1 + 100).sort_index()\n A B\n 0 100 100\n 1 101 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df2).sort_index()\n A B\n 0 0 100\n 1 -1 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n When the column name of cond is different from self, it treats all values are False\n\n >>> cond = ps.DataFrame({\'C\': [0, -1, -2, -3, -4], \'D\':[4, 3, 2, 1, 0]}) % 3 == 0\n >>> cond\n C D\n 0 True False\n 1 False True\n 2 False False\n 3 True False\n 4 False True\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n When the type of cond is Series, it just check boolean regardless of column name\n\n >>> cond = ps.Series([1, 2]) > 1\n >>> cond\n 0 False\n 1 True\n dtype: bool\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 1.0 200.0\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> reset_option("compute.ops_on_diff_frames")\n ' from pyspark.pandas.series import Series axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') tmp_cond_col_name = '__tmp_cond_col_{}__'.format tmp_other_col_name = '__tmp_other_col_{}__'.format psdf = self.copy() tmp_cond_col_names = [tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels] if isinstance(cond, DataFrame): cond = cond[[(cond._internal.spark_column_for(label) if (label in cond._internal.column_labels) else SF.lit(False)).alias(name) for (label, name) in zip(self._internal.column_labels, tmp_cond_col_names)]] psdf[tmp_cond_col_names] = cond elif isinstance(cond, Series): cond = cond.to_frame() cond = cond[[cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names]] psdf[tmp_cond_col_names] = cond else: raise TypeError('type of cond must be a DataFrame or Series') tmp_other_col_names = [tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels] if isinstance(other, DataFrame): other = other[[(other._internal.spark_column_for(label) if (label in other._internal.column_labels) else SF.lit(np.nan)).alias(name) for (label, name) in zip(self._internal.column_labels, tmp_other_col_names)]] psdf[tmp_other_col_names] = other elif isinstance(other, Series): other = other.to_frame() other = 
other[[other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names]] psdf[tmp_other_col_names] = other else: for label in self._internal.column_labels: psdf[tmp_other_col_name(name_like_string(label))] = other data_spark_columns = [] for label in self._internal.column_labels: data_spark_columns.append(F.when(psdf[tmp_cond_col_name(name_like_string(label))].spark.column, psdf._internal.spark_column_for(label)).otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column).alias(psdf._internal.spark_column_name_for(label))) return DataFrame(psdf._internal.with_new_columns(data_spark_columns, column_labels=self._internal.column_labels))
-4,389,328,793,971,374,600
Replace values where the condition is False. Parameters ---------- cond : boolean DataFrame Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is False are replaced with corresponding value from other. axis : int, default None Can only be set to 0 at the moment for compatibility with pandas. Returns ------- DataFrame Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.where(df1 > 0).sort_index() A B 0 NaN 100.0 1 1.0 200.0 2 2.0 300.0 3 3.0 400.0 4 4.0 500.0 >>> df1.where(df1 > 1, 10).sort_index() A B 0 10 100 1 10 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df1 + 100).sort_index() A B 0 100 100 1 101 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df2).sort_index() A B 0 0 100 1 -1 200 2 2 300 3 3 400 4 4 500 When the column name of cond is different from self, it treats all values as False >>> cond = ps.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0 >>> cond C D 0 True False 1 False True 2 False False 3 True False 4 False True >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN When the type of cond is Series, it just checks the boolean values regardless of column name >>> cond = ps.Series([1, 2]) > 1 >>> cond 0 False 1 True dtype: bool >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 1.0 200.0 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> reset_option("compute.ops_on_diff_frames")
python/pyspark/pandas/frame.py
where
Flyangz/spark
python
def where(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan, axis: Axis=None) -> 'DataFrame': '\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is True, keep the original value. Where False,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is False are replaced with corresponding value from other.\n axis : int, default None\n Can only be set to 0 at the moment for compatibility with pandas.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({\'A\': [0, 1, 2, 3, 4], \'B\':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({\'A\': [0, -1, -2, -3, -4], \'B\':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.where(df1 > 0).sort_index()\n A B\n 0 NaN 100.0\n 1 1.0 200.0\n 2 2.0 300.0\n 3 3.0 400.0\n 4 4.0 500.0\n\n >>> df1.where(df1 > 1, 10).sort_index()\n A B\n 0 10 100\n 1 10 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df1 + 100).sort_index()\n A B\n 0 100 100\n 1 101 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df2).sort_index()\n A B\n 0 0 100\n 1 -1 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n When the column name of cond is different from self, it treats all values are False\n\n >>> cond = ps.DataFrame({\'C\': [0, -1, -2, -3, -4], \'D\':[4, 3, 2, 1, 0]}) % 3 == 0\n >>> cond\n C D\n 0 True False\n 1 False True\n 2 False False\n 3 True False\n 4 False True\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n When the type of cond is Series, it just check boolean regardless of column name\n\n >>> cond = ps.Series([1, 2]) > 1\n >>> cond\n 0 False\n 1 True\n dtype: bool\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 1.0 200.0\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> reset_option("compute.ops_on_diff_frames")\n ' from pyspark.pandas.series import Series axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') tmp_cond_col_name = '__tmp_cond_col_{}__'.format tmp_other_col_name = '__tmp_other_col_{}__'.format psdf = self.copy() tmp_cond_col_names = [tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels] if isinstance(cond, DataFrame): cond = cond[[(cond._internal.spark_column_for(label) if (label in cond._internal.column_labels) else SF.lit(False)).alias(name) for (label, name) in zip(self._internal.column_labels, tmp_cond_col_names)]] psdf[tmp_cond_col_names] = cond elif isinstance(cond, Series): cond = cond.to_frame() cond = cond[[cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names]] psdf[tmp_cond_col_names] = cond else: raise TypeError('type of cond must be a DataFrame or Series') tmp_other_col_names = [tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels] if isinstance(other, DataFrame): other = other[[(other._internal.spark_column_for(label) if (label in other._internal.column_labels) else SF.lit(np.nan)).alias(name) for (label, name) in zip(self._internal.column_labels, tmp_other_col_names)]] psdf[tmp_other_col_names] = other elif isinstance(other, Series): other = other.to_frame() other = 
other[[other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names]] psdf[tmp_other_col_names] = other else: for label in self._internal.column_labels: psdf[tmp_other_col_name(name_like_string(label))] = other data_spark_columns = [] for label in self._internal.column_labels: data_spark_columns.append(F.when(psdf[tmp_cond_col_name(name_like_string(label))].spark.column, psdf._internal.spark_column_for(label)).otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column).alias(psdf._internal.spark_column_name_for(label))) return DataFrame(psdf._internal.with_new_columns(data_spark_columns, column_labels=self._internal.column_labels))
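A compact sketch of where, taken from its doctest; note that mixing two different frames requires the compute.ops_on_diff_frames option, as the doctest itself shows.
import pyspark.pandas as ps
from pyspark.pandas.config import set_option, reset_option

set_option("compute.ops_on_diff_frames", True)
df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B': [100, 200, 300, 400, 500]})
df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B': [-100, -200, -300, -400, -500]})

print(df1.where(df1 > 0).sort_index())       # False cells become NaN
print(df1.where(df1 > 1, df2).sort_index())  # False cells take values from df2
reset_option("compute.ops_on_diff_frames")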
def mask(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan) -> 'DataFrame': '\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is False, keep the original value. Where True,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is True are replaced with corresponding value from other.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({\'A\': [0, 1, 2, 3, 4], \'B\':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({\'A\': [0, -1, -2, -3, -4], \'B\':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.mask(df1 > 0).sort_index()\n A B\n 0 0.0 NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> df1.mask(df1 > 1, 10).sort_index()\n A B\n 0 0 10\n 1 1 10\n 2 10 10\n 3 10 10\n 4 10 10\n\n >>> df1.mask(df1 > 1, df1 + 100).sort_index()\n A B\n 0 0 200\n 1 1 300\n 2 102 400\n 3 103 500\n 4 104 600\n\n >>> df1.mask(df1 > 1, df2).sort_index()\n A B\n 0 0 -100\n 1 1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> reset_option("compute.ops_on_diff_frames")\n ' from pyspark.pandas.series import Series if (not isinstance(cond, (DataFrame, Series))): raise TypeError('type of cond must be a DataFrame or Series') cond_inversed = cond._apply_series_op((lambda psser: (~ psser))) return self.where(cond_inversed, other)
2,378,262,501,612,776,400
Replace values where the condition is True. Parameters ---------- cond : boolean DataFrame Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is True are replaced with corresponding value from other. Returns ------- DataFrame Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.mask(df1 > 0).sort_index() A B 0 0.0 NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> df1.mask(df1 > 1, 10).sort_index() A B 0 0 10 1 1 10 2 10 10 3 10 10 4 10 10 >>> df1.mask(df1 > 1, df1 + 100).sort_index() A B 0 0 200 1 1 300 2 102 400 3 103 500 4 104 600 >>> df1.mask(df1 > 1, df2).sort_index() A B 0 0 -100 1 1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> reset_option("compute.ops_on_diff_frames")
python/pyspark/pandas/frame.py
mask
Flyangz/spark
python
def mask(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan) -> 'DataFrame': '\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is False, keep the original value. Where True,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is True are replaced with corresponding value from other.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({\'A\': [0, 1, 2, 3, 4], \'B\':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({\'A\': [0, -1, -2, -3, -4], \'B\':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.mask(df1 > 0).sort_index()\n A B\n 0 0.0 NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> df1.mask(df1 > 1, 10).sort_index()\n A B\n 0 0 10\n 1 1 10\n 2 10 10\n 3 10 10\n 4 10 10\n\n >>> df1.mask(df1 > 1, df1 + 100).sort_index()\n A B\n 0 0 200\n 1 1 300\n 2 102 400\n 3 103 500\n 4 104 600\n\n >>> df1.mask(df1 > 1, df2).sort_index()\n A B\n 0 0 -100\n 1 1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> reset_option("compute.ops_on_diff_frames")\n ' from pyspark.pandas.series import Series if (not isinstance(cond, (DataFrame, Series))): raise TypeError('type of cond must be a DataFrame or Series') cond_inversed = cond._apply_series_op((lambda psser: (~ psser))) return self.where(cond_inversed, other)
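mask is the logical inverse of where; a small sketch of that relationship, again following the doctest data.
import pyspark.pandas as ps

df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B': [100, 200, 300, 400, 500]})

# Entries where the condition is True are replaced; the rest are kept.
print(df1.mask(df1 > 1, 10).sort_index())
# Internally (per the body above) this is where() applied to the negated condition.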
@property def index(self) -> 'Index': 'The index (row labels) Column of the DataFrame.\n\n Currently not supported when the DataFrame has no index.\n\n See Also\n --------\n Index\n ' from pyspark.pandas.indexes.base import Index return Index._new_instance(self)
7,843,778,894,878,117,000
The index (row labels) column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index
python/pyspark/pandas/frame.py
index
Flyangz/spark
python
@property def index(self) -> 'Index': 'The index (row labels) Column of the DataFrame.\n\n Currently not supported when the DataFrame has no index.\n\n See Also\n --------\n Index\n ' from pyspark.pandas.indexes.base import Index return Index._new_instance(self)
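A short sketch of the index property; the to_pandas() call used to make the result visible locally is an assumption about the Index API, not something stated in the record above.
import pyspark.pandas as ps

df = ps.DataFrame({'a': [1, 2, 3]}, index=[10, 20, 30])
idx = df.index          # pandas-on-Spark Index backed by the same Spark frame
print(idx.to_pandas())  # materialize locally for inspection (assumed API)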
@property def empty(self) -> bool: "\n Returns true if the current DataFrame is empty. Otherwise, returns false.\n\n Examples\n --------\n >>> ps.range(10).empty\n False\n\n >>> ps.range(0).empty\n True\n\n >>> ps.DataFrame({}, index=list('abc')).empty\n True\n " return ((len(self._internal.column_labels) == 0) or self._internal.resolved_copy.spark_frame.rdd.isEmpty())
661,226,960,287,139,000
Returns true if the current DataFrame is empty. Otherwise, returns false. Examples -------- >>> ps.range(10).empty False >>> ps.range(0).empty True >>> ps.DataFrame({}, index=list('abc')).empty True
python/pyspark/pandas/frame.py
empty
Flyangz/spark
python
@property def empty(self) -> bool: "\n Returns true if the current DataFrame is empty. Otherwise, returns false.\n\n Examples\n --------\n >>> ps.range(10).empty\n False\n\n >>> ps.range(0).empty\n True\n\n >>> ps.DataFrame({}, index=list('abc')).empty\n True\n " return ((len(self._internal.column_labels) == 0) or self._internal.resolved_copy.spark_frame.rdd.isEmpty())
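The empty property's doctest, repackaged as a runnable script under the same Spark-session assumption.
import pyspark.pandas as ps

print(ps.range(10).empty)                         # False: has rows and a column
print(ps.range(0).empty)                          # True: no rows
print(ps.DataFrame({}, index=list('abc')).empty)  # True: no columns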
@property def style(self) -> 'Styler': '\n Property returning a Styler object containing methods for\n building a styled HTML representation for the DataFrame.\n\n .. note:: currently it collects top 1000 rows and return its\n pandas `pandas.io.formats.style.Styler` instance.\n\n Examples\n --------\n >>> ps.range(1001).style # doctest: +SKIP\n <pandas.io.formats.style.Styler object at ...>\n ' max_results = get_option('compute.max_rows') pdf = self.head((max_results + 1))._to_internal_pandas() if (len(pdf) > max_results): warnings.warn(("'style' property will only use top %s rows." % max_results), UserWarning) return pdf.head(max_results).style
-856,584,549,295,894,000
Property returning a Styler object containing methods for building a styled HTML representation for the DataFrame. .. note:: currently it collects the top 1000 rows and returns its pandas `pandas.io.formats.style.Styler` instance. Examples -------- >>> ps.range(1001).style # doctest: +SKIP <pandas.io.formats.style.Styler object at ...>
python/pyspark/pandas/frame.py
style
Flyangz/spark
python
@property def style(self) -> 'Styler': '\n Property returning a Styler object containing methods for\n building a styled HTML representation for the DataFrame.\n\n .. note:: currently it collects top 1000 rows and return its\n pandas `pandas.io.formats.style.Styler` instance.\n\n Examples\n --------\n >>> ps.range(1001).style # doctest: +SKIP\n <pandas.io.formats.style.Styler object at ...>\n ' max_results = get_option('compute.max_rows') pdf = self.head((max_results + 1))._to_internal_pandas() if (len(pdf) > max_results): warnings.warn(("'style' property will only use top %s rows." % max_results), UserWarning) return pdf.head(max_results).style
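A minimal sketch of the style property; per the note above it collects at most compute.max_rows rows (1000 by default) and hands back a plain pandas Styler.
import pyspark.pandas as ps

styler = ps.range(1001).style  # warns that only the top 1000 rows are used
print(type(styler))            # pandas.io.formats.style.Styler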
def set_index(self, keys: Union[(Name, List[Name])], drop: bool=True, append: bool=False, inplace: bool=False) -> Optional['DataFrame']: 'Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. Here, "array"\n encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n Changed row labels.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'month\': [1, 4, 7, 10],\n ... \'year\': [2012, 2014, 2013, 2014],\n ... \'sale\': [55, 40, 84, 31]},\n ... columns=[\'month\', \'year\', \'sale\'])\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the \'month\' column:\n\n >>> df.set_index(\'month\') # doctest: +NORMALIZE_WHITESPACE\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns \'year\' and \'month\':\n\n >>> df.set_index([\'year\', \'month\']) # doctest: +NORMALIZE_WHITESPACE\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n ' inplace = validate_bool_kwarg(inplace, 'inplace') key_list: List[Label] if is_name_like_tuple(keys): key_list = [cast(Label, keys)] elif is_name_like_value(keys): key_list = [(keys,)] else: key_list = [(key if is_name_like_tuple(key) else (key,)) for key in keys] columns = set(self._internal.column_labels) for key in key_list: if (key not in columns): raise KeyError(name_like_string(key)) if drop: column_labels = [label for label in self._internal.column_labels if (label not in key_list)] else: column_labels = self._internal.column_labels if append: index_spark_columns = (self._internal.index_spark_columns + [self._internal.spark_column_for(label) for label in key_list]) index_names = (self._internal.index_names + key_list) index_fields = (self._internal.index_fields + [self._internal.field_for(label) for label in key_list]) else: index_spark_columns = [self._internal.spark_column_for(label) for label in key_list] index_names = key_list index_fields = [self._internal.field_for(label) for label in key_list] internal = self._internal.copy(index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels], data_fields=[self._internal.field_for(label) for label in column_labels]) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal)
6,711,258,925,545,952,000
Set the DataFrame index (row labels) using one or more existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ps.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31
python/pyspark/pandas/frame.py
set_index
Flyangz/spark
python
def set_index(self, keys: Union[(Name, List[Name])], drop: bool=True, append: bool=False, inplace: bool=False) -> Optional['DataFrame']: 'Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. Here, "array"\n encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n Changed row labels.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'month\': [1, 4, 7, 10],\n ... \'year\': [2012, 2014, 2013, 2014],\n ... \'sale\': [55, 40, 84, 31]},\n ... columns=[\'month\', \'year\', \'sale\'])\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the \'month\' column:\n\n >>> df.set_index(\'month\') # doctest: +NORMALIZE_WHITESPACE\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns \'year\' and \'month\':\n\n >>> df.set_index([\'year\', \'month\']) # doctest: +NORMALIZE_WHITESPACE\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n ' inplace = validate_bool_kwarg(inplace, 'inplace') key_list: List[Label] if is_name_like_tuple(keys): key_list = [cast(Label, keys)] elif is_name_like_value(keys): key_list = [(keys,)] else: key_list = [(key if is_name_like_tuple(key) else (key,)) for key in keys] columns = set(self._internal.column_labels) for key in key_list: if (key not in columns): raise KeyError(name_like_string(key)) if drop: column_labels = [label for label in self._internal.column_labels if (label not in key_list)] else: column_labels = self._internal.column_labels if append: index_spark_columns = (self._internal.index_spark_columns + [self._internal.spark_column_for(label) for label in key_list]) index_names = (self._internal.index_names + key_list) index_fields = (self._internal.index_fields + [self._internal.field_for(label) for label in key_list]) else: index_spark_columns = [self._internal.spark_column_for(label) for label in key_list] index_names = key_list index_fields = [self._internal.field_for(label) for label in key_list] internal = self._internal.copy(index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels], data_fields=[self._internal.field_for(label) for label in column_labels]) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal)
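A standalone sketch of set_index built on its doctest frame; the drop=False variant simply exercises the documented keyword.
import pyspark.pandas as ps

df = ps.DataFrame({'month': [1, 4, 7, 10],
                   'year': [2012, 2014, 2013, 2014],
                   'sale': [55, 40, 84, 31]})

print(df.set_index('month'))                         # single-column index, column dropped
print(df.set_index(['year', 'month'], drop=False))   # MultiIndex, originals kept as data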
def reset_index(self, level: Optional[Union[(int, Name, Sequence[Union[(int, Name)]])]]=None, drop: bool=False, inplace: bool=False, col_level: int=0, col_fill: str='') -> Optional['DataFrame']: "Reset the index, or a level of it.\n\n For DataFrame with multi-level index, return new DataFrame with labeling information in\n the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.\n For a standard index, the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame\n DataFrame with the new index.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n\n Examples\n --------\n >>> df = ps.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark\n does not automatically add a sequential index. The following 0, 1, 2, 3 are only\n there when we display the DataFrame.\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = ps.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. 
We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1,\n ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1,\n ... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n " inplace = validate_bool_kwarg(inplace, 'inplace') multi_index = (self._internal.index_level > 1) def rename(index: int) -> Label: if multi_index: return ('level_{}'.format(index),) elif (('index',) not in self._internal.column_labels): return ('index',) else: return ('level_{}'.format(index),) if (level is None): new_column_labels = [(name if (name is not None) else rename(i)) for (i, name) in enumerate(self._internal.index_names)] new_data_spark_columns = [scol.alias(name_like_string(label)) for (scol, label) in zip(self._internal.index_spark_columns, new_column_labels)] new_data_fields = self._internal.index_fields index_spark_columns = [] index_names = [] index_fields = [] else: if is_list_like(level): level = list(cast(Sequence[Union[(int, Name)]], level)) if (isinstance(level, int) or is_name_like_tuple(level)): level_list = [cast(Union[(int, Label)], level)] elif is_name_like_value(level): level_list = [(level,)] else: level_list = [(lvl if (isinstance(lvl, int) or is_name_like_tuple(lvl)) else (lvl,)) for lvl in level] if all((isinstance(lvl, int) for lvl in level_list)): int_level_list = cast(List[int], level_list) for lev in int_level_list: if (lev >= self._internal.index_level): raise IndexError('Too many levels: Index has only {} level, not {}'.format(self._internal.index_level, (lev + 1))) idx = int_level_list elif all((is_name_like_tuple(lev) for lev in level_list)): idx = [] for label in cast(List[Label], level_list): try: i = self._internal.index_names.index(label) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})'.format(name_like_string(self._internal.index_names[0]))) else: raise ValueError('Level should be all int or all string.') idx.sort() new_column_labels = [] new_data_spark_columns = [] new_data_fields = [] index_spark_columns = self._internal.index_spark_columns.copy() index_names = self._internal.index_names.copy() index_fields = self._internal.index_fields.copy() for i in idx[::(- 1)]: name = index_names.pop(i) new_column_labels.insert(0, (name if (name is not None) else rename(i))) scol = index_spark_columns.pop(i) new_data_spark_columns.insert(0, scol.alias(name_like_string(name))) new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name))) if drop: new_data_spark_columns = [] new_column_labels = [] new_data_fields = [] for label in new_column_labels: if (label in self._internal.column_labels): raise ValueError('cannot insert {}, already exists'.format(name_like_string(label))) if 
(self._internal.column_labels_level > 1): column_depth = len(self._internal.column_labels[0]) if (col_level >= column_depth): raise IndexError('Too many levels: Index has only {} levels, not {}'.format(column_depth, (col_level + 1))) if any((((col_level + len(label)) > column_depth) for label in new_column_labels)): raise ValueError('Item must have length equal to number of levels.') new_column_labels = [tuple(((([col_fill] * col_level) + list(label)) + ([col_fill] * (column_depth - (len(label) + col_level))))) for label in new_column_labels] internal = self._internal.copy(index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=(new_column_labels + self._internal.column_labels), data_spark_columns=(new_data_spark_columns + self._internal.data_spark_columns), data_fields=(new_data_fields + self._internal.data_fields)) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal)
5,446,117,962,936,664,000
Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. Examples -------- >>> df = ps.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = ps.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df # doctest: +NORMALIZE_WHITESPACE speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, ... 
col_fill='species') # doctest: +NORMALIZE_WHITESPACE species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, ... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump
python/pyspark/pandas/frame.py
reset_index
Flyangz/spark
python
def reset_index(self, level: Optional[Union[(int, Name, Sequence[Union[(int, Name)]])]]=None, drop: bool=False, inplace: bool=False, col_level: int=0, col_fill: str=) -> Optional['DataFrame']: "Reset the index, or a level of it.\n\n For DataFrame with multi-level index, return new DataFrame with labeling information in\n the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.\n For a standard index, the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. By default it is inserted into the first\n level.\n col_fill : object, default \n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame\n DataFrame with the new index.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n\n Examples\n --------\n >>> df = ps.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark\n does not automatically add a sequential index. The following 0, 1, 2, 3 are only\n there when we display the DataFrame.\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = ps.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. 
We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1,\n ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1,\n ... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n " inplace = validate_bool_kwarg(inplace, 'inplace') multi_index = (self._internal.index_level > 1) def rename(index: int) -> Label: if multi_index: return ('level_{}'.format(index),) elif (('index',) not in self._internal.column_labels): return ('index',) else: return ('level_{}'.format(index),) if (level is None): new_column_labels = [(name if (name is not None) else rename(i)) for (i, name) in enumerate(self._internal.index_names)] new_data_spark_columns = [scol.alias(name_like_string(label)) for (scol, label) in zip(self._internal.index_spark_columns, new_column_labels)] new_data_fields = self._internal.index_fields index_spark_columns = [] index_names = [] index_fields = [] else: if is_list_like(level): level = list(cast(Sequence[Union[(int, Name)]], level)) if (isinstance(level, int) or is_name_like_tuple(level)): level_list = [cast(Union[(int, Label)], level)] elif is_name_like_value(level): level_list = [(level,)] else: level_list = [(lvl if (isinstance(lvl, int) or is_name_like_tuple(lvl)) else (lvl,)) for lvl in level] if all((isinstance(lvl, int) for lvl in level_list)): int_level_list = cast(List[int], level_list) for lev in int_level_list: if (lev >= self._internal.index_level): raise IndexError('Too many levels: Index has only {} level, not {}'.format(self._internal.index_level, (lev + 1))) idx = int_level_list elif all((is_name_like_tuple(lev) for lev in level_list)): idx = [] for label in cast(List[Label], level_list): try: i = self._internal.index_names.index(label) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})'.format(name_like_string(self._internal.index_names[0]))) else: raise ValueError('Level should be all int or all string.') idx.sort() new_column_labels = [] new_data_spark_columns = [] new_data_fields = [] index_spark_columns = self._internal.index_spark_columns.copy() index_names = self._internal.index_names.copy() index_fields = self._internal.index_fields.copy() for i in idx[::(- 1)]: name = index_names.pop(i) new_column_labels.insert(0, (name if (name is not None) else rename(i))) scol = index_spark_columns.pop(i) new_data_spark_columns.insert(0, scol.alias(name_like_string(name))) new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name))) if drop: new_data_spark_columns = [] new_column_labels = [] new_data_fields = [] for label in new_column_labels: if (label in self._internal.column_labels): raise ValueError('cannot insert {}, already exists'.format(name_like_string(label))) if 
(self._internal.column_labels_level > 1): column_depth = len(self._internal.column_labels[0]) if (col_level >= column_depth): raise IndexError('Too many levels: Index has only {} levels, not {}'.format(column_depth, (col_level + 1))) if any((((col_level + len(label)) > column_depth) for label in new_column_labels)): raise ValueError('Item must have length equal to number of levels.') new_column_labels = [tuple(((([col_fill] * col_level) + list(label)) + ([col_fill] * (column_depth - (len(label) + col_level))))) for label in new_column_labels] internal = self._internal.copy(index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=(new_column_labels + self._internal.column_labels), data_spark_columns=(new_data_spark_columns + self._internal.data_spark_columns), data_fields=(new_data_fields + self._internal.data_fields)) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal)
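A small round-trip sketch for reset_index, pairing it with set_index above; it assumes the same Spark session.
import pyspark.pandas as ps

df = ps.DataFrame({'month': [1, 4, 7, 10], 'sale': [55, 40, 84, 31]}).set_index('month')

print(df.reset_index())           # index moves back into a regular 'month' column
print(df.reset_index(drop=True))  # index is discarded instead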
def isnull(self) -> 'DataFrame': "\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\n See Also\n --------\n DataFrame.notnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.isnull()\n 0 1\n 0 False False\n 1 False True\n 2 False True\n 3 False False\n\n >>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])\n >>> df.isnull()\n 0 1 2\n 0 True False True\n 1 False True False\n " return self._apply_series_op((lambda psser: psser.isnull()))
9,060,744,655,024,991,000
Detects missing values for items in the current DataFrame. Returns a boolean same-sized DataFrame indicating if the values are NA. NA values, such as None or numpy.NaN, get mapped to True values. Everything else gets mapped to False values. See Also -------- DataFrame.notnull Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False
python/pyspark/pandas/frame.py
isnull
Flyangz/spark
python
def isnull(self) -> 'DataFrame': "\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\n See Also\n --------\n DataFrame.notnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.isnull()\n 0 1\n 0 False False\n 1 False True\n 2 False True\n 3 False False\n\n >>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])\n >>> df.isnull()\n 0 1 2\n 0 True False True\n 1 False True False\n " return self._apply_series_op((lambda psser: psser.isnull()))
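The isnull doctest as a runnable script.
import pyspark.pandas as ps

df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
print(df.isnull())  # True wherever the value is None/NaN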
def notnull(self) -> 'DataFrame': "\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).\n\n See Also\n --------\n DataFrame.isnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.notnull()\n 0 1\n 0 True True\n 1 True False\n 2 True False\n 3 True True\n\n >>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df.notnull()\n 0 1 2\n 0 True True True\n 1 True False True\n " return self._apply_series_op((lambda psser: psser.notnull()))
-3,189,777,233,647,179,300
Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- DataFrame.isnull Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True
python/pyspark/pandas/frame.py
notnull
Flyangz/spark
python
def notnull(self) -> 'DataFrame': "\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).\n\n See Also\n --------\n DataFrame.isnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.notnull()\n 0 1\n 0 True True\n 1 True False\n 2 True False\n 3 True True\n\n >>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df.notnull()\n 0 1 2\n 0 True True True\n 1 True False True\n " return self._apply_series_op((lambda psser: psser.notnull()))
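The notnull record is the element-wise complement of isnull. A short sketch with made-up data; dropna() is mentioned only as the usual follow-up and is not part of this record:

    import pyspark.pandas as ps

    df = ps.DataFrame({'a': ['ant', 'dog'], 'b': ['bee', None]})
    valid = df.notnull()             # True where a value is present
    print(valid.to_pandas())
    print(df.dropna().to_pandas())   # typical follow-up: keep only fully populated rows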
def insert(self, loc: int, column: Name, value: Union[(Scalar, 'Series', Iterable)], allow_duplicates: bool=False) -> None: '\n Insert column into DataFrame at specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n\n Examples\n --------\n >>> psdf = ps.DataFrame([1, 2, 3])\n >>> psdf.sort_index()\n 0\n 0 1\n 1 2\n 2 3\n >>> psdf.insert(0, \'x\', 4)\n >>> psdf.sort_index()\n x 0\n 0 4 1\n 1 4 2\n 2 4 3\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n\n >>> psdf.insert(1, \'y\', [5, 6, 7])\n >>> psdf.sort_index()\n x y 0\n 0 4 5 1\n 1 4 6 2\n 2 4 7 3\n\n >>> psdf.insert(2, \'z\', ps.Series([8, 9, 10]))\n >>> psdf.sort_index()\n x y z 0\n 0 4 5 8 1\n 1 4 6 9 2\n 2 4 7 10 3\n\n >>> reset_option("compute.ops_on_diff_frames")\n ' if (not isinstance(loc, int)): raise TypeError('loc must be int') assert (0 <= loc <= len(self.columns)) assert (allow_duplicates is False) if (not is_name_like_value(column)): raise TypeError('"column" should be a scalar value or tuple that contains scalar values') if is_name_like_tuple(column): if (self._internal.column_labels_level > 1): if (len(column) != len(self.columns.levels)): raise ValueError('"column" must have length equal to number of column levels.') else: raise NotImplementedError('Assigning column name as tuple is only supported for MultiIndex columns for now.') if (column in self.columns): raise ValueError(('cannot insert %s, already exists' % str(column))) psdf = self.copy() psdf[column] = value columns = psdf.columns[:(- 1)].insert(loc, psdf.columns[(- 1)]) psdf = psdf[columns] self._update_internal_frame(psdf._internal)
-821,030,439,595,169,900
Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : int, Series, or array-like allow_duplicates : bool, optional Examples -------- >>> psdf = ps.DataFrame([1, 2, 3]) >>> psdf.sort_index() 0 0 1 1 2 2 3 >>> psdf.insert(0, 'x', 4) >>> psdf.sort_index() x 0 0 4 1 1 4 2 2 4 3 >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> psdf.insert(1, 'y', [5, 6, 7]) >>> psdf.sort_index() x y 0 0 4 5 1 1 4 6 2 2 4 7 3 >>> psdf.insert(2, 'z', ps.Series([8, 9, 10])) >>> psdf.sort_index() x y z 0 0 4 5 8 1 1 4 6 9 2 2 4 7 10 3 >>> reset_option("compute.ops_on_diff_frames")
python/pyspark/pandas/frame.py
insert
Flyangz/spark
python
def insert(self, loc: int, column: Name, value: Union[(Scalar, 'Series', Iterable)], allow_duplicates: bool=False) -> None: '\n Insert column into DataFrame at specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n\n Examples\n --------\n >>> psdf = ps.DataFrame([1, 2, 3])\n >>> psdf.sort_index()\n 0\n 0 1\n 1 2\n 2 3\n >>> psdf.insert(0, \'x\', 4)\n >>> psdf.sort_index()\n x 0\n 0 4 1\n 1 4 2\n 2 4 3\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n\n >>> psdf.insert(1, \'y\', [5, 6, 7])\n >>> psdf.sort_index()\n x y 0\n 0 4 5 1\n 1 4 6 2\n 2 4 7 3\n\n >>> psdf.insert(2, \'z\', ps.Series([8, 9, 10]))\n >>> psdf.sort_index()\n x y z 0\n 0 4 5 8 1\n 1 4 6 9 2\n 2 4 7 10 3\n\n >>> reset_option("compute.ops_on_diff_frames")\n ' if (not isinstance(loc, int)): raise TypeError('loc must be int') assert (0 <= loc <= len(self.columns)) assert (allow_duplicates is False) if (not is_name_like_value(column)): raise TypeError('"column" should be a scalar value or tuple that contains scalar values') if is_name_like_tuple(column): if (self._internal.column_labels_level > 1): if (len(column) != len(self.columns.levels)): raise ValueError('"column" must have length equal to number of column levels.') else: raise NotImplementedError('Assigning column name as tuple is only supported for MultiIndex columns for now.') if (column in self.columns): raise ValueError(('cannot insert %s, already exists' % str(column))) psdf = self.copy() psdf[column] = value columns = psdf.columns[:(- 1)].insert(loc, psdf.columns[(- 1)]) psdf = psdf[columns] self._update_internal_frame(psdf._internal)
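A sketch of the insert record above, mirroring the pattern in its docstring: a scalar value needs no special configuration, while list- or Series-valued columns require the compute.ops_on_diff_frames option. Data and column names are illustrative:

    import pyspark.pandas as ps
    from pyspark.pandas.config import set_option, reset_option

    psdf = ps.DataFrame({'a': [1, 2, 3]})
    psdf.insert(0, 'x', 4)                         # scalar: broadcast to every row
    set_option("compute.ops_on_diff_frames", True)
    psdf.insert(1, 'y', ps.Series([5, 6, 7]))      # Series value: option must be enabled
    reset_option("compute.ops_on_diff_frames")
    print(psdf.sort_index().to_pandas())           # columns now ordered x, y, a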
def shift(self, periods: int=1, fill_value: Optional[Any]=None) -> 'DataFrame': "\n Shift DataFrame by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input DataFrame, shifted.\n\n Examples\n --------\n >>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n\n " return self._apply_series_op((lambda psser: psser._shift(periods, fill_value)), should_resolve=True)
1,002,297,955,590,259,000
Shift DataFrame by desired number of periods. .. note:: the current implementation of shift uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of self. For numeric data, np.nan is used. Returns ------- Copy of input DataFrame, shifted. Examples -------- >>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45], ... 'Col2': [13, 23, 18, 33, 48], ... 'Col3': [17, 27, 22, 37, 52]}, ... columns=['Col1', 'Col2', 'Col3']) >>> df.shift(periods=3) Col1 Col2 Col3 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 10.0 13.0 17.0 4 20.0 23.0 27.0 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 0 0 0 0 1 0 0 0 2 0 0 0 3 10 13 17 4 20 23 27
python/pyspark/pandas/frame.py
shift
Flyangz/spark
python
def shift(self, periods: int=1, fill_value: Optional[Any]=None) -> 'DataFrame': "\n Shift DataFrame by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input DataFrame, shifted.\n\n Examples\n --------\n >>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n\n " return self._apply_series_op((lambda psser: psser._shift(periods, fill_value)), should_resolve=True)
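A sketch of the shift record with made-up prices; as the docstring notes, the underlying Window has no partition specification, so this should only be run on modest data:

    import pyspark.pandas as ps

    df = ps.DataFrame({'price': [10, 20, 15, 30, 45]})
    print(df.shift(periods=1).to_pandas())                # first row becomes NaN
    print(df.shift(periods=1, fill_value=0).to_pandas())  # introduced rows filled with 0 instead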
def diff(self, periods: int=1, axis: Axis=0) -> 'DataFrame': "\n First discrete difference of element.\n\n Calculates the difference of a DataFrame element compared with another element in the\n DataFrame (default is the element in the same column of the previous row).\n\n .. note:: the current implementation of diff uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative values.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n diffed : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n " axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') return self._apply_series_op((lambda psser: psser._diff(periods)), should_resolve=True)
-2,800,644,616,705,503,700
First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). .. note:: the current implementation of diff uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : int, default 0 or 'index' Can only be set to 0 at the moment. Returns ------- diffed : DataFrame Examples -------- >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN
python/pyspark/pandas/frame.py
diff
Flyangz/spark
python
def diff(self, periods: int=1, axis: Axis=0) -> 'DataFrame': "\n First discrete difference of element.\n\n Calculates the difference of a DataFrame element compared with another element in the\n DataFrame (default is the element in the same column of the previous row).\n\n .. note:: the current implementation of diff uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative values.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n diffed : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n " axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') return self._apply_series_op((lambda psser: psser._diff(periods)), should_resolve=True)
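A sketch of the diff record, again with illustrative numbers; only axis=0 is supported, as the record states:

    import pyspark.pandas as ps

    df = ps.DataFrame({'a': [1, 2, 3, 4], 'b': [1, 4, 9, 16]})
    print(df.diff().to_pandas())             # difference from the previous row; first row is NaN
    print(df.diff(periods=-1).to_pandas())   # difference from the following row instead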
def nunique(self, axis: Axis=0, dropna: bool=True, approx: bool=False, rsd: float=0.05) -> 'Series': "\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to pandas-on-Spark and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.\n\n Returns\n -------\n The number of unique values per column as a pandas-on-Spark Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(dropna=False)\n A 3\n B 2\n dtype: int64\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> df.nunique(approx=True)\n A 3\n B 1\n dtype: int64\n " from pyspark.pandas.series import first_series axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select(([SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + [self._psser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels])) with ps.option_context('compute.max_rows', 1): internal = self._internal.copy(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], index_names=[None], index_fields=[None], data_spark_columns=[scol_for(sdf, col) for col in self._internal.data_spark_column_names], data_fields=None) return first_series(DataFrame(internal).transpose())
1,471,450,883,314,847,200
Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- axis : int, default 0 or 'index' Can only be set to 0 at the moment. dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and return the exact number of unique. If True, it uses the HyperLogLog approximate algorithm, which is significantly faster for large amount of data. Note: This parameter is specific to pandas-on-Spark and is not found in pandas. rsd: float, default 0.05 Maximum estimation error allowed in the HyperLogLog algorithm. Note: Just like ``approx`` this parameter is specific to pandas-on-Spark. Returns ------- The number of unique values per column as a pandas-on-Spark Series. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(dropna=False) A 3 B 2 dtype: int64 On big data, we recommend using the approximate algorithm to speed up this function. The result will be very close to the exact unique count. >>> df.nunique(approx=True) A 3 B 1 dtype: int64
python/pyspark/pandas/frame.py
nunique
Flyangz/spark
python
def nunique(self, axis: Axis=0, dropna: bool=True, approx: bool=False, rsd: float=0.05) -> 'Series': "\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to pandas-on-Spark and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.\n\n Returns\n -------\n The number of unique values per column as a pandas-on-Spark Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(dropna=False)\n A 3\n B 2\n dtype: int64\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> df.nunique(approx=True)\n A 3\n B 1\n dtype: int64\n " from pyspark.pandas.series import first_series axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select(([SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + [self._psser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels])) with ps.option_context('compute.max_rows', 1): internal = self._internal.copy(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], index_names=[None], index_fields=[None], data_spark_columns=[scol_for(sdf, col) for col in self._internal.data_spark_column_names], data_fields=None) return first_series(DataFrame(internal).transpose())
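A sketch of the nunique record showing both the exact path and the approximate (HyperLogLog) path; the data is made up:

    import numpy as np
    import pyspark.pandas as ps

    df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
    print(df.nunique().to_pandas())                        # exact counts, NaN excluded
    print(df.nunique(dropna=False).to_pandas())            # NaN counted as a distinct value
    print(df.nunique(approx=True, rsd=0.01).to_pandas())   # estimate with a 1% relative error budget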
def round(self, decimals: Union[(int, Dict[(Name, int)], 'Series')]=0) -> 'DataFrame': "\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. Elements\n of `decimals` which are not columns of the input will be\n ignored.\n\n .. note:: If `decimals` is a Series, it is expected to be small,\n as all the data is loaded into the driver's memory.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n Series.round\n\n Examples\n --------\n >>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076],\n ... 'B':[0.992815, 0.645646, 0.149370],\n ... 'C':[0.173891, 0.577595, 0.491027]},\n ... columns=['A', 'B', 'C'],\n ... index=['first', 'second', 'third'])\n >>> df\n A B C\n first 0.028208 0.992815 0.173891\n second 0.038683 0.645646 0.577595\n third 0.877076 0.149370 0.491027\n\n >>> df.round(2)\n A B C\n first 0.03 0.99 0.17\n second 0.04 0.65 0.58\n third 0.88 0.15 0.49\n\n >>> df.round({'A': 1, 'C': 2})\n A B C\n first 0.0 0.992815 0.17\n second 0.0 0.645646 0.58\n third 0.9 0.149370 0.49\n\n >>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C'])\n >>> df.round(decimals)\n A B C\n first 0.0 1.0 0.17\n second 0.0 1.0 0.58\n third 0.9 0.0 0.49\n " if isinstance(decimals, ps.Series): decimals_dict = {(k if isinstance(k, tuple) else (k,)): v for (k, v) in decimals._to_internal_pandas().items()} elif isinstance(decimals, dict): decimals_dict = {(k if is_name_like_tuple(k) else (k,)): v for (k, v) in decimals.items()} elif isinstance(decimals, int): decimals_dict = {k: decimals for k in self._internal.column_labels} else: raise TypeError('decimals must be an integer, a dict-like or a Series') def op(psser: ps.Series) -> Union[(ps.Series, Column)]: label = psser._column_label if (label in decimals_dict): return F.round(psser.spark.column, decimals_dict[label]) else: return psser return self._apply_series_op(op)
5,604,764,155,469,943,000
Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. .. note:: If `decimals` is a Series, it is expected to be small, as all the data is loaded into the driver's memory. Returns ------- DataFrame See Also -------- Series.round Examples -------- >>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076], ... 'B':[0.992815, 0.645646, 0.149370], ... 'C':[0.173891, 0.577595, 0.491027]}, ... columns=['A', 'B', 'C'], ... index=['first', 'second', 'third']) >>> df A B C first 0.028208 0.992815 0.173891 second 0.038683 0.645646 0.577595 third 0.877076 0.149370 0.491027 >>> df.round(2) A B C first 0.03 0.99 0.17 second 0.04 0.65 0.58 third 0.88 0.15 0.49 >>> df.round({'A': 1, 'C': 2}) A B C first 0.0 0.992815 0.17 second 0.0 0.645646 0.58 third 0.9 0.149370 0.49 >>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C']) >>> df.round(decimals) A B C first 0.0 1.0 0.17 second 0.0 1.0 0.58 third 0.9 0.0 0.49
python/pyspark/pandas/frame.py
round
Flyangz/spark
python
def round(self, decimals: Union[(int, Dict[(Name, int)], 'Series')]=0) -> 'DataFrame': "\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. Elements\n of `decimals` which are not columns of the input will be\n ignored.\n\n .. note:: If `decimals` is a Series, it is expected to be small,\n as all the data is loaded into the driver's memory.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n Series.round\n\n Examples\n --------\n >>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076],\n ... 'B':[0.992815, 0.645646, 0.149370],\n ... 'C':[0.173891, 0.577595, 0.491027]},\n ... columns=['A', 'B', 'C'],\n ... index=['first', 'second', 'third'])\n >>> df\n A B C\n first 0.028208 0.992815 0.173891\n second 0.038683 0.645646 0.577595\n third 0.877076 0.149370 0.491027\n\n >>> df.round(2)\n A B C\n first 0.03 0.99 0.17\n second 0.04 0.65 0.58\n third 0.88 0.15 0.49\n\n >>> df.round({'A': 1, 'C': 2})\n A B C\n first 0.0 0.992815 0.17\n second 0.0 0.645646 0.58\n third 0.9 0.149370 0.49\n\n >>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C'])\n >>> df.round(decimals)\n A B C\n first 0.0 1.0 0.17\n second 0.0 1.0 0.58\n third 0.9 0.0 0.49\n " if isinstance(decimals, ps.Series): decimals_dict = {(k if isinstance(k, tuple) else (k,)): v for (k, v) in decimals._to_internal_pandas().items()} elif isinstance(decimals, dict): decimals_dict = {(k if is_name_like_tuple(k) else (k,)): v for (k, v) in decimals.items()} elif isinstance(decimals, int): decimals_dict = {k: decimals for k in self._internal.column_labels} else: raise TypeError('decimals must be an integer, a dict-like or a Series') def op(psser: ps.Series) -> Union[(ps.Series, Column)]: label = psser._column_label if (label in decimals_dict): return F.round(psser.spark.column, decimals_dict[label]) else: return psser return self._apply_series_op(op)
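A sketch of the round record with an int argument and a dict argument; the values are illustrative:

    import pyspark.pandas as ps

    df = ps.DataFrame({'A': [0.028208, 0.877076], 'B': [0.992815, 0.149370]})
    print(df.round(2).to_pandas())           # every column to 2 decimal places
    print(df.round({'A': 1}).to_pandas())    # only 'A' is rounded; 'B' is left as is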
def duplicated(self, subset: Optional[Union[(Name, List[Name])]]=None, keep: Union[(bool, str)]='first') -> 'Series': "\n Return boolean Series denoting duplicate rows, optionally only considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates,\n by default use all of the columns\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n duplicated : Series\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},\n ... columns = ['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 1 1 1\n 2 1 1 1\n 3 3 4 5\n\n >>> df.duplicated().sort_index()\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n Mark duplicates as ``True`` except for the last occurrence.\n\n >>> df.duplicated(keep='last').sort_index()\n 0 True\n 1 True\n 2 False\n 3 False\n dtype: bool\n\n Mark all duplicates as ``True``.\n\n >>> df.duplicated(keep=False).sort_index()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n " from pyspark.pandas.series import first_series (sdf, column) = self._mark_duplicates(subset, keep) sdf = sdf.select((self._internal.index_spark_columns + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)])) return first_series(DataFrame(InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in self._internal.index_spark_column_names], index_names=self._internal.index_names, index_fields=self._internal.index_fields, column_labels=[None], data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)])))
912,777,601,891,668,600
Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : Series Examples -------- >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]}, ... columns = ['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 1 1 1 2 1 1 1 3 3 4 5 >>> df.duplicated().sort_index() 0 False 1 True 2 True 3 False dtype: bool Mark duplicates as ``True`` except for the last occurrence. >>> df.duplicated(keep='last').sort_index() 0 True 1 True 2 False 3 False dtype: bool Mark all duplicates as ``True``. >>> df.duplicated(keep=False).sort_index() 0 True 1 True 2 True 3 False dtype: bool
python/pyspark/pandas/frame.py
duplicated
Flyangz/spark
python
def duplicated(self, subset: Optional[Union[(Name, List[Name])]]=None, keep: Union[(bool, str)]='first') -> 'Series': "\n Return boolean Series denoting duplicate rows, optionally only considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates,\n by default use all of the columns\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n duplicated : Series\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},\n ... columns = ['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 1 1 1\n 2 1 1 1\n 3 3 4 5\n\n >>> df.duplicated().sort_index()\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n Mark duplicates as ``True`` except for the last occurrence.\n\n >>> df.duplicated(keep='last').sort_index()\n 0 True\n 1 True\n 2 False\n 3 False\n dtype: bool\n\n Mark all duplicates as ``True``.\n\n >>> df.duplicated(keep=False).sort_index()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n " from pyspark.pandas.series import first_series (sdf, column) = self._mark_duplicates(subset, keep) sdf = sdf.select((self._internal.index_spark_columns + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)])) return first_series(DataFrame(InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in self._internal.index_spark_column_names], index_names=self._internal.index_names, index_fields=self._internal.index_fields, column_labels=[None], data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)])))
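A sketch of the duplicated record with made-up rows; drop_duplicates() is mentioned only as the usual companion when flagged rows should be removed rather than inspected:

    import pyspark.pandas as ps

    df = ps.DataFrame({'a': [1, 1, 3], 'b': [1, 1, 4]})
    print(df.duplicated().sort_index().to_pandas())             # repeats after the first occurrence
    print(df.duplicated(keep=False).sort_index().to_pandas())   # every member of a duplicate group
    print(df.drop_duplicates().sort_index().to_pandas())        # companion API that removes them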
def dot(self, other: 'Series') -> 'Series': '\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the \'compute.max_rows\' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context(\n ... \'compute.max_rows\', 1000, "compute.ops_on_diff_frames", True\n ... ): # doctest: +NORMALIZE_WHITESPACE\n ... psdf = ps.DataFrame({\'a\': range(1001)})\n ... psser = ps.Series([2], index=[\'a\'])\n ... psdf.dot(psser)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set \'compute.max_rows\' by using \'pyspark.pandas.config.set_option\'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n \'compute.max_rows\', this operation is considerably expensive.\n\n Parameters\n ----------\n other : Series\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series\n Return the matrix product between self and other as a Series.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> psser = ps.Series([1, 1, 2, 1])\n >>> psdf.dot(psser)\n 0 -4\n 1 5\n dtype: int64\n\n Note how shuffling of the objects does not change the result.\n\n >>> psser2 = psser.reindex([1, 0, 2, 3])\n >>> psdf.dot(psser2)\n 0 -4\n 1 5\n dtype: int64\n >>> psdf @ psser2\n 0 -4\n 1 5\n dtype: int64\n >>> reset_option("compute.ops_on_diff_frames")\n ' if (not isinstance(other, ps.Series)): raise TypeError('Unsupported type {}'.format(type(other).__name__)) else: return cast(ps.Series, other.dot(self.transpose())).rename(None)
-1,559,024,477,338,388,500
Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series It can also be called using ``self @ other`` in Python >= 3.5. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from pyspark.pandas.config import option_context >>> with option_context( ... 'compute.max_rows', 1000, "compute.ops_on_diff_frames", True ... ): # doctest: +NORMALIZE_WHITESPACE ... psdf = ps.DataFrame({'a': range(1001)}) ... psser = ps.Series([2], index=['a']) ... psdf.dot(psser) Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Parameters ---------- other : Series The other object to compute the matrix product with. Returns ------- Series Return the matrix product between self and other as a Series. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> psser = ps.Series([1, 1, 2, 1]) >>> psdf.dot(psser) 0 -4 1 5 dtype: int64 Note how shuffling of the objects does not change the result. >>> psser2 = psser.reindex([1, 0, 2, 3]) >>> psdf.dot(psser2) 0 -4 1 5 dtype: int64 >>> psdf @ psser2 0 -4 1 5 dtype: int64 >>> reset_option("compute.ops_on_diff_frames")
python/pyspark/pandas/frame.py
dot
Flyangz/spark
python
def dot(self, other: 'Series') -> 'Series': '\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the \'compute.max_rows\' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context(\n ... \'compute.max_rows\', 1000, "compute.ops_on_diff_frames", True\n ... ): # doctest: +NORMALIZE_WHITESPACE\n ... psdf = ps.DataFrame({\'a\': range(1001)})\n ... psser = ps.Series([2], index=[\'a\'])\n ... psdf.dot(psser)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set \'compute.max_rows\' by using \'pyspark.pandas.config.set_option\'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n \'compute.max_rows\', this operation is considerably expensive.\n\n Parameters\n ----------\n other : Series\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series\n Return the matrix product between self and other as a Series.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option("compute.ops_on_diff_frames", True)\n >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> psser = ps.Series([1, 1, 2, 1])\n >>> psdf.dot(psser)\n 0 -4\n 1 5\n dtype: int64\n\n Note how shuffling of the objects does not change the result.\n\n >>> psser2 = psser.reindex([1, 0, 2, 3])\n >>> psdf.dot(psser2)\n 0 -4\n 1 5\n dtype: int64\n >>> psdf @ psser2\n 0 -4\n 1 5\n dtype: int64\n >>> reset_option("compute.ops_on_diff_frames")\n ' if (not isinstance(other, ps.Series)): raise TypeError('Unsupported type {}'.format(type(other).__name__)) else: return cast(ps.Series, other.dot(self.transpose())).rename(None)
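A sketch of the dot record, following its own docstring: the two operands come from different frames, so compute.ops_on_diff_frames must be enabled, and the @ operator (the __matmul__ record that follows) is just an alias:

    import pyspark.pandas as ps
    from pyspark.pandas.config import set_option, reset_option

    set_option("compute.ops_on_diff_frames", True)
    psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
    psser = ps.Series([1, 1, 2, 1])
    print(psdf.dot(psser).to_pandas())    # matrix product, returned as a Series
    print((psdf @ psser).to_pandas())     # identical result through the @ operator
    reset_option("compute.ops_on_diff_frames")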
def __matmul__(self, other: 'Series') -> 'Series': '\n Matrix multiplication using binary `@` operator in Python>=3.5.\n ' return self.dot(other)
1,389,403,817,055,163,000
Matrix multiplication using binary `@` operator in Python>=3.5.
python/pyspark/pandas/frame.py
__matmul__
Flyangz/spark
python
def __matmul__(self, other: 'Series') -> 'Series': '\n \n ' return self.dot(other)
def to_delta(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: '\n Write the DataFrame out as a Delta Lake table.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default \'w\'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as \'append\', \'overwrite\', \'ignore\', \'error\', \'errorifexists\'.\n\n - \'append\' (equivalent to \'a\'): Append the new data to existing data.\n - \'overwrite\' (equivalent to \'w\'): Overwrite existing data.\n - \'ignore\': Silently ignore this operation if data already exists.\n - \'error\' or \'errorifexists\': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark\'s index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Delta Lake.\n\n See Also\n --------\n read_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range(\'2012-1-1 12:00:00\', periods=3, freq=\'M\')),\n ... country=[\'KR\', \'US\', \'JP\'],\n ... code=[1, 2 ,3]), columns=[\'date\', \'country\', \'code\'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n Create a new Delta Lake table, partitioned by one column:\n\n >>> df.to_delta(\'%s/to_delta/foo\' % path, partition_cols=\'date\') # doctest: +SKIP\n\n Partitioned by two columns:\n\n >>> df.to_delta(\'%s/to_delta/bar\' % path,\n ... partition_cols=[\'date\', \'country\']) # doctest: +SKIP\n\n Overwrite an existing table\'s partitions, using the \'replaceWhere\' capability in Delta:\n\n >>> df.to_delta(\'%s/to_delta/bar\' % path,\n ... mode=\'overwrite\', replaceWhere=\'date >= "2012-01-01"\') # doctest: +SKIP\n ' if (index_col is None): log_advice('If `index_col` is not specified for `to_delta`, the existing index is lost when converting to Delta.') if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)): options = options.get('options') mode = validate_mode(mode) self.spark.to_spark_io(path=path, mode=mode, format='delta', partition_cols=partition_cols, index_col=index_col, **options)
-1,846,093,383,728,173,800
Write the DataFrame out as a Delta Lake table. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Delta Lake. See Also -------- read_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 Create a new Delta Lake table, partitioned by one column: >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') # doctest: +SKIP Partitioned by two columns: >>> df.to_delta('%s/to_delta/bar' % path, ... partition_cols=['date', 'country']) # doctest: +SKIP Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta: >>> df.to_delta('%s/to_delta/bar' % path, ... mode='overwrite', replaceWhere='date >= "2012-01-01"') # doctest: +SKIP
python/pyspark/pandas/frame.py
to_delta
Flyangz/spark
python
def to_delta(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: '\n Write the DataFrame out as a Delta Lake table.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default \'w\'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as \'append\', \'overwrite\', \'ignore\', \'error\', \'errorifexists\'.\n\n - \'append\' (equivalent to \'a\'): Append the new data to existing data.\n - \'overwrite\' (equivalent to \'w\'): Overwrite existing data.\n - \'ignore\': Silently ignore this operation if data already exists.\n - \'error\' or \'errorifexists\': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark\'s index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Delta Lake.\n\n See Also\n --------\n read_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range(\'2012-1-1 12:00:00\', periods=3, freq=\'M\')),\n ... country=[\'KR\', \'US\', \'JP\'],\n ... code=[1, 2 ,3]), columns=[\'date\', \'country\', \'code\'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n Create a new Delta Lake table, partitioned by one column:\n\n >>> df.to_delta(\'%s/to_delta/foo\' % path, partition_cols=\'date\') # doctest: +SKIP\n\n Partitioned by two columns:\n\n >>> df.to_delta(\'%s/to_delta/bar\' % path,\n ... partition_cols=[\'date\', \'country\']) # doctest: +SKIP\n\n Overwrite an existing table\'s partitions, using the \'replaceWhere\' capability in Delta:\n\n >>> df.to_delta(\'%s/to_delta/bar\' % path,\n ... mode=\'overwrite\', replaceWhere=\'date >= "2012-01-01"\') # doctest: +SKIP\n ' if (index_col is None): log_advice('If `index_col` is not specified for `to_delta`, the existing index is lost when converting to Delta.') if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)): options = options.get('options') mode = validate_mode(mode) self.spark.to_spark_io(path=path, mode=mode, format='delta', partition_cols=partition_cols, index_col=index_col, **options)
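A sketch of the to_delta record. The output path is hypothetical, and the call assumes the Delta Lake connector is available on the Spark session (it is not bundled with Spark itself); read_delta is the round-trip counterpart named in the record's See Also section:

    import pyspark.pandas as ps

    df = ps.DataFrame({'country': ['KR', 'US', 'JP'], 'code': [1, 2, 3]})
    df.to_delta('/tmp/example_delta', mode='overwrite',
                partition_cols='country', index_col='idx')   # keep the index as column 'idx'
    restored = ps.read_delta('/tmp/example_delta', index_col='idx')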
def to_parquet(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, compression: Optional[str]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> None: "\n Write the DataFrame out as a Parquet file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}\n Compression codec to use when saving to file. If None is set, it uses the\n value specified in `spark.sql.parquet.compression.codec`.\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_parquet\n DataFrame.to_delta\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')\n\n >>> df.to_parquet(\n ... '%s/to_parquet/foo.parquet' % path,\n ... mode = 'overwrite',\n ... partition_cols=['date', 'country'])\n " if (index_col is None): log_advice('If `index_col` is not specified for `to_parquet`, the existing index is lost when converting to Parquet.') if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)): options = options.get('options') mode = validate_mode(mode) builder = self.to_spark(index_col=index_col).write.mode(mode) if (partition_cols is not None): builder.partitionBy(partition_cols) if (compression is not None): builder.option('compression', compression) builder.options(**options).format('parquet').save(path)
7,255,174,748,984,033,000
Write the DataFrame out as a Parquet file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'} Compression codec to use when saving to file. If None is set, it uses the value specified in `spark.sql.parquet.compression.codec`. index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_parquet DataFrame.to_delta DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date') >>> df.to_parquet( ... '%s/to_parquet/foo.parquet' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country'])
python/pyspark/pandas/frame.py
to_parquet
Flyangz/spark
python
def to_parquet(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, compression: Optional[str]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> None: "\n Write the DataFrame out as a Parquet file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}\n Compression codec to use when saving to file. If None is set, it uses the\n value specified in `spark.sql.parquet.compression.codec`.\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_parquet\n DataFrame.to_delta\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')\n\n >>> df.to_parquet(\n ... '%s/to_parquet/foo.parquet' % path,\n ... mode = 'overwrite',\n ... partition_cols=['date', 'country'])\n " if (index_col is None): log_advice('If `index_col` is not specified for `to_parquet`, the existing index is lost when converting to Parquet.') if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)): options = options.get('options') mode = validate_mode(mode) builder = self.to_spark(index_col=index_col).write.mode(mode) if (partition_cols is not None): builder.partitionBy(partition_cols) if (compression is not None): builder.option('compression', compression) builder.options(**options).format('parquet').save(path)
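A sketch of the to_parquet record with an illustrative frame and a hypothetical /tmp path; passing index_col on both the write and the read preserves the pandas-on-Spark index across the round trip:

    import pandas as pd
    import pyspark.pandas as ps

    df = ps.DataFrame({'date': list(pd.date_range('2012-01-01', periods=3, freq='M')),
                       'country': ['KR', 'US', 'JP'],
                       'code': [1, 2, 3]})
    df.to_parquet('/tmp/example_parquet', mode='overwrite',
                  partition_cols='country', compression='snappy', index_col='idx')
    restored = ps.read_parquet('/tmp/example_parquet', index_col='idx')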
def to_orc(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: "\n Write the DataFrame out as a ORC file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_orc\n DataFrame.to_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date')\n\n >>> df.to_orc(\n ... '%s/to_orc/foo.orc' % path,\n ... mode = 'overwrite',\n ... partition_cols=['date', 'country'])\n " if (index_col is None): log_advice('If `index_col` is not specified for `to_orc`, the existing index is lost when converting to ORC.') if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)): options = options.get('options') mode = validate_mode(mode) self.spark.to_spark_io(path=path, mode=mode, format='orc', partition_cols=partition_cols, index_col=index_col, **options)
-2,983,613,375,367,457,300
Write the DataFrame out as a ORC file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_orc DataFrame.to_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date') >>> df.to_orc( ... '%s/to_orc/foo.orc' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country'])
python/pyspark/pandas/frame.py
to_orc
Flyangz/spark
python
def to_orc(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: "\n Write the DataFrame out as a ORC file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_orc\n DataFrame.to_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date')\n\n >>> df.to_orc(\n ... '%s/to_orc/foo.orc' % path,\n ... mode = 'overwrite',\n ... partition_cols=['date', 'country'])\n " if (index_col is None): log_advice('If `index_col` is not specified for `to_orc`, the existing index is lost when converting to ORC.') if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)): options = options.get('options') mode = validate_mode(mode) self.spark.to_spark_io(path=path, mode=mode, format='orc', partition_cols=partition_cols, index_col=index_col, **options)
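A short, hedged usage sketch for the `to_orc` entry above, assuming a working PySpark installation where `pyspark.pandas` is importable; the data and the '/tmp/psdf_orc' output path are illustrative placeholders, not taken from the source.

import pyspark.pandas as ps

# Illustrative frame; the path below is a placeholder for any writable location.
psdf = ps.DataFrame({"country": ["KR", "US", "JP"], "code": [1, 2, 3]})
psdf.to_orc("/tmp/psdf_orc", mode="overwrite", partition_cols="country")

# Reading the ORC directory back (read_orc is listed under See Also).
restored = ps.read_orc("/tmp/psdf_orc")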
def to_spark_io(self, path: Optional[str]=None, format: Optional[str]=None, mode: str='overwrite', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: 'An alias for :func:`DataFrame.spark.to_spark_io`.\n See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`.\n\n .. deprecated:: 3.2.0\n Use :func:`DataFrame.spark.to_spark_io` instead.\n ' warnings.warn('Deprecated in 3.2, Use DataFrame.spark.to_spark_io instead.', FutureWarning) return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options)
-9,186,847,900,837,489,000
An alias for :func:`DataFrame.spark.to_spark_io`. See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`. .. deprecated:: 3.2.0 Use :func:`DataFrame.spark.to_spark_io` instead.
python/pyspark/pandas/frame.py
to_spark_io
Flyangz/spark
python
def to_spark_io(self, path: Optional[str]=None, format: Optional[str]=None, mode: str='overwrite', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: 'An alias for :func:`DataFrame.spark.to_spark_io`.\n See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`.\n\n .. deprecated:: 3.2.0\n Use :func:`DataFrame.spark.to_spark_io` instead.\n ' warnings.warn('Deprecated in 3.2, Use DataFrame.spark.to_spark_io instead.', FutureWarning) return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options)
def _to_spark(self, index_col: Optional[Union[(str, List[str])]]=None) -> SparkDataFrame: '\n Same as `to_spark()`, without issueing the advice log when `index_col` is not specified\n for internal usage.\n ' return self.spark.frame(index_col)
3,150,529,469,738,035,000
Same as `to_spark()`, without issuing the advice log when `index_col` is not specified for internal usage.
python/pyspark/pandas/frame.py
_to_spark
Flyangz/spark
python
def _to_spark(self, index_col: Optional[Union[(str, List[str])]]=None) -> SparkDataFrame: '\n Same as `to_spark()`, without issueing the advice log when `index_col` is not specified\n for internal usage.\n ' return self.spark.frame(index_col)
def to_pandas(self) -> pd.DataFrame: "\n Return a pandas DataFrame.\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.to_pandas()\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.6\n 2 0.6 0.0\n 3 0.2 0.1\n " log_advice("`to_pandas` loads all data into the driver's memory. It should only be used if the resulting pandas DataFrame is expected to be small.") return self._to_pandas()
7,510,011,846,533,468,000
Return a pandas DataFrame. .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1
python/pyspark/pandas/frame.py
to_pandas
Flyangz/spark
python
def to_pandas(self) -> pd.DataFrame: "\n Return a pandas DataFrame.\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.to_pandas()\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.6\n 2 0.6 0.0\n 3 0.2 0.1\n " log_advice("`to_pandas` loads all data into the driver's memory. It should only be used if the resulting pandas DataFrame is expected to be small.") return self._to_pandas()
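A minimal sketch of `to_pandas`, assuming `pyspark.pandas` is available; it only illustrates that the result is a plain pandas DataFrame collected on the driver, per the note in the docstring above.

import pyspark.pandas as ps

psdf = ps.DataFrame({"dogs": [0.2, 0.0, 0.6, 0.2], "cats": [0.3, 0.6, 0.0, 0.1]})
pdf = psdf.to_pandas()   # collects all rows to the driver; keep the frame small
print(type(pdf))         # <class 'pandas.core.frame.DataFrame'>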
def _to_pandas(self) -> pd.DataFrame: '\n Same as `to_pandas()`, without issueing the advice log for internal usage.\n ' return self._internal.to_pandas_frame.copy()
1,384,124,970,409,361
Same as `to_pandas()`, without issuing the advice log for internal usage.
python/pyspark/pandas/frame.py
_to_pandas
Flyangz/spark
python
def _to_pandas(self) -> pd.DataFrame: '\n \n ' return self._internal.to_pandas_frame.copy()
def assign(self, **kwargs: Any) -> 'DataFrame': "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable, Series or Index}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas-on-Spark doesn't check it).\n If the values are not callable, (e.g. a Series or a literal),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Examples\n --------\n >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence and you can also\n create multiple columns within the same assign.\n\n >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,\n ... temp_k=df['temp_c'] + 273.15,\n ... temp_idx=df.index)\n >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']]\n temp_c temp_f temp_k temp_idx\n Portland 17.0 62.6 290.15 Portland\n Berkeley 25.0 77.0 298.15 Berkeley\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible\n but you cannot refer to newly created or modified columns. This\n feature is supported in pandas for Python 3.6 and later but not in\n pandas-on-Spark. In pandas-on-Spark, all items are computed first,\n and then assigned.\n " return self._assign(kwargs)
4,465,799,761,529,371,000
Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable, Series or Index} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas-on-Spark doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Examples -------- >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15, ... temp_idx=df.index) >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']] temp_c temp_f temp_k temp_idx Portland 17.0 62.6 290.15 Portland Berkeley 25.0 77.0 298.15 Berkeley Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in pandas-on-Spark. In pandas-on-Spark, all items are computed first, and then assigned.
python/pyspark/pandas/frame.py
assign
Flyangz/spark
python
def assign(self, **kwargs: Any) -> 'DataFrame': "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable, Series or Index}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas-on-Spark doesn't check it).\n If the values are not callable, (e.g. a Series or a literal),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Examples\n --------\n >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence and you can also\n create multiple columns within the same assign.\n\n >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,\n ... temp_k=df['temp_c'] + 273.15,\n ... temp_idx=df.index)\n >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']]\n temp_c temp_f temp_k temp_idx\n Portland 17.0 62.6 290.15 Portland\n Berkeley 25.0 77.0 298.15 Berkeley\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible\n but you cannot refer to newly created or modified columns. This\n feature is supported in pandas for Python 3.6 and later but not in\n pandas-on-Spark. In pandas-on-Spark, all items are computed first,\n and then assigned.\n " return self._assign(kwargs)
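A hedged sketch of `assign`, mirroring the docstring example above: callables are evaluated against the frame, Series and literals are assigned as-is, and newly created columns cannot be referenced within the same call.

import pyspark.pandas as ps

df = ps.DataFrame({"temp_c": [17.0, 25.0]}, index=["Portland", "Berkeley"])
# temp_f comes from a callable; temp_k from an existing Series expression.
out = df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32,
                temp_k=df["temp_c"] + 273.15)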
@staticmethod def from_records(data: Union[(np.ndarray, List[tuple], dict, pd.DataFrame)], index: Union[(str, list, np.ndarray)]=None, exclude: list=None, columns: list=None, coerce_float: bool=False, nrows: int=None) -> 'DataFrame': "\n Convert structured or record ndarray to DataFrame.\n\n Parameters\n ----------\n data : ndarray (structured dtype), list of tuples, dict, or DataFrame\n index : string, list of fields, array-like\n Field of array to use as the index, alternately a specific set of input labels to use\n exclude : sequence, default None\n Columns or fields to exclude\n columns : sequence, default None\n Column names to use. If the passed data do not have names associated with them, this\n argument provides names for the columns. Otherwise this argument indicates the order of\n the columns in the result (any names not found in the data will become all-NA columns)\n coerce_float : boolean, default False\n Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to\n floating point, useful for SQL result sets\n nrows : int, default None\n Number of rows to read if data is an iterator\n\n Returns\n -------\n df : DataFrame\n\n Examples\n --------\n Use dict as input\n\n >>> ps.DataFrame.from_records({'A': [1, 2, 3]})\n A\n 0 1\n 1 2\n 2 3\n\n Use list of tuples as input\n\n >>> ps.DataFrame.from_records([(1, 2), (3, 4)])\n 0 1\n 0 1 2\n 1 3 4\n\n Use NumPy array as input\n\n >>> ps.DataFrame.from_records(np.eye(3))\n 0 1 2\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n " return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows))
8,813,867,570,616,891,000
Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- df : DataFrame Examples -------- Use dict as input >>> ps.DataFrame.from_records({'A': [1, 2, 3]}) A 0 1 1 2 2 3 Use list of tuples as input >>> ps.DataFrame.from_records([(1, 2), (3, 4)]) 0 1 0 1 2 1 3 4 Use NumPy array as input >>> ps.DataFrame.from_records(np.eye(3)) 0 1 2 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0
python/pyspark/pandas/frame.py
from_records
Flyangz/spark
python
@staticmethod def from_records(data: Union[(np.ndarray, List[tuple], dict, pd.DataFrame)], index: Union[(str, list, np.ndarray)]=None, exclude: list=None, columns: list=None, coerce_float: bool=False, nrows: int=None) -> 'DataFrame': "\n Convert structured or record ndarray to DataFrame.\n\n Parameters\n ----------\n data : ndarray (structured dtype), list of tuples, dict, or DataFrame\n index : string, list of fields, array-like\n Field of array to use as the index, alternately a specific set of input labels to use\n exclude : sequence, default None\n Columns or fields to exclude\n columns : sequence, default None\n Column names to use. If the passed data do not have names associated with them, this\n argument provides names for the columns. Otherwise this argument indicates the order of\n the columns in the result (any names not found in the data will become all-NA columns)\n coerce_float : boolean, default False\n Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to\n floating point, useful for SQL result sets\n nrows : int, default None\n Number of rows to read if data is an iterator\n\n Returns\n -------\n df : DataFrame\n\n Examples\n --------\n Use dict as input\n\n >>> ps.DataFrame.from_records({'A': [1, 2, 3]})\n A\n 0 1\n 1 2\n 2 3\n\n Use list of tuples as input\n\n >>> ps.DataFrame.from_records([(1, 2), (3, 4)])\n 0 1\n 0 1 2\n 1 3 4\n\n Use NumPy array as input\n\n >>> ps.DataFrame.from_records(np.eye(3))\n 0 1 2\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n " return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows))
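A brief sketch of `from_records`, based on the docstring examples above; the inputs are converted through pandas on the driver, so they should be small.

import pyspark.pandas as ps

# Dict input: keys become column names.
psdf1 = ps.DataFrame.from_records({"A": [1, 2, 3]})
# List-of-tuples input with explicit column names.
psdf2 = ps.DataFrame.from_records([(1, 2), (3, 4)], columns=["x", "y"])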
def to_records(self, index: bool=True, column_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None, index_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None) -> np.recarray: '\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is\n expected to be small, as all the data is loaded into the driver\'s memory.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in \'index\'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'A\': [1, 2], \'B\': [0.5, 0.75]},\n ... index=[\'a\', \'b\'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n\n >>> df.to_records() # doctest: +SKIP\n rec.array([(\'a\', 1, 0.5 ), (\'b\', 2, 0.75)],\n dtype=[(\'index\', \'O\'), (\'A\', \'<i8\'), (\'B\', \'<f8\')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False) # doctest: +SKIP\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[(\'A\', \'<i8\'), (\'B\', \'<f8\')])\n\n Specification of dtype for columns is new in pandas 0.24.0.\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP\n rec.array([(\'a\', 1, 0.5 ), (\'b\', 2, 0.75)],\n dtype=[(\'index\', \'O\'), (\'A\', \'<i4\'), (\'B\', \'<f8\')])\n\n Specification of dtype for index is new in pandas 0.24.0.\n Data types can also be specified for the index:\n\n >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP\n rec.array([(b\'a\', 1, 0.5 ), (b\'b\', 2, 0.75)],\n dtype=[(\'index\', \'S2\'), (\'A\', \'<i8\'), (\'B\', \'<f8\')])\n ' args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args)
-2,026,692,314,611,704,600
Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ps.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
python/pyspark/pandas/frame.py
to_records
Flyangz/spark
python
def to_records(self, index: bool=True, column_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None, index_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None) -> np.recarray: '\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is\n expected to be small, as all the data is loaded into the driver\'s memory.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in \'index\'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'A\': [1, 2], \'B\': [0.5, 0.75]},\n ... index=[\'a\', \'b\'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n\n >>> df.to_records() # doctest: +SKIP\n rec.array([(\'a\', 1, 0.5 ), (\'b\', 2, 0.75)],\n dtype=[(\'index\', \'O\'), (\'A\', \'<i8\'), (\'B\', \'<f8\')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False) # doctest: +SKIP\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[(\'A\', \'<i8\'), (\'B\', \'<f8\')])\n\n Specification of dtype for columns is new in pandas 0.24.0.\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP\n rec.array([(\'a\', 1, 0.5 ), (\'b\', 2, 0.75)],\n dtype=[(\'index\', \'O\'), (\'A\', \'<i4\'), (\'B\', \'<f8\')])\n\n Specification of dtype for index is new in pandas 0.24.0.\n Data types can also be specified for the index:\n\n >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP\n rec.array([(b\'a\', 1, 0.5 ), (b\'b\', 2, 0.75)],\n dtype=[(\'index\', \'S2\'), (\'A\', \'<i8\'), (\'B\', \'<f8\')])\n ' args = locals() psdf = self return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args)
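A hedged sketch of `to_records`, following the docstring above; the record array is materialized on the driver.

import pyspark.pandas as ps

df = ps.DataFrame({"A": [1, 2], "B": [0.5, 0.75]}, index=["a", "b"])
# Drop the index field and downcast column A, as in the docstring examples.
arr = df.to_records(index=False, column_dtypes={"A": "int32"})
print(arr.dtype)   # expected along the lines of [('A', '<i4'), ('B', '<f8')]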
def copy(self, deep: bool=True) -> 'DataFrame': "\n Make a copy of this object's indices and data.\n\n Parameters\n ----------\n deep : bool, default True\n this parameter is not supported but just dummy parameter to match pandas.\n\n Returns\n -------\n copy : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... columns=['x', 'y', 'z', 'w'])\n >>> df\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df_copy = df.copy()\n >>> df_copy\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n " return DataFrame(self._internal)
-6,249,444,629,079,683,000
Make a copy of this object's indices and data. Parameters ---------- deep : bool, default True this parameter is not supported but just dummy parameter to match pandas. Returns ------- copy : DataFrame Examples -------- >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df_copy = df.copy() >>> df_copy x y z w 0 1 3 5 7 1 2 4 6 8
python/pyspark/pandas/frame.py
copy
Flyangz/spark
python
def copy(self, deep: bool=True) -> 'DataFrame': "\n Make a copy of this object's indices and data.\n\n Parameters\n ----------\n deep : bool, default True\n this parameter is not supported but just dummy parameter to match pandas.\n\n Returns\n -------\n copy : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... columns=['x', 'y', 'z', 'w'])\n >>> df\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df_copy = df.copy()\n >>> df_copy\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n " return DataFrame(self._internal)
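A minimal sketch of `copy`; as the docstring notes, `deep` is only a dummy parameter kept for pandas compatibility.

import pyspark.pandas as ps

df = ps.DataFrame({"x": [1, 2], "y": [3, 4]})
df_copy = df.copy()   # a new DataFrame built over the same internal frame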
def dropna(self, axis: Axis=0, how: str='any', thresh: Optional[int]=None, subset: Optional[Union[(Name, List[Name])]]=None, inplace: bool=False) -> Optional['DataFrame']: '\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or \'index\'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or \'index\' : Drop rows which contain missing values.\n how : {\'any\', \'all\'}, default \'any\'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * \'any\' : If any NA values are present, drop that row or column.\n * \'all\' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries dropped from it.\n\n See Also\n --------\n DataFrame.drop : Drop specified labels from columns.\n DataFrame.isnull: Indicate missing values.\n DataFrame.notnull : Indicate existing (non-missing) values.\n\n Examples\n --------\n >>> df = ps.DataFrame({"name": [\'Alfred\', \'Batman\', \'Catwoman\'],\n ... "toy": [None, \'Batmobile\', \'Bullwhip\'],\n ... "born": [None, "1940-04-25", None]},\n ... columns=[\'name\', \'toy\', \'born\'])\n >>> df\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis=\'columns\')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how=\'all\')\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=[\'name\', \'born\'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n ' axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, 'inplace') if (thresh is None): if (how is None): raise TypeError('must specify how or thresh') elif (how not in ('any', 'all')): raise ValueError('invalid how option: {h}'.format(h=how)) labels: Optional[List[Label]] if (subset is not None): if isinstance(subset, str): labels = [(subset,)] elif isinstance(subset, tuple): labels = [subset] else: labels = [(sub if isinstance(sub, tuple) else (sub,)) for sub in subset] else: labels = None if (axis == 0): if (labels is not None): invalids = [label for label in labels if (label not in self._internal.column_labels)] if (len(invalids) > 0): raise KeyError(invalids) else: labels = self._internal.column_labels cnt = reduce((lambda x, y: (x + y)), [F.when(self._psser_for(label).notna().spark.column, 1).otherwise(0) for label in labels], SF.lit(0)) if (thresh is not None): pred = (cnt >= SF.lit(int(thresh))) elif (how == 'any'): pred = (cnt == SF.lit(len(labels))) elif (how == 'all'): pred = (cnt > SF.lit(0)) internal = self._internal.with_filter(pred) if 
inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) else: assert (axis == 1) internal = self._internal.resolved_copy if (labels is not None): if any(((len(lbl) != internal.index_level) for lbl in labels)): raise ValueError('The length of each subset must be the same as the index size.') cond = reduce((lambda x, y: (x | y)), [reduce((lambda x, y: (x & y)), [(scol == SF.lit(part)) for (part, scol) in zip(lbl, internal.index_spark_columns)]) for lbl in labels]) internal = internal.with_filter(cond) psdf: DataFrame = DataFrame(internal) null_counts = [] for label in internal.column_labels: psser = psdf._psser_for(label) cond = psser.isnull().spark.column null_counts.append(F.sum(F.when((~ cond), 1).otherwise(0)).alias(name_like_string(label))) counts = internal.spark_frame.select((null_counts + [F.count('*')])).head() if (thresh is not None): column_labels = [label for (label, cnt) in zip(internal.column_labels, counts) if ((cnt or 0) >= int(thresh))] elif (how == 'any'): column_labels = [label for (label, cnt) in zip(internal.column_labels, counts) if ((cnt or 0) == counts[(- 1)])] elif (how == 'all'): column_labels = [label for (label, cnt) in zip(internal.column_labels, counts) if ((cnt or 0) > 0)] psdf = self[column_labels] if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf
1,784,172,941,946,276,000
Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ps.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. >>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25
python/pyspark/pandas/frame.py
dropna
Flyangz/spark
python
def dropna(self, axis: Axis=0, how: str='any', thresh: Optional[int]=None, subset: Optional[Union[(Name, List[Name])]]=None, inplace: bool=False) -> Optional['DataFrame']: '\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or \'index\'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or \'index\' : Drop rows which contain missing values.\n how : {\'any\', \'all\'}, default \'any\'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * \'any\' : If any NA values are present, drop that row or column.\n * \'all\' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries dropped from it.\n\n See Also\n --------\n DataFrame.drop : Drop specified labels from columns.\n DataFrame.isnull: Indicate missing values.\n DataFrame.notnull : Indicate existing (non-missing) values.\n\n Examples\n --------\n >>> df = ps.DataFrame({"name": [\'Alfred\', \'Batman\', \'Catwoman\'],\n ... "toy": [None, \'Batmobile\', \'Bullwhip\'],\n ... "born": [None, "1940-04-25", None]},\n ... columns=[\'name\', \'toy\', \'born\'])\n >>> df\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis=\'columns\')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how=\'all\')\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=[\'name\', \'born\'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n ' axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, 'inplace') if (thresh is None): if (how is None): raise TypeError('must specify how or thresh') elif (how not in ('any', 'all')): raise ValueError('invalid how option: {h}'.format(h=how)) labels: Optional[List[Label]] if (subset is not None): if isinstance(subset, str): labels = [(subset,)] elif isinstance(subset, tuple): labels = [subset] else: labels = [(sub if isinstance(sub, tuple) else (sub,)) for sub in subset] else: labels = None if (axis == 0): if (labels is not None): invalids = [label for label in labels if (label not in self._internal.column_labels)] if (len(invalids) > 0): raise KeyError(invalids) else: labels = self._internal.column_labels cnt = reduce((lambda x, y: (x + y)), [F.when(self._psser_for(label).notna().spark.column, 1).otherwise(0) for label in labels], SF.lit(0)) if (thresh is not None): pred = (cnt >= SF.lit(int(thresh))) elif (how == 'any'): pred = (cnt == SF.lit(len(labels))) elif (how == 'all'): pred = (cnt > SF.lit(0)) internal = self._internal.with_filter(pred) if 
inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) else: assert (axis == 1) internal = self._internal.resolved_copy if (labels is not None): if any(((len(lbl) != internal.index_level) for lbl in labels)): raise ValueError('The length of each subset must be the same as the index size.') cond = reduce((lambda x, y: (x | y)), [reduce((lambda x, y: (x & y)), [(scol == SF.lit(part)) for (part, scol) in zip(lbl, internal.index_spark_columns)]) for lbl in labels]) internal = internal.with_filter(cond) psdf: DataFrame = DataFrame(internal) null_counts = [] for label in internal.column_labels: psser = psdf._psser_for(label) cond = psser.isnull().spark.column null_counts.append(F.sum(F.when((~ cond), 1).otherwise(0)).alias(name_like_string(label))) counts = internal.spark_frame.select((null_counts + [F.count('*')])).head() if (thresh is not None): column_labels = [label for (label, cnt) in zip(internal.column_labels, counts) if ((cnt or 0) >= int(thresh))] elif (how == 'any'): column_labels = [label for (label, cnt) in zip(internal.column_labels, counts) if ((cnt or 0) == counts[(- 1)])] elif (how == 'all'): column_labels = [label for (label, cnt) in zip(internal.column_labels, counts) if ((cnt or 0) > 0)] psdf = self[column_labels] if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf
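A hedged sketch exercising the main `dropna` options shown in the docstring above, assuming `pyspark.pandas` is importable.

import pyspark.pandas as ps

df = ps.DataFrame({"name": ["Alfred", "Batman", "Catwoman"],
                   "toy": [None, "Batmobile", "Bullwhip"],
                   "born": [None, "1940-04-25", None]})
df.dropna()                         # drop rows containing any NA
df.dropna(axis="columns")           # drop columns containing any NA
df.dropna(thresh=2)                 # keep rows with at least 2 non-NA values
df.dropna(subset=["name", "born"])  # only consider these columns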
def fillna(self, value: Optional[Union[(Any, Dict[(Name, Any)])]]=None, method: Optional[str]=None, axis: Optional[Axis]=None, inplace: bool=False, limit: Optional[int]=None) -> Optional['DataFrame']: "Fill NA/NaN values.\n\n .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series pad / ffill: propagate last valid\n observation forward to next valid backfill / bfill:\n use NEXT valid observation to fill gap\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 1.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 1.0 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 1.0 4\n " axis = validate_axis(axis) if (axis != 0): raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if (value is not None): if (not isinstance(value, (float, int, str, bool, dict, pd.Series))): raise TypeError(('Unsupported type %s' % type(value).__name__)) if (limit is not None): raise ValueError('limit parameter for value is not support now') if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if (not isinstance(v, (float, int, str, bool))): raise TypeError(('Unsupported type %s' % type(v).__name__)) value = {(k if is_name_like_tuple(k) else (k,)): v for (k, v) in value.items()} def op(psser: ps.Series) -> ps.Series: label = psser._column_label for (k, v) in value.items(): if (k == label[:len(k)]): return psser._fillna(value=value[k], method=method, axis=axis, limit=limit) else: return psser else: def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) elif (method is not None): def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) else: raise ValueError("Must specify a fillna 'value' or 'method' parameter.") psdf = self._apply_series_op(op, should_resolve=(method is not None)) inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: self._update_internal_frame(psdf._internal, requires_same_anchor=False) return None else: return psdf
1,108,271,819,624,094,200
Fill NA/NaN values. .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ps.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4
python/pyspark/pandas/frame.py
fillna
Flyangz/spark
python
def fillna(self, value: Optional[Union[(Any, Dict[(Name, Any)])]]=None, method: Optional[str]=None, axis: Optional[Axis]=None, inplace: bool=False, limit: Optional[int]=None) -> Optional['DataFrame']: "Fill NA/NaN values.\n\n .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series pad / ffill: propagate last valid\n observation forward to next valid backfill / bfill:\n use NEXT valid observation to fill gap\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 1.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 1.0 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 1.0 4\n " axis = validate_axis(axis) if (axis != 0): raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if (value is not None): if (not isinstance(value, (float, int, str, bool, dict, pd.Series))): raise TypeError(('Unsupported type %s' % type(value).__name__)) if (limit is not None): raise ValueError('limit parameter for value is not support now') if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if (not isinstance(v, (float, int, str, bool))): raise TypeError(('Unsupported type %s' % type(v).__name__)) value = {(k if is_name_like_tuple(k) else (k,)): v for (k, v) in value.items()} def op(psser: ps.Series) -> ps.Series: label = psser._column_label for (k, v) in value.items(): if (k == label[:len(k)]): return psser._fillna(value=value[k], method=method, axis=axis, limit=limit) else: return psser else: def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) elif (method is not None): def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) else: raise ValueError("Must specify a fillna 'value' or 'method' parameter.") psdf = self._apply_series_op(op, should_resolve=(method is not None)) inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: self._update_internal_frame(psdf._internal, requires_same_anchor=False) return None else: return psdf
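A short sketch of `fillna` based on the docstring above; note the docstring's warning that method-based filling moves all data into a single partition.

import pyspark.pandas as ps

df = ps.DataFrame({"A": [None, 3.0, None, None],
                   "B": [2.0, 4.0, None, 3.0]})
df.fillna(0)                  # constant fill
df.fillna({"A": 0, "B": 1})   # per-column fill values
df.fillna(method="ffill")     # forward fill; see the performance note above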
def replace(self, to_replace: Optional[Union[(Any, List, Tuple, Dict)]]=None, value: Optional[Any]=None, inplace: bool=False, limit: Optional[int]=None, regex: bool=False, method: str='pad') -> Optional['DataFrame']: '\n Returns a new DataFrame replacing a value with another value.\n\n Parameters\n ----------\n to_replace : int, float, string, list, tuple or dict\n Value to be replaced.\n value : int, float, string, list or tuple\n Value to use to replace holes. The replacement value must be an int, float,\n or string.\n If value is a list or tuple, value should be of the same length with to_replace.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n\n Returns\n -------\n DataFrame\n Object after replacement.\n\n Examples\n --------\n >>> df = ps.DataFrame({"name": [\'Ironman\', \'Captain America\', \'Thor\', \'Hulk\'],\n ... "weapon": [\'Mark-45\', \'Shield\', \'Mjolnir\', \'Smash\']},\n ... columns=[\'name\', \'weapon\'])\n >>> df\n name weapon\n 0 Ironman Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Scalar `to_replace` and `value`\n\n >>> df.replace(\'Ironman\', \'War-Machine\')\n name weapon\n 0 War-Machine Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n List like `to_replace` and `value`\n\n >>> df.replace([\'Ironman\', \'Captain America\'], [\'Rescue\', \'Hawkeye\'], inplace=True)\n >>> df\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Dicts can be used to specify different replacement values for different existing values\n To use a dict in this way the value parameter should be None\n\n >>> df.replace({\'Mjolnir\': \'Stormbuster\'})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n Dict can specify that different values should be replaced in different columns\n The value parameter should not be None in this case\n\n >>> df.replace({\'weapon\': \'Mjolnir\'}, \'Stormbuster\')\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n Nested dictionaries\n The value parameter should be None to use a nested dict in this way\n\n >>> df.replace({\'weapon\': {\'Mjolnir\': \'Stormbuster\'}})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n ' if (method != 'pad'): raise NotImplementedError("replace currently works only for method='pad") if (limit is not None): raise NotImplementedError('replace currently works only when limit=None') if (regex is not False): raise NotImplementedError("replace currently doesn't supports regex") inplace = validate_bool_kwarg(inplace, 'inplace') if ((value is not None) and (not isinstance(value, (int, float, str, list, tuple, dict)))): raise TypeError('Unsupported type {}'.format(type(value).__name__)) if ((to_replace is not None) and (not isinstance(to_replace, (int, float, str, list, tuple, dict)))): raise TypeError('Unsupported type {}'.format(type(to_replace).__name__)) if (isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple))): if (len(value) != len(to_replace)): raise ValueError('Length of to_replace and value must be same') if (isinstance(to_replace, dict) and ((value is not None) or all((isinstance(i, dict) for i in to_replace.values())))): to_replace_dict = to_replace def op(psser: ps.Series) -> ps.Series: if (psser.name in to_replace_dict): return psser.replace(to_replace=to_replace_dict[psser.name], value=value, regex=regex) else: return psser else: def op(psser: ps.Series) -> ps.Series: return 
psser.replace(to_replace=to_replace, value=value, regex=regex) psdf = self._apply_series_op(op) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf
-3,898,269,011,529,222,000
Returns a new DataFrame replacing a value with another value. Parameters ---------- to_replace : int, float, string, list, tuple or dict Value to be replaced. value : int, float, string, list or tuple Value to use to replace holes. The replacement value must be an int, float, or string. If value is a list or tuple, value should be of the same length with to_replace. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame Object after replacement. Examples -------- >>> df = ps.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'], ... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']}, ... columns=['name', 'weapon']) >>> df name weapon 0 Ironman Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash Scalar `to_replace` and `value` >>> df.replace('Ironman', 'War-Machine') name weapon 0 War-Machine Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash List like `to_replace` and `value` >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True) >>> df name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Mjolnir 3 Hulk Smash Dicts can be used to specify different replacement values for different existing values To use a dict in this way the value parameter should be None >>> df.replace({'Mjolnir': 'Stormbuster'}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Dict can specify that different values should be replaced in different columns The value parameter should not be None in this case >>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster') name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Nested dictionaries The value parameter should be None to use a nested dict in this way >>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash
python/pyspark/pandas/frame.py
replace
Flyangz/spark
python
def replace(self, to_replace: Optional[Union[(Any, List, Tuple, Dict)]]=None, value: Optional[Any]=None, inplace: bool=False, limit: Optional[int]=None, regex: bool=False, method: str='pad') -> Optional['DataFrame']: '\n Returns a new DataFrame replacing a value with another value.\n\n Parameters\n ----------\n to_replace : int, float, string, list, tuple or dict\n Value to be replaced.\n value : int, float, string, list or tuple\n Value to use to replace holes. The replacement value must be an int, float,\n or string.\n If value is a list or tuple, value should be of the same length with to_replace.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n\n Returns\n -------\n DataFrame\n Object after replacement.\n\n Examples\n --------\n >>> df = ps.DataFrame({"name": [\'Ironman\', \'Captain America\', \'Thor\', \'Hulk\'],\n ... "weapon": [\'Mark-45\', \'Shield\', \'Mjolnir\', \'Smash\']},\n ... columns=[\'name\', \'weapon\'])\n >>> df\n name weapon\n 0 Ironman Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Scalar `to_replace` and `value`\n\n >>> df.replace(\'Ironman\', \'War-Machine\')\n name weapon\n 0 War-Machine Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n List like `to_replace` and `value`\n\n >>> df.replace([\'Ironman\', \'Captain America\'], [\'Rescue\', \'Hawkeye\'], inplace=True)\n >>> df\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Dicts can be used to specify different replacement values for different existing values\n To use a dict in this way the value parameter should be None\n\n >>> df.replace({\'Mjolnir\': \'Stormbuster\'})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n Dict can specify that different values should be replaced in different columns\n The value parameter should not be None in this case\n\n >>> df.replace({\'weapon\': \'Mjolnir\'}, \'Stormbuster\')\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n Nested dictionaries\n The value parameter should be None to use a nested dict in this way\n\n >>> df.replace({\'weapon\': {\'Mjolnir\': \'Stormbuster\'}})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n ' if (method != 'pad'): raise NotImplementedError("replace currently works only for method='pad") if (limit is not None): raise NotImplementedError('replace currently works only when limit=None') if (regex is not False): raise NotImplementedError("replace currently doesn't supports regex") inplace = validate_bool_kwarg(inplace, 'inplace') if ((value is not None) and (not isinstance(value, (int, float, str, list, tuple, dict)))): raise TypeError('Unsupported type {}'.format(type(value).__name__)) if ((to_replace is not None) and (not isinstance(to_replace, (int, float, str, list, tuple, dict)))): raise TypeError('Unsupported type {}'.format(type(to_replace).__name__)) if (isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple))): if (len(value) != len(to_replace)): raise ValueError('Length of to_replace and value must be same') if (isinstance(to_replace, dict) and ((value is not None) or all((isinstance(i, dict) for i in to_replace.values())))): to_replace_dict = to_replace def op(psser: ps.Series) -> ps.Series: if (psser.name in to_replace_dict): return psser.replace(to_replace=to_replace_dict[psser.name], value=value, regex=regex) else: return psser else: def op(psser: ps.Series) -> ps.Series: return 
psser.replace(to_replace=to_replace, value=value, regex=regex) psdf = self._apply_series_op(op) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf
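The following is an illustrative usage sketch for the replace record above, not part of the dataset itself; it assumes pyspark with the pandas API on Spark is installed (importing pyspark.pandas starts a SparkSession on first use), and the frame contents come from the record's own docstring example:

import pyspark.pandas as ps

# Frame from the docstring example above.
df = ps.DataFrame({"name": ["Ironman", "Captain America", "Thor", "Hulk"],
                   "weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"]},
                  columns=["name", "weapon"])

# Scalar to_replace/value returns a new frame; inplace is False by default.
print(df.replace("Ironman", "War-Machine"))

# A dict keyed by column name plus an explicit value restricts the
# replacement to that column, matching the docstring's weapon example.
print(df.replace({"weapon": "Mjolnir"}, "Stormbuster"))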
def clip(self, lower: Union[(float, int)]=None, upper: Union[(float, int)]=None) -> 'DataFrame': '\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum threshold value. All values below this threshold will be set to it.\n upper : float or int, default None\n Maximum threshold value. All values above this threshold will be set to it.\n\n Returns\n -------\n DataFrame\n DataFrame with the values outside the clip boundaries replaced.\n\n Examples\n --------\n >>> ps.DataFrame({\'A\': [0, 2, 4]}).clip(1, 3)\n A\n 0 1\n 1 2\n 2 3\n\n Notes\n -----\n One difference between this implementation and pandas is that running\n pd.DataFrame({\'A\': [\'a\', \'b\']}).clip(0, 1) will crash with "TypeError: \'<=\' not supported\n between instances of \'str\' and \'int\'" while ps.DataFrame({\'A\': [\'a\', \'b\']}).clip(0, 1)\n will output the original DataFrame, simply ignoring the incompatible types.\n ' if (is_list_like(lower) or is_list_like(upper)): raise TypeError(("List-like value are not supported for 'lower' and 'upper' at the " + 'moment')) if ((lower is None) and (upper is None)): return self return self._apply_series_op((lambda psser: psser.clip(lower=lower, upper=upper)))
2,908,171,955,569,142,000
Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- DataFrame DataFrame with the values outside the clip boundaries replaced. Examples -------- >>> ps.DataFrame({'A': [0, 2, 4]}).clip(1, 3) A 0 1 1 2 2 3 Notes ----- One difference between this implementation and pandas is that running pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ps.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original DataFrame, simply ignoring the incompatible types.
python/pyspark/pandas/frame.py
clip
Flyangz/spark
python
def clip(self, lower: Union[(float, int)]=None, upper: Union[(float, int)]=None) -> 'DataFrame': '\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum threshold value. All values below this threshold will be set to it.\n upper : float or int, default None\n Maximum threshold value. All values above this threshold will be set to it.\n\n Returns\n -------\n DataFrame\n DataFrame with the values outside the clip boundaries replaced.\n\n Examples\n --------\n >>> ps.DataFrame({\'A\': [0, 2, 4]}).clip(1, 3)\n A\n 0 1\n 1 2\n 2 3\n\n Notes\n -----\n One difference between this implementation and pandas is that running\n pd.DataFrame({\'A\': [\'a\', \'b\']}).clip(0, 1) will crash with "TypeError: \'<=\' not supported\n between instances of \'str\' and \'int\'" while ps.DataFrame({\'A\': [\'a\', \'b\']}).clip(0, 1)\n will output the original DataFrame, simply ignoring the incompatible types.\n ' if (is_list_like(lower) or is_list_like(upper)): raise TypeError(("List-like value are not supported for 'lower' and 'upper' at the " + 'moment')) if ((lower is None) and (upper is None)): return self return self._apply_series_op((lambda psser: psser.clip(lower=lower, upper=upper)))
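A short sketch of clip in use, again assuming a working pyspark.pandas installation; the data mirrors the docstring above and the variable names are only illustrative:

import pyspark.pandas as ps

df = ps.DataFrame({'A': [0, 2, 4]})
# Values below the lower bound become 1, values above the upper bound become 3.
print(df.clip(1, 3))
# With both bounds omitted the method simply returns the frame unchanged.
print(df.clip())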
def head(self, n: int=5) -> 'DataFrame': "\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n " if (n < 0): n = (len(self) + n) if (n <= 0): return DataFrame(self._internal.with_filter(SF.lit(False))) else: sdf = self._internal.resolved_copy.spark_frame if get_option('compute.ordered_head'): sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME) return DataFrame(self._internal.with_new_sdf(sdf.limit(n)))
-6,046,103,581,036,672,000
Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon
python/pyspark/pandas/frame.py
head
Flyangz/spark
python
def head(self, n: int=5) -> 'DataFrame': "\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n " if (n < 0): n = (len(self) + n) if (n <= 0): return DataFrame(self._internal.with_filter(SF.lit(False))) else: sdf = self._internal.resolved_copy.spark_frame if get_option('compute.ordered_head'): sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME) return DataFrame(self._internal.with_new_sdf(sdf.limit(n)))
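A brief sketch of head, based on the docstring's animal frame; pyspark.pandas is assumed available as ps:

import pyspark.pandas as ps

df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
                              'monkey', 'parrot', 'shark', 'whale', 'zebra']})
print(df.head(3))   # first three rows
# A negative n keeps len(df) + n rows (here 6), mirroring pandas and the
# n < 0 branch in the body above.
print(df.head(-3))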
def last(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select final periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the last few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. For instance,\n '3D' will display all the rows having their index within the last 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n " if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError("'last' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert (offset_ is not None) from_date = (cast(datetime.datetime, self.index.max()) - offset_) return cast(DataFrame, self.loc[from_date:])
4,773,362,931,813,385,000
Select final periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the last few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` Examples -------- >>> index = pd.date_range('2018-04-09', periods=4, freq='2D') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index) >>> psdf A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> psdf.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned.
python/pyspark/pandas/frame.py
last
Flyangz/spark
python
def last(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select final periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the last few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. For instance,\n '3D' will display all the rows having their index within the last 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n " if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError("'last' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert (offset_ is not None) from_date = (cast(datetime.datetime, self.index.max()) - offset_) return cast(DataFrame, self.loc[from_date:])
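An illustrative sketch of last on a datetime-indexed frame, taken from the record's docstring; pandas and pyspark.pandas are assumed installed:

import pandas as pd
import pyspark.pandas as ps

index = pd.date_range('2018-04-09', periods=4, freq='2D')
psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)
# Rows whose index falls within the final 3 calendar days (2018-04-13, 2018-04-15).
print(psdf.last('3D'))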
def first(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select first periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the first few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. For instance,\n '3D' will display all the rows having their index within the first 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calendar days were returned, not the first\n 3 observed days in the dataset, and therefore data for 2018-04-13 was\n not returned.\n " if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError("'first' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert (offset_ is not None) to_date = (cast(datetime.datetime, self.index.min()) + offset_) return cast(DataFrame, self.loc[:to_date])
-1,184,452,550,105,267,200
Select first periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the first 3 days. Returns ------- DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` Examples -------- >>> index = pd.date_range('2018-04-09', periods=4, freq='2D') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index) >>> psdf A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> psdf.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 observed days in the dataset, and therefore data for 2018-04-13 was not returned.
python/pyspark/pandas/frame.py
first
Flyangz/spark
python
def first(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select first periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the first few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. For instance,\n '3D' will display all the rows having their index within the first 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calendar days were returned, not the first\n 3 observed days in the dataset, and therefore data for 2018-04-13 was\n not returned.\n " if (not isinstance(self.index, ps.DatetimeIndex)): raise TypeError("'first' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert (offset_ is not None) to_date = (cast(datetime.datetime, self.index.min()) + offset_) return cast(DataFrame, self.loc[:to_date])
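A matching sketch for first, using the same datetime-indexed frame as the docstring; note that a non-DatetimeIndex would raise TypeError per the body above:

import pandas as pd
import pyspark.pandas as ps

index = pd.date_range('2018-04-09', periods=4, freq='2D')
psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)
# Rows whose index falls within the first 3 calendar days (2018-04-09, 2018-04-11).
print(psdf.first('3D'))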
def pivot_table(self, values: Optional[Union[(Name, List[Name])]]=None, index: Optional[List[Name]]=None, columns: Optional[Name]=None, aggfunc: Union[(str, Dict[(Name, str)])]='mean', fill_value: Optional[Any]=None) -> 'DataFrame': '\n Create a spreadsheet-style pivot table as a DataFrame. The levels in\n the pivot table will be stored in MultiIndex objects (hierarchical\n indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------\n values : column to aggregate.\n They should be either a list less than three or a string.\n index : column (string) or list of columns\n If an array is passed, it must be the same length as the data.\n The list should contain string.\n columns : column\n Columns used in the pivot operation. Only one column is supported and\n it should be a string.\n aggfunc : function (string), dict, default mean\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with.\n\n Returns\n -------\n table : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",\n ... "bar", "bar", "bar", "bar"],\n ... "B": ["one", "one", "one", "two", "two",\n ... "one", "one", "two", "two"],\n ... "C": ["small", "large", "large", "small",\n ... "small", "large", "small", "small",\n ... "large"],\n ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},\n ... columns=[\'A\', \'B\', \'C\', \'D\', \'E\'])\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = df.pivot_table(values=\'D\', index=[\'A\', \'B\'],\n ... columns=\'C\', aggfunc=\'sum\')\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4.0 5\n two 7.0 6\n foo one 4.0 1\n two NaN 6\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = df.pivot_table(values=\'D\', index=[\'A\', \'B\'],\n ... columns=\'C\', aggfunc=\'sum\', fill_value=0)\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = df.pivot_table(values=[\'D\'], index =[\'C\'],\n ... columns="A", aggfunc={\'D\': \'mean\'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D\n A bar foo\n C\n large 5.5 2.000000\n small 5.5 2.333333\n\n The next example aggregates on multiple values.\n\n >>> table = df.pivot_table(index=[\'C\'], columns="A", values=[\'D\', \'E\'],\n ... 
aggfunc={\'D\': \'mean\', \'E\': \'sum\'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D E\n A bar foo bar foo\n C\n large 5.5 2.000000 15 9\n small 5.5 2.333333 17 13\n ' if (not is_name_like_value(columns)): raise TypeError('columns should be one column name.') if ((not is_name_like_value(values)) and (not (isinstance(values, list) and all((is_name_like_value(v) for v in values))))): raise TypeError('values should be one column or list of columns.') if ((not isinstance(aggfunc, str)) and ((not isinstance(aggfunc, dict)) or (not all(((is_name_like_value(key) and isinstance(value, str)) for (key, value) in aggfunc.items()))))): raise TypeError('aggfunc must be a dict mapping from column name to aggregate functions (string).') if (isinstance(aggfunc, dict) and (index is None)): raise NotImplementedError("pivot_table doesn't support aggfunc as dict and without index.") if (isinstance(values, list) and (index is None)): raise NotImplementedError("values can't be a list without index.") if (columns not in self.columns): raise ValueError('Wrong columns {}.'.format(name_like_string(columns))) if (not is_name_like_tuple(columns)): columns = (columns,) if isinstance(values, list): values = [(col if is_name_like_tuple(col) else (col,)) for col in values] if (not all((isinstance(self._internal.spark_type_for(col), NumericType) for col in values))): raise TypeError('values should be a numeric type.') else: values = (values if is_name_like_tuple(values) else (values,)) if (not isinstance(self._internal.spark_type_for(values), NumericType)): raise TypeError('values should be a numeric type.') if isinstance(aggfunc, str): if isinstance(values, list): agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(self._internal.spark_column_name_for(value), aggfunc)) for value in values] else: agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(self._internal.spark_column_name_for(values), aggfunc))] elif isinstance(aggfunc, dict): aggfunc = {(key if is_name_like_tuple(key) else (key,)): value for (key, value) in aggfunc.items()} agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(self._internal.spark_column_name_for(key), value)) for (key, value) in aggfunc.items()] agg_columns = [key for (key, _) in aggfunc.items()] if (set(agg_columns) != set(values)): raise ValueError('Columns in aggfunc must be the same as values.') sdf = self._internal.resolved_copy.spark_frame if (index is None): sdf = sdf.groupBy().pivot(pivot_col=self._internal.spark_column_name_for(columns)).agg(*agg_cols) elif isinstance(index, list): index = [(label if is_name_like_tuple(label) else (label,)) for label in index] sdf = sdf.groupBy([self._internal.spark_column_name_for(label) for label in index]).pivot(pivot_col=self._internal.spark_column_name_for(columns)).agg(*agg_cols) else: raise TypeError('index should be a None or a list of columns.') if ((fill_value is not None) and isinstance(fill_value, (int, float))): sdf = sdf.fillna(fill_value) psdf: DataFrame if (index is not None): index_columns = [self._internal.spark_column_name_for(label) for label in index] index_fields = [self._internal.field_for(label) for label in index] if isinstance(values, list): data_columns = [column for column in sdf.columns if (column not in index_columns)] if (len(values) > 1): data_columns.sort(key=(lambda x: x.split('_', 1)[1])) sdf = sdf.select((index_columns + data_columns)) column_name_to_index = dict(zip(self._internal.data_spark_column_names, self._internal.column_labels)) column_labels = [tuple((list(column_name_to_index[name.split('_')[1]]) + 
[name.split('_')[0]])) for name in data_columns] column_label_names = (([cast(Optional[Name], None)] * column_labels_level(values)) + [columns]) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names) psdf = DataFrame(internal) else: column_labels = [tuple((list(values[0]) + [column])) for column in data_columns] column_label_names = (([cast(Optional[Name], None)] * len(values[0])) + [columns]) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names) psdf = DataFrame(internal) else: internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_label_names=[columns]) psdf = DataFrame(internal) else: if isinstance(values, list): index_values = values[(- 1)] else: index_values = values index_map: Dict[(str, Optional[Label])] = {} for (i, index_value) in enumerate(index_values): colname = SPARK_INDEX_NAME_FORMAT(i) sdf = sdf.withColumn(colname, SF.lit(index_value)) index_map[colname] = None internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()], index_names=list(index_map.values()), column_label_names=[columns]) psdf = DataFrame(internal) psdf_columns = psdf.columns if isinstance(psdf_columns, pd.MultiIndex): psdf.columns = psdf_columns.set_levels(psdf_columns.levels[(- 1)].astype(spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)), level=(- 1)) else: psdf.columns = psdf_columns.astype(spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)) return psdf
9,017,028,467,064,092,000
Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- values : column to aggregate. They should be either a list less than three or a string. index : column (string) or list of columns If an array is passed, it must be the same length as the data. The list should contain string. columns : column Columns used in the pivot operation. Only one column is supported and it should be a string. aggfunc : function (string), dict, default mean If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with. Returns ------- table : DataFrame Examples -------- >>> df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}, ... columns=['A', 'B', 'C', 'D', 'E']) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum') >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4.0 5 two 7.0 6 foo one 4.0 1 two NaN 6 We can also fill missing values using the `fill_value` parameter. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum', fill_value=0) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 We can also calculate multiple types of aggregations for any given value column. >>> table = df.pivot_table(values=['D'], index =['C'], ... columns="A", aggfunc={'D': 'mean'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D A bar foo C large 5.5 2.000000 small 5.5 2.333333 The next example aggregates on multiple values. >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'], ... aggfunc={'D': 'mean', 'E': 'sum'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D E A bar foo bar foo C large 5.5 2.000000 15 9 small 5.5 2.333333 17 13
python/pyspark/pandas/frame.py
pivot_table
Flyangz/spark
python
def pivot_table(self, values: Optional[Union[(Name, List[Name])]]=None, index: Optional[List[Name]]=None, columns: Optional[Name]=None, aggfunc: Union[(str, Dict[(Name, str)])]='mean', fill_value: Optional[Any]=None) -> 'DataFrame': '\n Create a spreadsheet-style pivot table as a DataFrame. The levels in\n the pivot table will be stored in MultiIndex objects (hierarchical\n indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------\n values : column to aggregate.\n They should be either a list less than three or a string.\n index : column (string) or list of columns\n If an array is passed, it must be the same length as the data.\n The list should contain string.\n columns : column\n Columns used in the pivot operation. Only one column is supported and\n it should be a string.\n aggfunc : function (string), dict, default mean\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with.\n\n Returns\n -------\n table : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",\n ... "bar", "bar", "bar", "bar"],\n ... "B": ["one", "one", "one", "two", "two",\n ... "one", "one", "two", "two"],\n ... "C": ["small", "large", "large", "small",\n ... "small", "large", "small", "small",\n ... "large"],\n ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},\n ... columns=[\'A\', \'B\', \'C\', \'D\', \'E\'])\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = df.pivot_table(values=\'D\', index=[\'A\', \'B\'],\n ... columns=\'C\', aggfunc=\'sum\')\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4.0 5\n two 7.0 6\n foo one 4.0 1\n two NaN 6\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = df.pivot_table(values=\'D\', index=[\'A\', \'B\'],\n ... columns=\'C\', aggfunc=\'sum\', fill_value=0)\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = df.pivot_table(values=[\'D\'], index =[\'C\'],\n ... columns="A", aggfunc={\'D\': \'mean\'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D\n A bar foo\n C\n large 5.5 2.000000\n small 5.5 2.333333\n\n The next example aggregates on multiple values.\n\n >>> table = df.pivot_table(index=[\'C\'], columns="A", values=[\'D\', \'E\'],\n ... 
aggfunc={\'D\': \'mean\', \'E\': \'sum\'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D E\n A bar foo bar foo\n C\n large 5.5 2.000000 15 9\n small 5.5 2.333333 17 13\n ' if (not is_name_like_value(columns)): raise TypeError('columns should be one column name.') if ((not is_name_like_value(values)) and (not (isinstance(values, list) and all((is_name_like_value(v) for v in values))))): raise TypeError('values should be one column or list of columns.') if ((not isinstance(aggfunc, str)) and ((not isinstance(aggfunc, dict)) or (not all(((is_name_like_value(key) and isinstance(value, str)) for (key, value) in aggfunc.items()))))): raise TypeError('aggfunc must be a dict mapping from column name to aggregate functions (string).') if (isinstance(aggfunc, dict) and (index is None)): raise NotImplementedError("pivot_table doesn't support aggfunc as dict and without index.") if (isinstance(values, list) and (index is None)): raise NotImplementedError("values can't be a list without index.") if (columns not in self.columns): raise ValueError('Wrong columns {}.'.format(name_like_string(columns))) if (not is_name_like_tuple(columns)): columns = (columns,) if isinstance(values, list): values = [(col if is_name_like_tuple(col) else (col,)) for col in values] if (not all((isinstance(self._internal.spark_type_for(col), NumericType) for col in values))): raise TypeError('values should be a numeric type.') else: values = (values if is_name_like_tuple(values) else (values,)) if (not isinstance(self._internal.spark_type_for(values), NumericType)): raise TypeError('values should be a numeric type.') if isinstance(aggfunc, str): if isinstance(values, list): agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(self._internal.spark_column_name_for(value), aggfunc)) for value in values] else: agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(self._internal.spark_column_name_for(values), aggfunc))] elif isinstance(aggfunc, dict): aggfunc = {(key if is_name_like_tuple(key) else (key,)): value for (key, value) in aggfunc.items()} agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(self._internal.spark_column_name_for(key), value)) for (key, value) in aggfunc.items()] agg_columns = [key for (key, _) in aggfunc.items()] if (set(agg_columns) != set(values)): raise ValueError('Columns in aggfunc must be the same as values.') sdf = self._internal.resolved_copy.spark_frame if (index is None): sdf = sdf.groupBy().pivot(pivot_col=self._internal.spark_column_name_for(columns)).agg(*agg_cols) elif isinstance(index, list): index = [(label if is_name_like_tuple(label) else (label,)) for label in index] sdf = sdf.groupBy([self._internal.spark_column_name_for(label) for label in index]).pivot(pivot_col=self._internal.spark_column_name_for(columns)).agg(*agg_cols) else: raise TypeError('index should be a None or a list of columns.') if ((fill_value is not None) and isinstance(fill_value, (int, float))): sdf = sdf.fillna(fill_value) psdf: DataFrame if (index is not None): index_columns = [self._internal.spark_column_name_for(label) for label in index] index_fields = [self._internal.field_for(label) for label in index] if isinstance(values, list): data_columns = [column for column in sdf.columns if (column not in index_columns)] if (len(values) > 1): data_columns.sort(key=(lambda x: x.split('_', 1)[1])) sdf = sdf.select((index_columns + data_columns)) column_name_to_index = dict(zip(self._internal.data_spark_column_names, self._internal.column_labels)) column_labels = [tuple((list(column_name_to_index[name.split('_')[1]]) + 
[name.split('_')[0]])) for name in data_columns] column_label_names = (([cast(Optional[Name], None)] * column_labels_level(values)) + [columns]) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names) psdf = DataFrame(internal) else: column_labels = [tuple((list(values[0]) + [column])) for column in data_columns] column_label_names = (([cast(Optional[Name], None)] * len(values[0])) + [columns]) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names) psdf = DataFrame(internal) else: internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_label_names=[columns]) psdf = DataFrame(internal) else: if isinstance(values, list): index_values = values[(- 1)] else: index_values = values index_map: Dict[(str, Optional[Label])] = {} for (i, index_value) in enumerate(index_values): colname = SPARK_INDEX_NAME_FORMAT(i) sdf = sdf.withColumn(colname, SF.lit(index_value)) index_map[colname] = None internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()], index_names=list(index_map.values()), column_label_names=[columns]) psdf = DataFrame(internal) psdf_columns = psdf.columns if isinstance(psdf_columns, pd.MultiIndex): psdf.columns = psdf_columns.set_levels(psdf_columns.levels[(- 1)].astype(spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)), level=(- 1)) else: psdf.columns = psdf_columns.astype(spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)) return psdf
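A condensed usage sketch of pivot_table built from the record's docstring example; it assumes pyspark.pandas is importable as ps and shows only the sum aggregation with fill_value:

import pyspark.pandas as ps

df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
                         "bar", "bar", "bar", "bar"],
                   "B": ["one", "one", "one", "two", "two",
                         "one", "one", "two", "two"],
                   "C": ["small", "large", "large", "small",
                         "small", "large", "small", "small", "large"],
                   "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
# Sum D for every (A, B) index pair, one column per value of C;
# cells with no data are filled with 0 instead of NaN.
table = df.pivot_table(values='D', index=['A', 'B'], columns='C',
                       aggfunc='sum', fill_value=0)
print(table.sort_index())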
def pivot(self, index: Optional[Name]=None, columns: Optional[Name]=None, values: Optional[Name]=None) -> 'DataFrame': '\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a "pivot" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation.\n\n Parameters\n ----------\n index : string, optional\n Column to use to make new frame\'s index. If None, uses\n existing index.\n columns : string\n Column to use to make new frame\'s columns.\n values : string, object or a list of the previous\n Column(s) to use for populating new frame\'s values.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'foo\': [\'one\', \'one\', \'one\', \'two\', \'two\',\n ... \'two\'],\n ... \'bar\': [\'A\', \'B\', \'C\', \'A\', \'B\', \'C\'],\n ... \'baz\': [1, 2, 3, 4, 5, 6],\n ... \'zoo\': [\'x\', \'y\', \'z\', \'q\', \'w\', \'t\']},\n ... columns=[\'foo\', \'bar\', \'baz\', \'zoo\'])\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index=\'foo\', columns=\'bar\', values=\'baz\').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(columns=\'bar\', values=\'baz\').sort_index() # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n 0 1.0 NaN NaN\n 1 NaN 2.0 NaN\n 2 NaN NaN 3.0\n 3 4.0 NaN NaN\n 4 NaN 5.0 NaN\n 5 NaN NaN 6.0\n\n Notice that, unlike pandas raises an ValueError when duplicated values are found,\n pandas-on-Spark\'s pivot still works with its first value it meets during operation because\n pivot is an expensive operation and it is preferred to permissively execute over failing\n fast when processing large data.\n\n >>> df = ps.DataFrame({"foo": [\'one\', \'one\', \'two\', \'two\'],\n ... "bar": [\'A\', \'A\', \'B\', \'C\'],\n ... "baz": [1, 2, 3, 4]}, columns=[\'foo\', \'bar\', \'baz\'])\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n >>> df.pivot(index=\'foo\', columns=\'bar\', values=\'baz\').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1.0 NaN NaN\n two NaN 3.0 4.0\n\n It also support multi-index and multi-index column.\n >>> df.columns = pd.MultiIndex.from_tuples([(\'a\', \'foo\'), (\'a\', \'bar\'), (\'b\', \'baz\')])\n\n >>> df = df.set_index((\'a\', \'bar\'), append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n foo baz\n (a, bar)\n 0 A one 1\n 1 A one 2\n 2 B two 3\n 3 C two 4\n\n >>> df.pivot(columns=(\'a\', \'foo\'), values=(\'b\', \'baz\')).sort_index()\n ... 
# doctest: +NORMALIZE_WHITESPACE\n (\'a\', \'foo\') one two\n (a, bar)\n 0 A 1.0 NaN\n 1 A 2.0 NaN\n 2 B NaN 3.0\n 3 C NaN 4.0\n\n ' if (columns is None): raise ValueError('columns should be set.') if (values is None): raise ValueError('values should be set.') should_use_existing_index = (index is not None) if should_use_existing_index: df = self index_labels = [index] else: with option_context('compute.default_index_type', 'distributed'): df = self.reset_index() index_labels = df._internal.column_labels[:self._internal.index_level] df = df.pivot_table(index=index_labels, columns=columns, values=values, aggfunc='first') if should_use_existing_index: return df else: internal = df._internal.copy(index_names=self._internal.index_names) return DataFrame(internal)
-4,641,697,249,891,161,000
Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation. Parameters ---------- index : string, optional Column to use to make new frame's index. If None, uses existing index. columns : string Column to use to make new frame's columns. values : string, object or a list of the previous Column(s) to use for populating new frame's values. Returns ------- DataFrame Returns reshaped DataFrame. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. Examples -------- >>> df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}, ... columns=['foo', 'bar', 'baz', 'zoo']) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE bar A B C 0 1.0 NaN NaN 1 NaN 2.0 NaN 2 NaN NaN 3.0 3 4.0 NaN NaN 4 NaN 5.0 NaN 5 NaN NaN 6.0 Notice that, unlike pandas raises an ValueError when duplicated values are found, pandas-on-Spark's pivot still works with its first value it meets during operation because pivot is an expensive operation and it is preferred to permissively execute over failing fast when processing large data. >>> df = ps.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz']) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1.0 NaN NaN two NaN 3.0 4.0 It also support multi-index and multi-index column. >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')]) >>> df = df.set_index(('a', 'bar'), append=True) >>> df # doctest: +NORMALIZE_WHITESPACE a b foo baz (a, bar) 0 A one 1 1 A one 2 2 B two 3 3 C two 4 >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index() ... # doctest: +NORMALIZE_WHITESPACE ('a', 'foo') one two (a, bar) 0 A 1.0 NaN 1 A 2.0 NaN 2 B NaN 3.0 3 C NaN 4.0
python/pyspark/pandas/frame.py
pivot
Flyangz/spark
python
def pivot(self, index: Optional[Name]=None, columns: Optional[Name]=None, values: Optional[Name]=None) -> 'DataFrame': '\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a "pivot" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation.\n\n Parameters\n ----------\n index : string, optional\n Column to use to make new frame\'s index. If None, uses\n existing index.\n columns : string\n Column to use to make new frame\'s columns.\n values : string, object or a list of the previous\n Column(s) to use for populating new frame\'s values.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'foo\': [\'one\', \'one\', \'one\', \'two\', \'two\',\n ... \'two\'],\n ... \'bar\': [\'A\', \'B\', \'C\', \'A\', \'B\', \'C\'],\n ... \'baz\': [1, 2, 3, 4, 5, 6],\n ... \'zoo\': [\'x\', \'y\', \'z\', \'q\', \'w\', \'t\']},\n ... columns=[\'foo\', \'bar\', \'baz\', \'zoo\'])\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index=\'foo\', columns=\'bar\', values=\'baz\').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(columns=\'bar\', values=\'baz\').sort_index() # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n 0 1.0 NaN NaN\n 1 NaN 2.0 NaN\n 2 NaN NaN 3.0\n 3 4.0 NaN NaN\n 4 NaN 5.0 NaN\n 5 NaN NaN 6.0\n\n Notice that, unlike pandas raises an ValueError when duplicated values are found,\n pandas-on-Spark\'s pivot still works with its first value it meets during operation because\n pivot is an expensive operation and it is preferred to permissively execute over failing\n fast when processing large data.\n\n >>> df = ps.DataFrame({"foo": [\'one\', \'one\', \'two\', \'two\'],\n ... "bar": [\'A\', \'A\', \'B\', \'C\'],\n ... "baz": [1, 2, 3, 4]}, columns=[\'foo\', \'bar\', \'baz\'])\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n >>> df.pivot(index=\'foo\', columns=\'bar\', values=\'baz\').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1.0 NaN NaN\n two NaN 3.0 4.0\n\n It also support multi-index and multi-index column.\n >>> df.columns = pd.MultiIndex.from_tuples([(\'a\', \'foo\'), (\'a\', \'bar\'), (\'b\', \'baz\')])\n\n >>> df = df.set_index((\'a\', \'bar\'), append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n foo baz\n (a, bar)\n 0 A one 1\n 1 A one 2\n 2 B two 3\n 3 C two 4\n\n >>> df.pivot(columns=(\'a\', \'foo\'), values=(\'b\', \'baz\')).sort_index()\n ... 
# doctest: +NORMALIZE_WHITESPACE\n (\'a\', \'foo\') one two\n (a, bar)\n 0 A 1.0 NaN\n 1 A 2.0 NaN\n 2 B NaN 3.0\n 3 C NaN 4.0\n\n ' if (columns is None): raise ValueError('columns should be set.') if (values is None): raise ValueError('values should be set.') should_use_existing_index = (index is not None) if should_use_existing_index: df = self index_labels = [index] else: with option_context('compute.default_index_type', 'distributed'): df = self.reset_index() index_labels = df._internal.column_labels[:self._internal.index_level] df = df.pivot_table(index=index_labels, columns=columns, values=values, aggfunc='first') if should_use_existing_index: return df else: internal = df._internal.copy(index_names=self._internal.index_names) return DataFrame(internal)
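A short sketch of pivot, drawn from the docstring's foo/bar/baz example; as the body above shows, it delegates to pivot_table with aggfunc='first':

import pyspark.pandas as ps

df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
                   'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
                   'baz': [1, 2, 3, 4, 5, 6]})
# One row per foo value, one column per bar value, filled with baz.
print(df.pivot(index='foo', columns='bar', values='baz').sort_index())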
@property def columns(self) -> pd.Index: 'The column labels of the DataFrame.' names = [(name if ((name is None) or (len(name) > 1)) else name[0]) for name in self._internal.column_label_names] if (self._internal.column_labels_level > 1): columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names) else: columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0]) return columns
-4,037,255,681,569,786,400
The column labels of the DataFrame.
python/pyspark/pandas/frame.py
columns
Flyangz/spark
python
@property def columns(self) -> pd.Index: names = [(name if ((name is None) or (len(name) > 1)) else name[0]) for name in self._internal.column_label_names] if (self._internal.column_labels_level > 1): columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names) else: columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0]) return columns
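A tiny sketch of the columns property; the two-column frame and the MultiIndex assignment are illustrative assumptions, not taken from the record:

import pandas as pd
import pyspark.pandas as ps

df = ps.DataFrame({'x': [1, 2], 'y': [3, 4]})
print(df.columns)   # a plain pandas Index: Index(['x', 'y'], dtype='object')
# Assigning a pandas MultiIndex makes the property return a MultiIndex,
# which exercises the multi-level branch in the property body above.
df.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('a', 'y')])
print(df.columns)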
@property def dtypes(self) -> pd.Series: "Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n pd.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': list('abc'),\n ... 'b': list(range(1, 4)),\n ... 'c': np.arange(3, 6).astype('i1'),\n ... 'd': np.arange(4.0, 7.0, dtype='float64'),\n ... 'e': [True, False, True],\n ... 'f': pd.date_range('20130101', periods=3)},\n ... columns=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> df.dtypes\n a object\n b int64\n c int8\n d float64\n e bool\n f datetime64[ns]\n dtype: object\n " return pd.Series([self._psser_for(label).dtype for label in self._internal.column_labels], index=pd.Index([(label if (len(label) > 1) else label[0]) for label in self._internal.column_labels]))
-7,807,592,717,761,942,000
Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ps.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object
python/pyspark/pandas/frame.py
dtypes
Flyangz/spark
python
@property def dtypes(self) -> pd.Series: "Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n pd.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': list('abc'),\n ... 'b': list(range(1, 4)),\n ... 'c': np.arange(3, 6).astype('i1'),\n ... 'd': np.arange(4.0, 7.0, dtype='float64'),\n ... 'e': [True, False, True],\n ... 'f': pd.date_range('20130101', periods=3)},\n ... columns=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> df.dtypes\n a object\n b int64\n c int8\n d float64\n e bool\n f datetime64[ns]\n dtype: object\n " return pd.Series([self._psser_for(label).dtype for label in self._internal.column_labels], index=pd.Index([(label if (len(label) > 1) else label[0]) for label in self._internal.column_labels]))
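A minimal sketch of dtypes using a trimmed version of the docstring's frame (only three of its columns are kept here):

import pandas as pd
import pyspark.pandas as ps

df = ps.DataFrame({'a': list('abc'),
                   'b': list(range(1, 4)),
                   'f': pd.date_range('20130101', periods=3)})
# Prints one dtype per column: object, int64, datetime64[ns].
print(df.dtypes)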
def select_dtypes(self, include: Optional[Union[(str, List[str])]]=None, exclude: Optional[Union[(str, List[str])]]=None) -> 'DataFrame': "\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. At least\n one of these parameters must be supplied. It also takes Spark SQL\n DDL type strings, for instance, 'string' and 'date'.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes()\n Traceback (most recent call last):\n ...\n ValueError: at least one of include or exclude must be nonempty\n\n * If ``include`` and ``exclude`` have overlapping elements\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes(include='a', exclude='a')\n Traceback (most recent call last):\n ...\n ValueError: include and exclude overlap on {'a'}\n\n Notes\n -----\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3,\n ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])\n >>> df\n a b c d\n 0 1 True 1.0 a\n 1 2 False 2.0 b\n 2 1 True 1.0 a\n 3 2 False 2.0 b\n 4 1 True 1.0 a\n 5 2 False 2.0 b\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'], exclude=['int'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int'])\n b c d\n 0 True 1.0 a\n 1 False 2.0 b\n 2 True 1.0 a\n 3 False 2.0 b\n 4 True 1.0 a\n 5 False 2.0 b\n\n Spark SQL DDL type strings can be used as well.\n\n >>> df.select_dtypes(exclude=['string'])\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n " from pyspark.sql.types import _parse_datatype_string include_list: List[str] if (not is_list_like(include)): include_list = ([cast(str, include)] if (include is not None) else []) else: include_list = list(include) exclude_list: List[str] if (not is_list_like(exclude)): exclude_list = ([cast(str, exclude)] if (exclude is not None) else []) else: exclude_list = list(exclude) if (not any((include_list, exclude_list))): raise ValueError('at least one of include or exclude must be nonempty') if set(include_list).intersection(set(exclude_list)): raise ValueError('include and exclude overlap on {inc_ex}'.format(inc_ex=set(include_list).intersection(set(exclude_list)))) include_spark_type = [] for inc in include_list: try: include_spark_type.append(_parse_datatype_string(inc)) except BaseException: pass exclude_spark_type = [] for exc in exclude_list: try: exclude_spark_type.append(_parse_datatype_string(exc)) except BaseException: pass include_numpy_type = [] for inc in include_list: try: include_numpy_type.append(infer_dtype_from_object(inc)) except BaseException: pass exclude_numpy_type = [] for exc in exclude_list: try: exclude_numpy_type.append(infer_dtype_from_object(exc)) except BaseException: pass column_labels = [] for label in self._internal.column_labels: if (len(include_list) > 0): should_include = 
((infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type) or (self._internal.spark_type_for(label) in include_spark_type)) else: should_include = (not ((infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type) or (self._internal.spark_type_for(label) in exclude_spark_type))) if should_include: column_labels.append(label) return DataFrame(self._internal.with_new_columns([self._psser_for(label) for label in column_labels]))
2,451,480,011,340,020,700
Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. It also takes Spark SQL DDL type strings, for instance, 'string' and 'date'. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes() Traceback (most recent call last): ... ValueError: at least one of include or exclude must be nonempty * If ``include`` and ``exclude`` have overlapping elements >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes(include='a', exclude='a') Traceback (most recent call last): ... ValueError: include and exclude overlap on {'a'} Notes ----- * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` Examples -------- >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3, ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 True 1.0 a 1 2 False 2.0 b 2 1 True 1.0 a 3 2 False 2.0 b 4 1 True 1.0 a 5 2 False 2.0 b >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64'], exclude=['int']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c d 0 True 1.0 a 1 False 2.0 b 2 True 1.0 a 3 False 2.0 b 4 True 1.0 a 5 False 2.0 b Spark SQL DDL type strings can be used as well. >>> df.select_dtypes(exclude=['string']) a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0
python/pyspark/pandas/frame.py
select_dtypes
Flyangz/spark
python
def select_dtypes(self, include: Optional[Union[(str, List[str])]]=None, exclude: Optional[Union[(str, List[str])]]=None) -> 'DataFrame': "\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. At least\n one of these parameters must be supplied. It also takes Spark SQL\n DDL type strings, for instance, 'string' and 'date'.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes()\n Traceback (most recent call last):\n ...\n ValueError: at least one of include or exclude must be nonempty\n\n * If ``include`` and ``exclude`` have overlapping elements\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes(include='a', exclude='a')\n Traceback (most recent call last):\n ...\n ValueError: include and exclude overlap on {'a'}\n\n Notes\n -----\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3,\n ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])\n >>> df\n a b c d\n 0 1 True 1.0 a\n 1 2 False 2.0 b\n 2 1 True 1.0 a\n 3 2 False 2.0 b\n 4 1 True 1.0 a\n 5 2 False 2.0 b\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'], exclude=['int'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int'])\n b c d\n 0 True 1.0 a\n 1 False 2.0 b\n 2 True 1.0 a\n 3 False 2.0 b\n 4 True 1.0 a\n 5 False 2.0 b\n\n Spark SQL DDL type strings can be used as well.\n\n >>> df.select_dtypes(exclude=['string'])\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n " from pyspark.sql.types import _parse_datatype_string include_list: List[str] if (not is_list_like(include)): include_list = ([cast(str, include)] if (include is not None) else []) else: include_list = list(include) exclude_list: List[str] if (not is_list_like(exclude)): exclude_list = ([cast(str, exclude)] if (exclude is not None) else []) else: exclude_list = list(exclude) if (not any((include_list, exclude_list))): raise ValueError('at least one of include or exclude must be nonempty') if set(include_list).intersection(set(exclude_list)): raise ValueError('include and exclude overlap on {inc_ex}'.format(inc_ex=set(include_list).intersection(set(exclude_list)))) include_spark_type = [] for inc in include_list: try: include_spark_type.append(_parse_datatype_string(inc)) except BaseException: pass exclude_spark_type = [] for exc in exclude_list: try: exclude_spark_type.append(_parse_datatype_string(exc)) except BaseException: pass include_numpy_type = [] for inc in include_list: try: include_numpy_type.append(infer_dtype_from_object(inc)) except BaseException: pass exclude_numpy_type = [] for exc in exclude_list: try: exclude_numpy_type.append(infer_dtype_from_object(exc)) except BaseException: pass column_labels = [] for label in self._internal.column_labels: if (len(include_list) > 0): should_include = 
((infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type) or (self._internal.spark_type_for(label) in include_spark_type)) else: should_include = (not ((infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type) or (self._internal.spark_type_for(label) in exclude_spark_type))) if should_include: column_labels.append(label) return DataFrame(self._internal.with_new_columns([self._psser_for(label) for label in column_labels]))
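An illustrative sketch of select_dtypes on the docstring's frame, showing both a numpy-style dtype filter and a Spark SQL DDL type string:

import pyspark.pandas as ps

df = ps.DataFrame({'a': [1, 2] * 3,
                   'b': [True, False] * 3,
                   'c': [1.0, 2.0] * 3,
                   'd': ['a', 'b'] * 3})
print(df.select_dtypes(include='bool'))      # keeps only column b
print(df.select_dtypes(exclude=['string']))  # DDL type string drops column d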
def droplevel(self, level: Union[(int, Name, List[Union[(int, Name)]])], axis: Axis=0) -> 'DataFrame': '\n Return DataFrame with requested index / column level(s) removed.\n\n Parameters\n ----------\n level: int, str, or list-like\n If a string is given, must be the name of a level If list-like, elements must\n be names or positional indexes of levels.\n\n axis: {0 or ‘index’, 1 or ‘columns’}, default 0\n\n Returns\n -------\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[3, 4], [7, 8], [11, 12]],\n ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]),\n ... )\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... (\'c\', \'e\'), (\'d\', \'f\')\n ... ], names=[\'level_1\', \'level_2\'])\n\n >>> df # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel(\'a\') # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel(\'level_2\', axis=1) # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n ' axis = validate_axis(axis) if (axis == 0): if (not isinstance(level, (tuple, list))): level = [level] names = self.index.names nlevels = self._internal.index_level int_level = set() for n in level: if isinstance(n, int): if (n < 0): n = (n + nlevels) if (n < 0): raise IndexError('Too many levels: Index has only {} levels, {} is not a valid level number'.format(nlevels, (n - nlevels))) if (n >= nlevels): raise IndexError('Too many levels: Index has only {} levels, not {}'.format(nlevels, (n + 1))) else: if (n not in names): raise KeyError('Level {} not found'.format(n)) n = names.index(n) int_level.add(n) if (len(level) >= nlevels): raise ValueError('Cannot remove {} levels from an index with {} levels: at least one level must be left.'.format(len(level), nlevels)) (index_spark_columns, index_names, index_fields) = zip(*[item for (i, item) in enumerate(zip(self._internal.index_spark_columns, self._internal.index_names, self._internal.index_fields)) if (i not in int_level)]) internal = self._internal.copy(index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_fields=list(index_fields)) return DataFrame(internal) else: psdf = self.copy() psdf.columns = psdf.columns.droplevel(level) return psdf
6,361,529,296,815,516,000
Return DataFrame with requested index / column level(s) removed. Parameters ---------- level: int, str, or list-like If a string is given, must be the name of a level. If list-like, elements must be names or positional indexes of levels. axis: {0 or ‘index’, 1 or ‘columns’}, default 0 Returns ------- DataFrame with requested index / column level(s) removed. Examples -------- >>> df = ps.DataFrame( ... [[3, 4], [7, 8], [11, 12]], ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]), ... ) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12
python/pyspark/pandas/frame.py
droplevel
Flyangz/spark
python
def droplevel(self, level: Union[(int, Name, List[Union[(int, Name)]])], axis: Axis=0) -> 'DataFrame': '\n Return DataFrame with requested index / column level(s) removed.\n\n Parameters\n ----------\n level: int, str, or list-like\n If a string is given, must be the name of a level If list-like, elements must\n be names or positional indexes of levels.\n\n axis: {0 or ‘index’, 1 or ‘columns’}, default 0\n\n Returns\n -------\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[3, 4], [7, 8], [11, 12]],\n ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]),\n ... )\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... (\'c\', \'e\'), (\'d\', \'f\')\n ... ], names=[\'level_1\', \'level_2\'])\n\n >>> df # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel(\'a\') # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel(\'level_2\', axis=1) # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n ' axis = validate_axis(axis) if (axis == 0): if (not isinstance(level, (tuple, list))): level = [level] names = self.index.names nlevels = self._internal.index_level int_level = set() for n in level: if isinstance(n, int): if (n < 0): n = (n + nlevels) if (n < 0): raise IndexError('Too many levels: Index has only {} levels, {} is not a valid level number'.format(nlevels, (n - nlevels))) if (n >= nlevels): raise IndexError('Too many levels: Index has only {} levels, not {}'.format(nlevels, (n + 1))) else: if (n not in names): raise KeyError('Level {} not found'.format(n)) n = names.index(n) int_level.add(n) if (len(level) >= nlevels): raise ValueError('Cannot remove {} levels from an index with {} levels: at least one level must be left.'.format(len(level), nlevels)) (index_spark_columns, index_names, index_fields) = zip(*[item for (i, item) in enumerate(zip(self._internal.index_spark_columns, self._internal.index_names, self._internal.index_fields)) if (i not in int_level)]) internal = self._internal.copy(index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_fields=list(index_fields)) return DataFrame(internal) else: psdf = self.copy() psdf.columns = psdf.columns.droplevel(level) return psdf
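As a companion to the droplevel record above, here is a short sketch that mirrors its doctests; it is an illustration rather than part of the extracted data, and it assumes both pandas and pyspark.pandas are importable.

import pandas as pd
import pyspark.pandas as ps

midx = pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"])
df = ps.DataFrame([[3, 4], [7, 8], [11, 12]], index=midx)
df.columns = pd.MultiIndex.from_tuples([('c', 'e'), ('d', 'f')],
                                       names=['level_1', 'level_2'])

rows = df.droplevel('a')                # remove index level "a" (axis=0 is the default)
cols = df.droplevel('level_2', axis=1)  # remove a column level by name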
def drop(self, labels: Optional[Union[(Name, List[Name])]]=None, axis: Optional[Axis]=0, index: Union[(Name, List[Name])]=None, columns: Union[(Name, List[Name])]=None) -> 'DataFrame': "\n Drop specified labels from columns.\n\n Remove rows and/or columns by specifying label names and corresponding axis,\n or by specifying directly index and/or column names.\n Drop rows of a MultiIndex DataFrame is not supported yet.\n\n Parameters\n ----------\n labels : single label or list-like\n Column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionchanged:: 3.3\n Set dropping by index by default.\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is quivalent to ``index=columns``).\n\n .. versionchanged:: 3.3\n Added dropping rows by 'index'.\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n\n Returns\n -------\n dropped : DataFrame\n\n See Also\n --------\n Series.dropna\n\n Examples\n --------\n >>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n >>> df.drop(index=[0, 1], columns='A')\n B C D\n 2 9 10 11\n\n Also support dropping columns for MultiIndex\n\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... columns=['x', 'y', 'z', 'w'])\n >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE\n b\n z w\n 0 5 7\n 1 6 8\n\n Notes\n -----\n Currently, dropping rows of a MultiIndex DataFrame is not supported yet.\n " if (labels is not None): if ((index is not None) or (columns is not None)): raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis = validate_axis(axis) if (axis == 1): return self.drop(index=index, columns=labels) else: return self.drop(index=labels, columns=columns) else: if ((index is None) and (columns is None)): raise ValueError("Need to specify at least one of 'labels' or 'columns' or 'index'") internal = self._internal if (index is not None): if (is_name_like_tuple(index) or is_name_like_value(index)): index = [index] if (len(index) > 0): if (internal.index_level == 1): internal = internal.resolved_copy if (len(index) <= ps.get_option('compute.isin_limit')): self_index_type = self.index.spark.data_type cond = (~ internal.index_spark_columns[0].isin([SF.lit(label).cast(self_index_type) for label in index])) internal = internal.with_filter(cond) else: index_sdf_col = '__index' index_sdf = default_session().createDataFrame(pd.DataFrame({index_sdf_col: index})) joined_sdf = internal.spark_frame.join(other=F.broadcast(index_sdf), on=(internal.index_spark_columns[0] == scol_for(index_sdf, index_sdf_col)), how='anti') internal = internal.with_new_sdf(joined_sdf) else: raise NotImplementedError('Drop rows of MultiIndex DataFrame is not supported yet') if (columns is not None): if is_name_like_tuple(columns): columns = [columns] elif is_name_like_value(columns): columns = [(columns,)] else: columns = [(col if is_name_like_tuple(col) else (col,)) for col in columns] if 
(len(columns) > 0): drop_column_labels = set((label for label in internal.column_labels for col in columns if (label[:len(col)] == col))) if (len(drop_column_labels) == 0): raise KeyError(columns) keep_columns_and_labels = [(column, label) for (column, label) in zip(self._internal.data_spark_column_names, self._internal.column_labels) if (label not in drop_column_labels)] (cols, labels) = (zip(*keep_columns_and_labels) if (len(keep_columns_and_labels) > 0) else ([], [])) internal = internal.with_new_columns([self._psser_for(label) for label in labels]) return DataFrame(internal)
4,914,197,836,815,870,000
Drop specified labels from columns. Remove rows and/or columns by specifying label names and corresponding axis, or by specifying directly index and/or column names. Dropping rows of a MultiIndex DataFrame is not supported yet. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionchanged:: 3.3 Dropping by index is now the default. index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionchanged:: 3.3 Added dropping rows by 'index'. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 >>> df.drop(index=[0, 1], columns='A') B C D 2 9 10 11 Dropping columns of a MultiIndex DataFrame is also supported >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df # doctest: +NORMALIZE_WHITESPACE a b x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE b z w 0 5 7 1 6 8 Notes ----- Currently, dropping rows of a MultiIndex DataFrame is not supported yet.
python/pyspark/pandas/frame.py
drop
Flyangz/spark
python
def drop(self, labels: Optional[Union[(Name, List[Name])]]=None, axis: Optional[Axis]=0, index: Union[(Name, List[Name])]=None, columns: Union[(Name, List[Name])]=None) -> 'DataFrame': "\n Drop specified labels from columns.\n\n Remove rows and/or columns by specifying label names and corresponding axis,\n or by specifying directly index and/or column names.\n Drop rows of a MultiIndex DataFrame is not supported yet.\n\n Parameters\n ----------\n labels : single label or list-like\n Column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionchanged:: 3.3\n Set dropping by index by default.\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is quivalent to ``index=columns``).\n\n .. versionchanged:: 3.3\n Added dropping rows by 'index'.\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n\n Returns\n -------\n dropped : DataFrame\n\n See Also\n --------\n Series.dropna\n\n Examples\n --------\n >>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n >>> df.drop(index=[0, 1], columns='A')\n B C D\n 2 9 10 11\n\n Also support dropping columns for MultiIndex\n\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... columns=['x', 'y', 'z', 'w'])\n >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE\n b\n z w\n 0 5 7\n 1 6 8\n\n Notes\n -----\n Currently, dropping rows of a MultiIndex DataFrame is not supported yet.\n " if (labels is not None): if ((index is not None) or (columns is not None)): raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis = validate_axis(axis) if (axis == 1): return self.drop(index=index, columns=labels) else: return self.drop(index=labels, columns=columns) else: if ((index is None) and (columns is None)): raise ValueError("Need to specify at least one of 'labels' or 'columns' or 'index'") internal = self._internal if (index is not None): if (is_name_like_tuple(index) or is_name_like_value(index)): index = [index] if (len(index) > 0): if (internal.index_level == 1): internal = internal.resolved_copy if (len(index) <= ps.get_option('compute.isin_limit')): self_index_type = self.index.spark.data_type cond = (~ internal.index_spark_columns[0].isin([SF.lit(label).cast(self_index_type) for label in index])) internal = internal.with_filter(cond) else: index_sdf_col = '__index' index_sdf = default_session().createDataFrame(pd.DataFrame({index_sdf_col: index})) joined_sdf = internal.spark_frame.join(other=F.broadcast(index_sdf), on=(internal.index_spark_columns[0] == scol_for(index_sdf, index_sdf_col)), how='anti') internal = internal.with_new_sdf(joined_sdf) else: raise NotImplementedError('Drop rows of MultiIndex DataFrame is not supported yet') if (columns is not None): if is_name_like_tuple(columns): columns = [columns] elif is_name_like_value(columns): columns = [(columns,)] else: columns = [(col if is_name_like_tuple(col) else (col,)) for col in columns] if 
(len(columns) > 0): drop_column_labels = set((label for label in internal.column_labels for col in columns if (label[:len(col)] == col))) if (len(drop_column_labels) == 0): raise KeyError(columns) keep_columns_and_labels = [(column, label) for (column, label) in zip(self._internal.data_spark_column_names, self._internal.column_labels) if (label not in drop_column_labels)] (cols, labels) = (zip(*keep_columns_and_labels) if (len(keep_columns_and_labels) > 0) else ([], [])) internal = internal.with_new_columns([self._psser_for(label) for label in labels]) return DataFrame(internal)
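A hedged usage sketch for the drop record above, assuming pyspark.pandas is available; per the record's versionchanged notes, dropping rows via index labels is Spark 3.3 behavior, and the variable names are only illustrative.

import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D'])

by_cols = df.drop(columns=['B', 'C'])      # drop columns by label
by_rows = df.drop(index=[0, 1])            # drop rows by index label (Spark >= 3.3)
both = df.drop(index=[0, 1], columns='A')  # drop rows and a column in one call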
def sort_values(self, by: Union[(Name, List[Name])], ascending: Union[(bool, List[bool])]=True, inplace: bool=False, na_position: str='last', ignore_index: bool=False) -> Optional['DataFrame']: "\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of str\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n if True, perform operation in-place\n na_position : {'first', 'last'}, default 'last'\n `first` puts NaNs at the beginning, `last` puts NaNs at the end\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 9, 8, 7, 4],\n ... 'col3': [0, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'],\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> df\n col1 col2 col3\n a A 2 0\n b B 9 9\n c None 8 4\n d D 7 2\n e C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n a A 2 0\n b B 9 9\n e C 4 3\n d D 7 2\n c None 8 4\n\n Ignore index for the resulting axis\n\n >>> df.sort_values(by=['col1'], ignore_index=True)\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 2 C 4 3\n 3 D 7 2\n 4 None 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n d D 7 2\n e C 4 3\n b B 9 9\n a A 2 0\n c None 8 4\n\n Sort by multiple columns\n\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'])\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 None 8 4\n " inplace = validate_bool_kwarg(inplace, 'inplace') new_by = self._prepare_sort_by_scols(by) psdf = self._sort(by=new_by, ascending=ascending, na_position=na_position) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return (psdf.reset_index(drop=True) if ignore_index else psdf)
7,202,430,524,788,683,000
Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ps.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3'], ... index=['a', 'b', 'c', 'd', 'e']) >>> df col1 col2 col3 a A 2 0 b B 9 9 c None 8 4 d D 7 2 e C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 a A 2 0 b B 9 9 e C 4 3 d D 7 2 c None 8 4 Ignore index for the resulting axis >>> df.sort_values(by=['col1'], ignore_index=True) col1 col2 col3 0 A 2 0 1 B 9 9 2 C 4 3 3 D 7 2 4 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 d D 7 2 e C 4 3 b B 9 9 a A 2 0 c None 8 4 Sort by multiple columns >>> df = ps.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4
python/pyspark/pandas/frame.py
sort_values
Flyangz/spark
python
def sort_values(self, by: Union[(Name, List[Name])], ascending: Union[(bool, List[bool])]=True, inplace: bool=False, na_position: str='last', ignore_index: bool=False) -> Optional['DataFrame']: "\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of str\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n if True, perform operation in-place\n na_position : {'first', 'last'}, default 'last'\n `first` puts NaNs at the beginning, `last` puts NaNs at the end\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 9, 8, 7, 4],\n ... 'col3': [0, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'],\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> df\n col1 col2 col3\n a A 2 0\n b B 9 9\n c None 8 4\n d D 7 2\n e C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n a A 2 0\n b B 9 9\n e C 4 3\n d D 7 2\n c None 8 4\n\n Ignore index for the resulting axis\n\n >>> df.sort_values(by=['col1'], ignore_index=True)\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 2 C 4 3\n 3 D 7 2\n 4 None 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n d D 7 2\n e C 4 3\n b B 9 9\n a A 2 0\n c None 8 4\n\n Sort by multiple columns\n\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'])\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 None 8 4\n " inplace = validate_bool_kwarg(inplace, 'inplace') new_by = self._prepare_sort_by_scols(by) psdf = self._sort(by=new_by, ascending=ascending, na_position=na_position) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return (psdf.reset_index(drop=True) if ignore_index else psdf)
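The sort_values record above can be exercised with the following sketch, which restates its doctest data; it assumes a working pyspark.pandas import and is not itself part of the dataset.

import pyspark.pandas as ps

df = ps.DataFrame({'col1': ['A', 'B', None, 'D', 'C'],
                   'col2': [2, 9, 8, 7, 4],
                   'col3': [0, 9, 4, 2, 3]},
                  index=['a', 'b', 'c', 'd', 'e'])

asc = df.sort_values(by=['col1'])                       # None/NaN rows are placed last by default
desc = df.sort_values(by='col1', ascending=False)       # descending order
renum = df.sort_values(by=['col1'], ignore_index=True)  # relabel the result 0..n-1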
def sort_index(self, axis: Axis=0, level: Optional[Union[(int, List[int])]]=None, ascending: bool=True, inplace: bool=False, kind: str=None, na_position: str='last') -> Optional['DataFrame']: "\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n axis : index, columns to direct sorting. Currently, only axis = 0 is supported.\n level : int or level name or list of ints or list of level names\n if not None, sort on values in specified index level(s)\n ascending : boolean, default True\n Sort ascending vs. descending\n inplace : bool, default False\n if True, perform operation in-place\n kind : str, default None\n pandas-on-Spark does not allow specifying the sorting algorithm at the moment,\n default None\n na_position : {‘first’, ‘last’}, default ‘last’\n first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for\n MultiIndex.\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])\n\n >>> df.sort_index()\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df.sort_index(ascending=False)\n A\n b 2.0\n a 1.0\n NaN NaN\n\n >>> df.sort_index(na_position='first')\n A\n NaN NaN\n a 1.0\n b 2.0\n\n >>> df.sort_index(inplace=True)\n >>> df\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]},\n ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],\n ... columns=['A', 'B'])\n\n >>> df.sort_index()\n A B\n a 0 3 0\n 1 2 1\n b 0 1 2\n 1 0 3\n\n >>> df.sort_index(level=1) # doctest: +SKIP\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n\n >>> df.sort_index(level=[1, 0])\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n " inplace = validate_bool_kwarg(inplace, 'inplace') axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('No other axis than 0 are supported at the moment') if (kind is not None): raise NotImplementedError('Specifying the sorting algorithm is not supported at the moment.') if ((level is None) or (is_list_like(level) and (len(level) == 0))): by = self._internal.index_spark_columns elif is_list_like(level): by = [self._internal.index_spark_columns[lvl] for lvl in level] else: by = [self._internal.index_spark_columns[level]] psdf = self._sort(by=by, ascending=ascending, na_position=na_position) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf
591,558,806,448,782,100
Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None pandas-on-Spark does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan]) >>> df.sort_index() A a 1.0 b 2.0 NaN NaN >>> df.sort_index(ascending=False) A b 2.0 a 1.0 NaN NaN >>> df.sort_index(na_position='first') A NaN NaN a 1.0 b 2.0 >>> df.sort_index(inplace=True) >>> df A a 1.0 b 2.0 NaN NaN >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]}, ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], ... columns=['A', 'B']) >>> df.sort_index() A B a 0 3 0 1 2 1 b 0 1 2 1 0 3 >>> df.sort_index(level=1) # doctest: +SKIP A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 >>> df.sort_index(level=[1, 0]) A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3
python/pyspark/pandas/frame.py
sort_index
Flyangz/spark
python
def sort_index(self, axis: Axis=0, level: Optional[Union[(int, List[int])]]=None, ascending: bool=True, inplace: bool=False, kind: str=None, na_position: str='last') -> Optional['DataFrame']: "\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n axis : index, columns to direct sorting. Currently, only axis = 0 is supported.\n level : int or level name or list of ints or list of level names\n if not None, sort on values in specified index level(s)\n ascending : boolean, default True\n Sort ascending vs. descending\n inplace : bool, default False\n if True, perform operation in-place\n kind : str, default None\n pandas-on-Spark does not allow specifying the sorting algorithm at the moment,\n default None\n na_position : {‘first’, ‘last’}, default ‘last’\n first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for\n MultiIndex.\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])\n\n >>> df.sort_index()\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df.sort_index(ascending=False)\n A\n b 2.0\n a 1.0\n NaN NaN\n\n >>> df.sort_index(na_position='first')\n A\n NaN NaN\n a 1.0\n b 2.0\n\n >>> df.sort_index(inplace=True)\n >>> df\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]},\n ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],\n ... columns=['A', 'B'])\n\n >>> df.sort_index()\n A B\n a 0 3 0\n 1 2 1\n b 0 1 2\n 1 0 3\n\n >>> df.sort_index(level=1) # doctest: +SKIP\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n\n >>> df.sort_index(level=[1, 0])\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n " inplace = validate_bool_kwarg(inplace, 'inplace') axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('No other axis than 0 are supported at the moment') if (kind is not None): raise NotImplementedError('Specifying the sorting algorithm is not supported at the moment.') if ((level is None) or (is_list_like(level) and (len(level) == 0))): by = self._internal.index_spark_columns elif is_list_like(level): by = [self._internal.index_spark_columns[lvl] for lvl in level] else: by = [self._internal.index_spark_columns[level]] psdf = self._sort(by=by, ascending=ascending, na_position=na_position) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf
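A small illustration of the sort_index record above; it assumes pyspark.pandas and numpy are importable, and it only replays the documented single-level index cases.

import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])

df.sort_index()                     # ascending by index label, NaN label last
df.sort_index(ascending=False)      # descending
df.sort_index(na_position='first')  # move the NaN label to the front
df.sort_index(inplace=True)         # sorts in place and returns None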
def swaplevel(self, i: Union[(int, Name)]=(- 2), j: Union[(int, Name)]=(- 1), axis: Axis=0) -> 'DataFrame': "\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n DataFrame with levels swapped in MultiIndex.\n\n Examples\n --------\n >>> midx = pd.MultiIndex.from_arrays(\n ... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size'])\n >>> midx # doctest: +SKIP\n MultiIndex([( 'red', 1, 's'),\n ('blue', 2, 'm')],\n names=['color', 'number', 'size'])\n\n Swap levels in a MultiIndex on index.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx)\n >>> psdf # doctest: +NORMALIZE_WHITESPACE\n x y\n color number size\n red 1 s 5 5\n blue 2 m 6 6\n\n >>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n >>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE\n x y\n number color size\n 1 red s 5 5\n 2 blue m 6 6\n\n >>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n Swap levels in a MultiIndex on columns.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]})\n >>> psdf.columns = midx\n >>> psdf\n color red blue\n number 1 2\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(0, 1, axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel('number', 'color', axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n " axis = validate_axis(axis) if (axis == 0): internal = self._swaplevel_index(i, j) else: assert (axis == 1) internal = self._swaplevel_columns(i, j) return DataFrame(internal)
-51,800,121,682,277,130
Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to swap levels on. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. Returns ------- DataFrame DataFrame with levels swapped in MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_arrays( ... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size']) >>> midx # doctest: +SKIP MultiIndex([( 'red', 1, 's'), ('blue', 2, 'm')], names=['color', 'number', 'size']) Swap levels in a MultiIndex on index. >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx) >>> psdf # doctest: +NORMALIZE_WHITESPACE x y color number size red 1 s 5 5 blue 2 m 6 6 >>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 >>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE x y number color size 1 red s 5 5 2 blue m 6 6 >>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 Swap levels in a MultiIndex on columns. >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}) >>> psdf.columns = midx >>> psdf color red blue number 1 2 size s m 0 5 5 1 6 6 >>> psdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> psdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> psdf.swaplevel(0, 1, axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6 >>> psdf.swaplevel('number', 'color', axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6
python/pyspark/pandas/frame.py
swaplevel
Flyangz/spark
python
def swaplevel(self, i: Union[(int, Name)]=(- 2), j: Union[(int, Name)]=(- 1), axis: Axis=0) -> 'DataFrame': "\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n DataFrame with levels swapped in MultiIndex.\n\n Examples\n --------\n >>> midx = pd.MultiIndex.from_arrays(\n ... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size'])\n >>> midx # doctest: +SKIP\n MultiIndex([( 'red', 1, 's'),\n ('blue', 2, 'm')],\n names=['color', 'number', 'size'])\n\n Swap levels in a MultiIndex on index.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx)\n >>> psdf # doctest: +NORMALIZE_WHITESPACE\n x y\n color number size\n red 1 s 5 5\n blue 2 m 6 6\n\n >>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n >>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE\n x y\n number color size\n 1 red s 5 5\n 2 blue m 6 6\n\n >>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n Swap levels in a MultiIndex on columns.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]})\n >>> psdf.columns = midx\n >>> psdf\n color red blue\n number 1 2\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(0, 1, axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel('number', 'color', axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n " axis = validate_axis(axis) if (axis == 0): internal = self._swaplevel_index(i, j) else: assert (axis == 1) internal = self._swaplevel_columns(i, j) return DataFrame(internal)
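To complement the swaplevel record above, the sketch below rebuilds its three-level index example; the level names and data come from the record's doctests, everything else is an assumption for illustration.

import pandas as pd
import pyspark.pandas as ps

midx = pd.MultiIndex.from_arrays([['red', 'blue'], [1, 2], ['s', 'm']],
                                 names=['color', 'number', 'size'])
psdf = ps.DataFrame({'x': [5, 6], 'y': [5, 6]}, index=midx)

psdf.swaplevel()                  # swap the last two index levels (defaults i=-2, j=-1)
psdf.swaplevel(0, 1)              # swap by level position
psdf.swaplevel('number', 'size')  # or by level name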
def swapaxes(self, i: Axis, j: Axis, copy: bool=True) -> 'DataFrame': "\n Interchange axes and swap values axes appropriately.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Parameters\n ----------\n i: {0 or 'index', 1 or 'columns'}. The axis to swap.\n j: {0 or 'index', 1 or 'columns'}. The axis to swap.\n copy : bool, default True.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> psdf = ps.DataFrame(\n ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c']\n ... )\n >>> psdf\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n >>> psdf.swapaxes(i=1, j=0)\n x y z\n a 1 4 7\n b 2 5 8\n c 3 6 9\n >>> psdf.swapaxes(i=1, j=1)\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n " assert (copy is True) i = validate_axis(i) j = validate_axis(j) return (self.copy() if (i == j) else self.transpose())
6,023,020,263,360,316,000
Interchange axes and swap values axes appropriately. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from pyspark.pandas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1) Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Parameters ---------- i: {0 or 'index', 1 or 'columns'}. The axis to swap. j: {0 or 'index', 1 or 'columns'}. The axis to swap. copy : bool, default True. Returns ------- DataFrame Examples -------- >>> psdf = ps.DataFrame( ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c'] ... ) >>> psdf a b c x 1 2 3 y 4 5 6 z 7 8 9 >>> psdf.swapaxes(i=1, j=0) x y z a 1 4 7 b 2 5 8 c 3 6 9 >>> psdf.swapaxes(i=1, j=1) a b c x 1 2 3 y 4 5 6 z 7 8 9
python/pyspark/pandas/frame.py
swapaxes
Flyangz/spark
python
def swapaxes(self, i: Axis, j: Axis, copy: bool=True) -> 'DataFrame': "\n Interchange axes and swap values axes appropriately.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Parameters\n ----------\n i: {0 or 'index', 1 or 'columns'}. The axis to swap.\n j: {0 or 'index', 1 or 'columns'}. The axis to swap.\n copy : bool, default True.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> psdf = ps.DataFrame(\n ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c']\n ... )\n >>> psdf\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n >>> psdf.swapaxes(i=1, j=0)\n x y z\n a 1 4 7\n b 2 5 8\n c 3 6 9\n >>> psdf.swapaxes(i=1, j=1)\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n " assert (copy is True) i = validate_axis(i) j = validate_axis(j) return (self.copy() if (i == j) else self.transpose())
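A brief sketch for the swapaxes record above. As the record's note warns, the operation is expensive on big data because it effectively transposes the frame, so the example sticks to a tiny frame; pyspark.pandas is assumed to be importable.

import pyspark.pandas as ps

psdf = ps.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                    index=['x', 'y', 'z'], columns=['a', 'b', 'c'])

transposed = psdf.swapaxes(i=1, j=0)  # equivalent to a transpose; rows become columns
unchanged = psdf.swapaxes(i=1, j=1)   # i == j simply returns a copy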
def nlargest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant in pandas.\n In pandas-on-Spark, thanks to Spark\'s lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {\'first\', \'last\'}, default \'first\'. \'all\' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'X\': [1, 2, 3, 5, 6, 7, np.nan],\n ... \'Y\': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column "X".\n\n >>> df.nlargest(n=3, columns=\'X\')\n X Y\n 5 7.0 11\n 4 6.0 10\n 3 5.0 9\n\n To order by the largest values in column "Y" and then "X", we can\n specify multiple columns like in the next example.\n\n >>> df.nlargest(n=3, columns=[\'Y\', \'X\'])\n X Y\n 6 NaN 12\n 5 7.0 11\n 4 6.0 10\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({\'X\': [1, 2, 2, 3, 3]}, index=[\'a\', \'b\', \'c\', \'d\', \'e\'])\n >>> tied_df\n X\n a 1\n b 2\n c 2\n d 3\n e 3\n\n When using keep=\'first\' (by default), ties are resolved in order:\n\n >>> tied_df.nlargest(3, \'X\')\n X\n d 3\n e 3\n b 2\n\n >>> tied_df.nlargest(3, \'X\', keep=\'first\')\n X\n d 3\n e 3\n b 2\n\n When using keep=\'last\', ties are resolved in reverse order:\n\n >>> tied_df.nlargest(3, \'X\', keep=\'last\')\n X\n e 3\n d 3\n c 2\n ' by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=False, na_position='last', keep=keep).head(n=n)
-1,321,435,559,041,933,600
Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in pandas. In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet. Determines which duplicates (if any) to keep. - ``first`` : Keep the first occurrence. - ``last`` : Keep the last occurrence. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "X". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 To order by the largest values in column "Y" and then "X", we can specify multiple columns like in the next example. >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 The examples below show how ties are resolved, which is decided by `keep`. >>> tied_df = ps.DataFrame({'X': [1, 2, 2, 3, 3]}, index=['a', 'b', 'c', 'd', 'e']) >>> tied_df X a 1 b 2 c 2 d 3 e 3 When using keep='first' (by default), ties are resolved in order: >>> tied_df.nlargest(3, 'X') X d 3 e 3 b 2 >>> tied_df.nlargest(3, 'X', keep='first') X d 3 e 3 b 2 When using keep='last', ties are resolved in reverse order: >>> tied_df.nlargest(3, 'X', keep='last') X e 3 d 3 c 2
python/pyspark/pandas/frame.py
nlargest
Flyangz/spark
python
def nlargest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant in pandas.\n In pandas-on-Spark, thanks to Spark\'s lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {\'first\', \'last\'}, default \'first\'. \'all\' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'X\': [1, 2, 3, 5, 6, 7, np.nan],\n ... \'Y\': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column "X".\n\n >>> df.nlargest(n=3, columns=\'X\')\n X Y\n 5 7.0 11\n 4 6.0 10\n 3 5.0 9\n\n To order by the largest values in column "Y" and then "X", we can\n specify multiple columns like in the next example.\n\n >>> df.nlargest(n=3, columns=[\'Y\', \'X\'])\n X Y\n 6 NaN 12\n 5 7.0 11\n 4 6.0 10\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({\'X\': [1, 2, 2, 3, 3]}, index=[\'a\', \'b\', \'c\', \'d\', \'e\'])\n >>> tied_df\n X\n a 1\n b 2\n c 2\n d 3\n e 3\n\n When using keep=\'first\' (by default), ties are resolved in order:\n\n >>> tied_df.nlargest(3, \'X\')\n X\n d 3\n e 3\n b 2\n\n >>> tied_df.nlargest(3, \'X\', keep=\'first\')\n X\n d 3\n e 3\n b 2\n\n When using keep=\'last\', ties are resolved in reverse order:\n\n >>> tied_df.nlargest(3, \'X\', keep=\'last\')\n X\n e 3\n d 3\n c 2\n ' by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=False, na_position='last', keep=keep).head(n=n)
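The nlargest record above can be tried with the sketch below, which reuses its doctest frame; variable names are illustrative, and pyspark.pandas plus numpy are assumed to be installed.

import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
                   'Y': [6, 7, 8, 9, 10, 11, 12]})

top_x = df.nlargest(n=3, columns='X')          # three largest X values; the NaN row sorts last
top_yx = df.nlargest(n=3, columns=['Y', 'X'])  # order by Y first, then X
top_last = df.nlargest(3, 'X', keep='last')    # on ties, keep the later occurrences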
def nsmallest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,\n but more performant. In pandas-on-Spark, thanks to Spark\'s lazy execution and query\n optimizer, the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {\'first\', \'last\'}, default \'first\'. \'all\' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'X\': [1, 2, 3, 5, 6, 7, np.nan],\n ... \'Y\': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column "X".\n\n >>> df.nsmallest(n=3, columns=\'X\') # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n To order by the smallest values in column "Y" and then "X", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(n=3, columns=[\'Y\', \'X\']) # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({\'X\': [1, 1, 2, 2, 3]}, index=[\'a\', \'b\', \'c\', \'d\', \'e\'])\n >>> tied_df\n X\n a 1\n b 1\n c 2\n d 2\n e 3\n\n When using keep=\'first\' (by default), ties are resolved in order:\n\n >>> tied_df.nsmallest(3, \'X\')\n X\n a 1\n b 1\n c 2\n\n >>> tied_df.nsmallest(3, \'X\', keep=\'first\')\n X\n a 1\n b 1\n c 2\n\n When using keep=\'last\', ties are resolved in reverse order:\n\n >>> tied_df.nsmallest(3, \'X\', keep=\'last\')\n X\n b 1\n a 1\n d 2\n ' by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=True, na_position='last', keep=keep).head(n=n)
-4,372,649,593,661,523,500
Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet. Determines which duplicates (if any) to keep. - ``first`` : Keep the first occurrence. - ``last`` : Keep the last occurrence. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "X". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the smallest values in column "Y" and then "X", we can specify multiple columns like in the next example. >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 The examples below show how ties are resolved, which is decided by `keep`. >>> tied_df = ps.DataFrame({'X': [1, 1, 2, 2, 3]}, index=['a', 'b', 'c', 'd', 'e']) >>> tied_df X a 1 b 1 c 2 d 2 e 3 When using keep='first' (by default), ties are resolved in order: >>> tied_df.nsmallest(3, 'X') X a 1 b 1 c 2 >>> tied_df.nsmallest(3, 'X', keep='first') X a 1 b 1 c 2 When using keep='last', ties are resolved in reverse order: >>> tied_df.nsmallest(3, 'X', keep='last') X b 1 a 1 d 2
python/pyspark/pandas/frame.py
nsmallest
Flyangz/spark
python
def nsmallest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,\n but more performant. In pandas-on-Spark, thanks to Spark\'s lazy execution and query\n optimizer, the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {\'first\', \'last\'}, default \'first\'. \'all\' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'X\': [1, 2, 3, 5, 6, 7, np.nan],\n ... \'Y\': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column "X".\n\n >>> df.nsmallest(n=3, columns=\'X\') # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n To order by the smallest values in column "Y" and then "X", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(n=3, columns=[\'Y\', \'X\']) # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({\'X\': [1, 1, 2, 2, 3]}, index=[\'a\', \'b\', \'c\', \'d\', \'e\'])\n >>> tied_df\n X\n a 1\n b 1\n c 2\n d 2\n e 3\n\n When using keep=\'first\' (by default), ties are resolved in order:\n\n >>> tied_df.nsmallest(3, \'X\')\n X\n a 1\n b 1\n c 2\n\n >>> tied_df.nsmallest(3, \'X\', keep=\'first\')\n X\n a 1\n b 1\n c 2\n\n When using keep=\'last\', ties are resolved in reverse order:\n\n >>> tied_df.nsmallest(3, \'X\', keep=\'last\')\n X\n b 1\n a 1\n d 2\n ' by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=True, na_position='last', keep=keep).head(n=n)
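Similarly, a short sketch for the nsmallest record above, again assuming pyspark.pandas and numpy; it mirrors the documented examples rather than adding new behavior.

import numpy as np
import pyspark.pandas as ps

df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
                   'Y': [6, 7, 8, 9, 10, 11, 12]})

low_x = df.nsmallest(n=3, columns='X')          # three smallest X values
low_yx = df.nsmallest(n=3, columns=['Y', 'X'])  # order by Y first, then X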
def isin(self, values: Union[(List, Dict)]) -> 'DataFrame': "\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column names, which must match.\n Series and DataFrame are not supported.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'],\n ... columns=['num_legs', 'num_wings'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n " if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError('DataFrame and Series are not supported') if (isinstance(values, dict) and (not set(values.keys()).issubset(self.columns))): raise AttributeError(("'DataFrame' object has no attribute %s" % set(values.keys()).difference(self.columns))) data_spark_columns = [] if isinstance(values, dict): for (i, col) in enumerate(self.columns): if (col in values): item = values[col] item = (item.tolist() if isinstance(item, np.ndarray) else list(item)) scol = self._internal.spark_column_for(self._internal.column_labels[i]).isin([SF.lit(v) for v in item]) scol = F.coalesce(scol, F.lit(False)) else: scol = SF.lit(False) data_spark_columns.append(scol.alias(self._internal.data_spark_column_names[i])) elif is_list_like(values): values = (cast(np.ndarray, values).tolist() if isinstance(values, np.ndarray) else list(values)) for label in self._internal.column_labels: scol = self._internal.spark_column_for(label).isin([SF.lit(v) for v in values]) scol = F.coalesce(scol, F.lit(False)) data_spark_columns.append(scol.alias(self._internal.spark_column_name_for(label))) else: raise TypeError('Values should be iterable, Series, DataFrame or dict.') return DataFrame(self._internal.with_new_columns(data_spark_columns, data_fields=[field.copy(dtype=np.dtype('bool'), spark_type=BooleanType(), nullable=False) for field in self._internal.data_fields]))
8,959,033,474,880,497,000
Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True
python/pyspark/pandas/frame.py
isin
Flyangz/spark
python
def isin(self, values: Union[(List, Dict)]) -> 'DataFrame': "\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column names, which must match.\n Series and DataFrame are not supported.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'],\n ... columns=['num_legs', 'num_wings'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n " if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError('DataFrame and Series are not supported') if (isinstance(values, dict) and (not set(values.keys()).issubset(self.columns))): raise AttributeError(("'DataFrame' object has no attribute %s" % set(values.keys()).difference(self.columns))) data_spark_columns = [] if isinstance(values, dict): for (i, col) in enumerate(self.columns): if (col in values): item = values[col] item = (item.tolist() if isinstance(item, np.ndarray) else list(item)) scol = self._internal.spark_column_for(self._internal.column_labels[i]).isin([SF.lit(v) for v in item]) scol = F.coalesce(scol, F.lit(False)) else: scol = SF.lit(False) data_spark_columns.append(scol.alias(self._internal.data_spark_column_names[i])) elif is_list_like(values): values = (cast(np.ndarray, values).tolist() if isinstance(values, np.ndarray) else list(values)) for label in self._internal.column_labels: scol = self._internal.spark_column_for(label).isin([SF.lit(v) for v in values]) scol = F.coalesce(scol, F.lit(False)) data_spark_columns.append(scol.alias(self._internal.spark_column_name_for(label))) else: raise TypeError('Values should be iterable, Series, DataFrame or dict.') return DataFrame(self._internal.with_new_columns(data_spark_columns, data_fields=[field.copy(dtype=np.dtype('bool'), spark_type=BooleanType(), nullable=False) for field in self._internal.data_fields]))
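A short sketch of the two accepted forms of `values` for the isin record above, assuming a pandas-on-Spark session; the animal data mirrors the docstring and is otherwise illustrative.

import pyspark.pandas as ps

psdf = ps.DataFrame({"num_legs": [2, 4], "num_wings": [2, 0]},
                    index=["falcon", "dog"])

# List form: every cell is tested against the same set of values.
mask_list = psdf.isin([0, 2])

# Dict form: per-column values; columns not named in the dict come back
# as all-False, and unknown keys raise AttributeError per the code above.
mask_dict = psdf.isin({"num_wings": [0, 3]})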
@property def shape(self) -> Tuple[(int, int)]: "\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n " return (len(self), len(self.columns))
7,635,316,900,167,971,000
Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3)
python/pyspark/pandas/frame.py
shape
Flyangz/spark
python
@property def shape(self) -> Tuple[(int, int)]: "\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n " return (len(self), len(self.columns))
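A tiny sketch for the shape record above; the data is illustrative. Per the implementation shown, the row count comes from len(self), which is evaluated against the underlying Spark frame (an assumption about cost, not stated in the record itself).

import pyspark.pandas as ps

psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# (len(psdf), len(psdf.columns)) == (3, 2)
n_rows, n_cols = psdf.shape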
def merge(self, right: 'DataFrame', how: str='inner', on: Optional[Union[(Name, List[Name])]]=None, left_on: Optional[Union[(Name, List[Name])]]=None, right_on: Optional[Union[(Name, List[Name])]]=None, left_index: bool=False, right_index: bool=False, suffixes: Tuple[(str, str)]=('_x', '_y')) -> 'DataFrame': "\n Merge DataFrame objects with a database-style join.\n\n The index of the resulting DataFrame will be one of the following:\n - 0...n if no index is used for merging\n - Index of the left DataFrame if merged only on the index of the right DataFrame\n - Index of the right DataFrame if merged only on the index of the left DataFrame\n - All involved indices if merged using the indices of both DataFrames\n e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will\n be an index (x, a, b)\n\n Parameters\n ----------\n right: Object to merge with.\n how: Type of merge to be performed.\n {'left', 'right', 'outer', 'inner'}, default 'inner'\n\n left: use only keys from left frame, similar to a SQL left outer join; not preserve\n key order unlike pandas.\n right: use only keys from right frame, similar to a SQL right outer join; not preserve\n key order unlike pandas.\n outer: use union of keys from both frames, similar to a SQL full outer join; sort keys\n lexicographically.\n inner: use intersection of keys from both frames, similar to a SQL inner join;\n not preserve the order of the left keys unlike pandas.\n on: Column or index level names to join on. These must be found in both DataFrames. If on\n is None and not merging on indexes then this defaults to the intersection of the\n columns in both DataFrames.\n left_on: Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\n right_on: Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\n left_index: Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index or a number of\n columns) must match the number of levels.\n right_index: Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\n suffixes: Suffix to apply to overlapping column names in the left and right side,\n respectively.\n\n Returns\n -------\n DataFrame\n A DataFrame of the two merged objects.\n\n See Also\n --------\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [1, 2, 3, 5]},\n ... columns=['lkey', 'value'])\n >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [5, 6, 7, 8]},\n ... columns=['rkey', 'value'])\n >>> df1\n lkey value\n 0 foo 1\n 1 bar 2\n 2 baz 3\n 3 foo 5\n >>> df2\n rkey value\n 0 foo 5\n 1 bar 6\n 2 baz 7\n 3 foo 8\n\n Merge df1 and df2 on the lkey and rkey columns. 
The value columns have\n the default suffixes, _x and _y, appended.\n\n >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')\n >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS\n lkey value_x rkey value_y\n ...bar 2 bar 6\n ...baz 3 baz 7\n ...foo 1 foo 5\n ...foo 1 foo 8\n ...foo 5 foo 5\n ...foo 5 foo 8\n\n >>> left_psdf = ps.DataFrame({'A': [1, 2]})\n >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index()\n A B\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index()\n A B\n 0 1 None\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index()\n A B\n 1 2.0 x\n 2 NaN y\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index()\n A B\n 0 1.0 None\n 1 2.0 x\n 2 NaN y\n\n Notes\n -----\n As described in #263, joining string columns currently returns None for missing values\n instead of NaN.\n " def to_list(os: Optional[Union[(Name, List[Name])]]) -> List[Label]: if (os is None): return [] elif is_name_like_tuple(os): return [cast(Label, os)] elif is_name_like_value(os): return [(os,)] else: return [(o if is_name_like_tuple(o) else (o,)) for o in os] if isinstance(right, ps.Series): right = right.to_frame() if on: if (left_on or right_on): raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", not a combination of both.') left_key_names = list(map(self._internal.spark_column_name_for, to_list(on))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(on))) else: if left_index: left_key_names = self._internal.index_spark_column_names else: left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on))) if right_index: right_key_names = right._internal.index_spark_column_names else: right_key_names = list(map(right._internal.spark_column_name_for, to_list(right_on))) if (left_key_names and (not right_key_names)): raise ValueError('Must pass right_on or right_index=True') if (right_key_names and (not left_key_names)): raise ValueError('Must pass left_on or left_index=True') if ((not left_key_names) and (not right_key_names)): common = list(self.columns.intersection(right.columns)) if (len(common) == 0): raise ValueError('No common columns to perform merge on. 
Merge options: left_on=None, right_on=None, left_index=False, right_index=False') left_key_names = list(map(self._internal.spark_column_name_for, to_list(common))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(common))) if (len(left_key_names) != len(right_key_names)): raise ValueError('len(left_keys) must equal len(right_keys)') right_prefix = '__right_' right_key_names = [(right_prefix + right_key_name) for right_key_name in right_key_names] how = validate_how(how) def resolve(internal: InternalFrame, side: str) -> InternalFrame: def rename(col: str) -> str: return '__{}_{}'.format(side, col) internal = internal.resolved_copy sdf = internal.spark_frame sdf = sdf.select(*[scol_for(sdf, col).alias(rename(col)) for col in sdf.columns if (col not in HIDDEN_COLUMNS)], *HIDDEN_COLUMNS) return internal.copy(spark_frame=sdf, index_spark_columns=[scol_for(sdf, rename(col)) for col in internal.index_spark_column_names], index_fields=[field.copy(name=rename(field.name)) for field in internal.index_fields], data_spark_columns=[scol_for(sdf, rename(col)) for col in internal.data_spark_column_names], data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields]) left_internal = self._internal.resolved_copy right_internal = resolve(right._internal, 'right') left_table = left_internal.spark_frame.alias('left_table') right_table = right_internal.spark_frame.alias('right_table') left_key_columns = [scol_for(left_table, label) for label in left_key_names] right_key_columns = [scol_for(right_table, label) for label in right_key_names] join_condition = reduce((lambda x, y: (x & y)), [(lkey == rkey) for (lkey, rkey) in zip(left_key_columns, right_key_columns)]) joined_table = left_table.join(right_table, join_condition, how=how) left_suffix = suffixes[0] right_suffix = suffixes[1] duplicate_columns = (set(left_internal.column_labels) & set(right_internal.column_labels)) exprs = [] data_columns = [] column_labels = [] def left_scol_for(label: Label) -> Column: return scol_for(left_table, left_internal.spark_column_name_for(label)) def right_scol_for(label: Label) -> Column: return scol_for(right_table, right_internal.spark_column_name_for(label)) for label in left_internal.column_labels: col = left_internal.spark_column_name_for(label) scol = left_scol_for(label) if (label in duplicate_columns): spark_column_name = left_internal.spark_column_name_for(label) if ((spark_column_name in left_key_names) and ((right_prefix + spark_column_name) in right_key_names)): right_scol = right_scol_for(label) if (how == 'right'): scol = right_scol.alias(col) elif (how == 'full'): scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col) else: pass else: col = (col + left_suffix) scol = scol.alias(col) label = tuple(([(str(label[0]) + left_suffix)] + list(label[1:]))) exprs.append(scol) data_columns.append(col) column_labels.append(label) for label in right_internal.column_labels: col = right_internal.spark_column_name_for(label)[len(right_prefix):] scol = right_scol_for(label).alias(col) if (label in duplicate_columns): spark_column_name = left_internal.spark_column_name_for(label) if ((spark_column_name in left_key_names) and ((right_prefix + spark_column_name) in right_key_names)): continue else: col = (col + right_suffix) scol = scol.alias(col) label = tuple(([(str(label[0]) + right_suffix)] + list(label[1:]))) exprs.append(scol) data_columns.append(col) column_labels.append(label) left_index_scols = left_internal.index_spark_columns right_index_scols = 
right_internal.index_spark_columns if left_index: if right_index: if (how in ('inner', 'left')): exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names elif (how == 'right'): exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names else: index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names for (col, left_scol, right_scol) in zip(index_spark_column_names, left_index_scols, right_index_scols): scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol) exprs.append(scol.alias(col)) else: exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names elif right_index: exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names else: index_spark_column_names = [] index_names = [] selected_columns = joined_table.select(*exprs) internal = InternalFrame(spark_frame=selected_columns, index_spark_columns=[scol_for(selected_columns, col) for col in index_spark_column_names], index_names=index_names, column_labels=column_labels, data_spark_columns=[scol_for(selected_columns, col) for col in data_columns]) return DataFrame(internal)
-4,141,360,236,492,709,400
Merge DataFrame objects with a database-style join. The index of the resulting DataFrame will be one of the following: - 0...n if no index is used for merging - Index of the left DataFrame if merged only on the index of the right DataFrame - Index of the right DataFrame if merged only on the index of the left DataFrame - All involved indices if merged using the indices of both DataFrames e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will be an index (x, a, b) Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {'left', 'right', 'outer', 'inner'}, default 'inner' left: use only keys from left frame, similar to a SQL left outer join; not preserve key order unlike pandas. right: use only keys from right frame, similar to a SQL right outer join; not preserve key order unlike pandas. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; not preserve the order of the left keys unlike pandas. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on: Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on: Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- DataFrame.join : Join columns of another DataFrame. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}, ... columns=['lkey', 'value']) >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}, ... columns=['rkey', 'value']) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. 
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey') >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS lkey value_x rkey value_y ...bar 2 bar 6 ...baz 3 baz 7 ...foo 1 foo 5 ...foo 1 foo 8 ...foo 5 foo 5 ...foo 5 foo 8 >>> left_psdf = ps.DataFrame({'A': [1, 2]}) >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index() A B 1 2 x >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index() A B 0 1 None 1 2 x >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index() A B 1 2.0 x 2 NaN y >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index() A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN.
python/pyspark/pandas/frame.py
merge
Flyangz/spark
python
def merge(self, right: 'DataFrame', how: str='inner', on: Optional[Union[(Name, List[Name])]]=None, left_on: Optional[Union[(Name, List[Name])]]=None, right_on: Optional[Union[(Name, List[Name])]]=None, left_index: bool=False, right_index: bool=False, suffixes: Tuple[(str, str)]=('_x', '_y')) -> 'DataFrame': "\n Merge DataFrame objects with a database-style join.\n\n The index of the resulting DataFrame will be one of the following:\n - 0...n if no index is used for merging\n - Index of the left DataFrame if merged only on the index of the right DataFrame\n - Index of the right DataFrame if merged only on the index of the left DataFrame\n - All involved indices if merged using the indices of both DataFrames\n e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will\n be an index (x, a, b)\n\n Parameters\n ----------\n right: Object to merge with.\n how: Type of merge to be performed.\n {'left', 'right', 'outer', 'inner'}, default 'inner'\n\n left: use only keys from left frame, similar to a SQL left outer join; not preserve\n key order unlike pandas.\n right: use only keys from right frame, similar to a SQL right outer join; not preserve\n key order unlike pandas.\n outer: use union of keys from both frames, similar to a SQL full outer join; sort keys\n lexicographically.\n inner: use intersection of keys from both frames, similar to a SQL inner join;\n not preserve the order of the left keys unlike pandas.\n on: Column or index level names to join on. These must be found in both DataFrames. If on\n is None and not merging on indexes then this defaults to the intersection of the\n columns in both DataFrames.\n left_on: Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\n right_on: Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\n left_index: Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index or a number of\n columns) must match the number of levels.\n right_index: Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\n suffixes: Suffix to apply to overlapping column names in the left and right side,\n respectively.\n\n Returns\n -------\n DataFrame\n A DataFrame of the two merged objects.\n\n See Also\n --------\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [1, 2, 3, 5]},\n ... columns=['lkey', 'value'])\n >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [5, 6, 7, 8]},\n ... columns=['rkey', 'value'])\n >>> df1\n lkey value\n 0 foo 1\n 1 bar 2\n 2 baz 3\n 3 foo 5\n >>> df2\n rkey value\n 0 foo 5\n 1 bar 6\n 2 baz 7\n 3 foo 8\n\n Merge df1 and df2 on the lkey and rkey columns. 
The value columns have\n the default suffixes, _x and _y, appended.\n\n >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')\n >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS\n lkey value_x rkey value_y\n ...bar 2 bar 6\n ...baz 3 baz 7\n ...foo 1 foo 5\n ...foo 1 foo 8\n ...foo 5 foo 5\n ...foo 5 foo 8\n\n >>> left_psdf = ps.DataFrame({'A': [1, 2]})\n >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index()\n A B\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index()\n A B\n 0 1 None\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index()\n A B\n 1 2.0 x\n 2 NaN y\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index()\n A B\n 0 1.0 None\n 1 2.0 x\n 2 NaN y\n\n Notes\n -----\n As described in #263, joining string columns currently returns None for missing values\n instead of NaN.\n " def to_list(os: Optional[Union[(Name, List[Name])]]) -> List[Label]: if (os is None): return [] elif is_name_like_tuple(os): return [cast(Label, os)] elif is_name_like_value(os): return [(os,)] else: return [(o if is_name_like_tuple(o) else (o,)) for o in os] if isinstance(right, ps.Series): right = right.to_frame() if on: if (left_on or right_on): raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", not a combination of both.') left_key_names = list(map(self._internal.spark_column_name_for, to_list(on))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(on))) else: if left_index: left_key_names = self._internal.index_spark_column_names else: left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on))) if right_index: right_key_names = right._internal.index_spark_column_names else: right_key_names = list(map(right._internal.spark_column_name_for, to_list(right_on))) if (left_key_names and (not right_key_names)): raise ValueError('Must pass right_on or right_index=True') if (right_key_names and (not left_key_names)): raise ValueError('Must pass left_on or left_index=True') if ((not left_key_names) and (not right_key_names)): common = list(self.columns.intersection(right.columns)) if (len(common) == 0): raise ValueError('No common columns to perform merge on. 
Merge options: left_on=None, right_on=None, left_index=False, right_index=False') left_key_names = list(map(self._internal.spark_column_name_for, to_list(common))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(common))) if (len(left_key_names) != len(right_key_names)): raise ValueError('len(left_keys) must equal len(right_keys)') right_prefix = '__right_' right_key_names = [(right_prefix + right_key_name) for right_key_name in right_key_names] how = validate_how(how) def resolve(internal: InternalFrame, side: str) -> InternalFrame: def rename(col: str) -> str: return '__{}_{}'.format(side, col) internal = internal.resolved_copy sdf = internal.spark_frame sdf = sdf.select(*[scol_for(sdf, col).alias(rename(col)) for col in sdf.columns if (col not in HIDDEN_COLUMNS)], *HIDDEN_COLUMNS) return internal.copy(spark_frame=sdf, index_spark_columns=[scol_for(sdf, rename(col)) for col in internal.index_spark_column_names], index_fields=[field.copy(name=rename(field.name)) for field in internal.index_fields], data_spark_columns=[scol_for(sdf, rename(col)) for col in internal.data_spark_column_names], data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields]) left_internal = self._internal.resolved_copy right_internal = resolve(right._internal, 'right') left_table = left_internal.spark_frame.alias('left_table') right_table = right_internal.spark_frame.alias('right_table') left_key_columns = [scol_for(left_table, label) for label in left_key_names] right_key_columns = [scol_for(right_table, label) for label in right_key_names] join_condition = reduce((lambda x, y: (x & y)), [(lkey == rkey) for (lkey, rkey) in zip(left_key_columns, right_key_columns)]) joined_table = left_table.join(right_table, join_condition, how=how) left_suffix = suffixes[0] right_suffix = suffixes[1] duplicate_columns = (set(left_internal.column_labels) & set(right_internal.column_labels)) exprs = [] data_columns = [] column_labels = [] def left_scol_for(label: Label) -> Column: return scol_for(left_table, left_internal.spark_column_name_for(label)) def right_scol_for(label: Label) -> Column: return scol_for(right_table, right_internal.spark_column_name_for(label)) for label in left_internal.column_labels: col = left_internal.spark_column_name_for(label) scol = left_scol_for(label) if (label in duplicate_columns): spark_column_name = left_internal.spark_column_name_for(label) if ((spark_column_name in left_key_names) and ((right_prefix + spark_column_name) in right_key_names)): right_scol = right_scol_for(label) if (how == 'right'): scol = right_scol.alias(col) elif (how == 'full'): scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col) else: pass else: col = (col + left_suffix) scol = scol.alias(col) label = tuple(([(str(label[0]) + left_suffix)] + list(label[1:]))) exprs.append(scol) data_columns.append(col) column_labels.append(label) for label in right_internal.column_labels: col = right_internal.spark_column_name_for(label)[len(right_prefix):] scol = right_scol_for(label).alias(col) if (label in duplicate_columns): spark_column_name = left_internal.spark_column_name_for(label) if ((spark_column_name in left_key_names) and ((right_prefix + spark_column_name) in right_key_names)): continue else: col = (col + right_suffix) scol = scol.alias(col) label = tuple(([(str(label[0]) + right_suffix)] + list(label[1:]))) exprs.append(scol) data_columns.append(col) column_labels.append(label) left_index_scols = left_internal.index_spark_columns right_index_scols = 
right_internal.index_spark_columns if left_index: if right_index: if (how in ('inner', 'left')): exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names elif (how == 'right'): exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names else: index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names for (col, left_scol, right_scol) in zip(index_spark_column_names, left_index_scols, right_index_scols): scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol) exprs.append(scol.alias(col)) else: exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names elif right_index: exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names else: index_spark_column_names = [] index_names = [] selected_columns = joined_table.select(*exprs) internal = InternalFrame(spark_frame=selected_columns, index_spark_columns=[scol_for(selected_columns, col) for col in index_spark_column_names], index_names=index_names, column_labels=column_labels, data_spark_columns=[scol_for(selected_columns, col) for col in data_columns]) return DataFrame(internal)
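A compact sketch for the merge record above, covering a column-on-column join and an index-on-index join; the frame contents and the how/suffix choices are illustrative assumptions.

import pyspark.pandas as ps

left = ps.DataFrame({"lkey": ["foo", "bar"], "value": [1, 2]})
right = ps.DataFrame({"rkey": ["foo", "bar"], "value": [5, 6]})

# Column-on-column merge; the overlapping 'value' columns pick up the
# default _x/_y suffixes, as in the docstring example.
merged = left.merge(right, left_on="lkey", right_on="rkey", how="inner")

# Index-on-index merge: per the notes above, the resulting index is built
# from the indices involved rather than being reset to 0..n.
by_index = left.set_index("lkey").merge(right.set_index("rkey"),
                                        left_index=True, right_index=True)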
def join(self, right: 'DataFrame', on: Optional[Union[(Name, List[Name])]]=None, how: str='left', lsuffix: str='', rsuffix: str='') -> 'DataFrame': "\n Join columns of another DataFrame.\n\n Join columns with `right` DataFrame either on index or on a key column. Efficiently join\n multiple DataFrame objects by index at once by passing a list.\n\n Parameters\n ----------\n right: DataFrame, Series\n on: str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index in `right`, otherwise\n joins index-on-index. If multiple values given, the `right` DataFrame must have a\n MultiIndex. Can pass an array as the join key if it is not already contained in the\n calling DataFrame. Like an Excel VLOOKUP operation.\n how: {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use `left` frame’s index (or column if on is specified).\n * right: use `right`’s index.\n * outer: form union of `left` frame’s index (or column if on is specified) with\n right’s index, and sort it. lexicographically.\n * inner: form intersection of `left` frame’s index (or column if on is specified)\n with `right`’s index, preserving the order of the `left`’s one.\n lsuffix : str, default ''\n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default ''\n Suffix to use from `right` frame's overlapping columns.\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the `left` and `right`.\n\n See Also\n --------\n DataFrame.merge: For column(s)-on-columns(s) operations.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Notes\n -----\n Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame\n objects.\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n ... 'A': ['A0', 'A1', 'A2', 'A3']},\n ... columns=['key', 'A'])\n >>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']},\n ... columns=['key', 'B'])\n >>> psdf1\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n >>> psdf2\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right')\n >>> join_psdf.sort_values(by=join_psdf.columns)\n key_left A key_right B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 None None\n\n If we want to join using the key columns, we need to set key to be the index in both df and\n right. The joined DataFrame will have key as its index.\n\n >>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key'))\n >>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 None\n\n Another option to join using the key columns is to use the on parameter. DataFrame.join\n always uses right’s index but we can use any column in df. 
This method not preserve the\n original DataFrame’s index in the result unlike pandas.\n\n >>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key')\n >>> join_psdf.index\n Int64Index([0, 1, 2, 3], dtype='int64')\n " if isinstance(right, ps.Series): common = list(self.columns.intersection([right.name])) else: common = list(self.columns.intersection(right.columns)) if ((len(common) > 0) and (not lsuffix) and (not rsuffix)): raise ValueError('columns overlap but no suffix specified: {rename}'.format(rename=common)) need_set_index = False if on: if (not is_list_like(on)): on = [on] if (len(on) != right._internal.index_level): raise ValueError('len(left_on) must equal the number of levels in the index of "right"') need_set_index = (len((set(on) & set(self.index.names))) == 0) if need_set_index: self = self.set_index(on) join_psdf = self.merge(right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix)) return (join_psdf.reset_index() if need_set_index else join_psdf)
3,580,601,826,865,726,500
Join columns of another DataFrame. Join columns with `right` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- right: DataFrame, Series on: str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `right`, otherwise joins index-on-index. If multiple values given, the `right` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how: {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use `left` frame’s index (or column if on is specified). * right: use `right`’s index. * outer: form union of `left` frame’s index (or column if on is specified) with right’s index, and sort it. lexicographically. * inner: form intersection of `left` frame’s index (or column if on is specified) with `right`’s index, preserving the order of the `left`’s one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from `right` frame's overlapping columns. Returns ------- DataFrame A dataframe containing columns from both the `left` and `right`. See Also -------- DataFrame.merge: For column(s)-on-columns(s) operations. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Notes ----- Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame objects. Examples -------- >>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], ... 'A': ['A0', 'A1', 'A2', 'A3']}, ... columns=['key', 'A']) >>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}, ... columns=['key', 'B']) >>> psdf1 key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 >>> psdf2 key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right') >>> join_psdf.sort_values(by=join_psdf.columns) key_left A key_right B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 None None If we want to join using the key columns, we need to set key to be the index in both df and right. The joined DataFrame will have key as its index. >>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key')) >>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 None Another option to join using the key columns is to use the on parameter. DataFrame.join always uses right’s index but we can use any column in df. This method not preserve the original DataFrame’s index in the result unlike pandas. >>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key') >>> join_psdf.index Int64Index([0, 1, 2, 3], dtype='int64')
python/pyspark/pandas/frame.py
join
Flyangz/spark
python
def join(self, right: 'DataFrame', on: Optional[Union[(Name, List[Name])]]=None, how: str='left', lsuffix: str=, rsuffix: str=) -> 'DataFrame': "\n Join columns of another DataFrame.\n\n Join columns with `right` DataFrame either on index or on a key column. Efficiently join\n multiple DataFrame objects by index at once by passing a list.\n\n Parameters\n ----------\n right: DataFrame, Series\n on: str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index in `right`, otherwise\n joins index-on-index. If multiple values given, the `right` DataFrame must have a\n MultiIndex. Can pass an array as the join key if it is not already contained in the\n calling DataFrame. Like an Excel VLOOKUP operation.\n how: {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use `left` frame’s index (or column if on is specified).\n * right: use `right`’s index.\n * outer: form union of `left` frame’s index (or column if on is specified) with\n right’s index, and sort it. lexicographically.\n * inner: form intersection of `left` frame’s index (or column if on is specified)\n with `right`’s index, preserving the order of the `left`’s one.\n lsuffix : str, default \n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default \n Suffix to use from `right` frame's overlapping columns.\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the `left` and `right`.\n\n See Also\n --------\n DataFrame.merge: For column(s)-on-columns(s) operations.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Notes\n -----\n Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame\n objects.\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n ... 'A': ['A0', 'A1', 'A2', 'A3']},\n ... columns=['key', 'A'])\n >>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']},\n ... columns=['key', 'B'])\n >>> psdf1\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n >>> psdf2\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right')\n >>> join_psdf.sort_values(by=join_psdf.columns)\n key_left A key_right B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 None None\n\n If we want to join using the key columns, we need to set key to be the index in both df and\n right. The joined DataFrame will have key as its index.\n\n >>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key'))\n >>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 None\n\n Another option to join using the key columns is to use the on parameter. DataFrame.join\n always uses right’s index but we can use any column in df. 
This method not preserve the\n original DataFrame’s index in the result unlike pandas.\n\n >>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key')\n >>> join_psdf.index\n Int64Index([0, 1, 2, 3], dtype='int64')\n " if isinstance(right, ps.Series): common = list(self.columns.intersection([right.name])) else: common = list(self.columns.intersection(right.columns)) if ((len(common) > 0) and (not lsuffix) and (not rsuffix)): raise ValueError('columns overlap but no suffix specified: {rename}'.format(rename=common)) need_set_index = False if on: if (not is_list_like(on)): on = [on] if (len(on) != right._internal.index_level): raise ValueError('len(left_on) must equal the number of levels in the index of "right"') need_set_index = (len((set(on) & set(self.index.names))) == 0) if need_set_index: self = self.set_index(on) join_psdf = self.merge(right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix)) return (join_psdf.reset_index() if need_set_index else join_psdf)
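A brief sketch for the join record above; the key and column names follow the docstring example, the rest is an illustrative assumption.

import pyspark.pandas as ps

psdf1 = ps.DataFrame({"key": ["K0", "K1"], "A": ["A0", "A1"]})
psdf2 = ps.DataFrame({"key": ["K0", "K1"], "B": ["B0", "B1"]})

# The overlapping 'key' column requires suffixes; without them the code
# above raises ValueError("columns overlap but no suffix specified").
joined = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")

# Joining via on= uses right's index as the key and, as documented,
# does not preserve the caller's original index in the result.
keyed = psdf1.join(psdf2.set_index("key"), on="key")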
def combine_first(self, other: 'DataFrame') -> 'DataFrame': '\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n .. versionadded:: 3.3.0\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> ps.set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({\'A\': [None, 0], \'B\': [None, 4]})\n >>> df2 = ps.DataFrame({\'A\': [1, 1], \'B\': [3, 3]})\n\n >>> df1.combine_first(df2).sort_index()\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value does not exist in other\n\n >>> df1 = ps.DataFrame({\'A\': [None, 0], \'B\': [4, None]})\n >>> df2 = ps.DataFrame({\'B\': [3, 3], \'C\': [1, 1]}, index=[1, 2])\n\n >>> df1.combine_first(df2).sort_index()\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n >>> ps.reset_option("compute.ops_on_diff_frames")\n ' if (not isinstance(other, DataFrame)): raise TypeError('`combine_first` only allows `DataFrame` for parameter `other`') if same_anchor(self, other): combined = self this = self that = other else: combined = combine_frames(self, other) this = combined['this'] that = combined['that'] intersect_column_labels = set(self._internal.column_labels).intersection(set(other._internal.column_labels)) (column_labels, data_spark_columns) = ([], []) for column_label in this._internal.column_labels: this_scol = this._internal.spark_column_for(column_label) if (column_label in intersect_column_labels): that_scol = that._internal.spark_column_for(column_label) this_scol_name = this._internal.spark_column_name_for(column_label) combined_scol = F.when(this_scol.isNull(), that_scol).otherwise(this_scol).alias(this_scol_name) data_spark_columns.append(combined_scol) else: data_spark_columns.append(this_scol) column_labels.append(column_label) for column_label in that._internal.column_labels: if (column_label not in intersect_column_labels): that_scol = that._internal.spark_column_for(column_label) data_spark_columns.append(that_scol) column_labels.append(column_label) internal = combined._internal.copy(column_labels=column_labels, data_spark_columns=data_spark_columns, data_fields=None, column_label_names=self._internal.column_label_names) return DataFrame(internal)
-5,688,754,360,827,215,000
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. .. versionadded:: 3.3.0 Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame Examples -------- >>> ps.set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = ps.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2).sort_index() A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in other >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = ps.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2).sort_index() A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 >>> ps.reset_option("compute.ops_on_diff_frames")
python/pyspark/pandas/frame.py
combine_first
Flyangz/spark
python
def combine_first(self, other: 'DataFrame') -> 'DataFrame': '\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n .. versionadded:: 3.3.0\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> ps.set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({\'A\': [None, 0], \'B\': [None, 4]})\n >>> df2 = ps.DataFrame({\'A\': [1, 1], \'B\': [3, 3]})\n\n >>> df1.combine_first(df2).sort_index()\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value does not exist in other\n\n >>> df1 = ps.DataFrame({\'A\': [None, 0], \'B\': [4, None]})\n >>> df2 = ps.DataFrame({\'B\': [3, 3], \'C\': [1, 1]}, index=[1, 2])\n\n >>> df1.combine_first(df2).sort_index()\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n >>> ps.reset_option("compute.ops_on_diff_frames")\n ' if (not isinstance(other, DataFrame)): raise TypeError('`combine_first` only allows `DataFrame` for parameter `other`') if same_anchor(self, other): combined = self this = self that = other else: combined = combine_frames(self, other) this = combined['this'] that = combined['that'] intersect_column_labels = set(self._internal.column_labels).intersection(set(other._internal.column_labels)) (column_labels, data_spark_columns) = ([], []) for column_label in this._internal.column_labels: this_scol = this._internal.spark_column_for(column_label) if (column_label in intersect_column_labels): that_scol = that._internal.spark_column_for(column_label) this_scol_name = this._internal.spark_column_name_for(column_label) combined_scol = F.when(this_scol.isNull(), that_scol).otherwise(this_scol).alias(this_scol_name) data_spark_columns.append(combined_scol) else: data_spark_columns.append(this_scol) column_labels.append(column_label) for column_label in that._internal.column_labels: if (column_label not in intersect_column_labels): that_scol = that._internal.spark_column_for(column_label) data_spark_columns.append(that_scol) column_labels.append(column_label) internal = combined._internal.copy(column_labels=column_labels, data_spark_columns=data_spark_columns, data_fields=None, column_label_names=self._internal.column_label_names) return DataFrame(internal)
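A minimal sketch for the combine_first record above. As in the docstring example, combining two distinct frames is assumed to need the compute.ops_on_diff_frames option; the data itself is illustrative.

import pyspark.pandas as ps

ps.set_option("compute.ops_on_diff_frames", True)

df1 = ps.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = ps.DataFrame({"A": [1, 1], "B": [3, 3]})

# Nulls in df1 are filled from the same row/column locations in df2;
# locations that do not exist in df2 stay null.
filled = df1.combine_first(df2)

ps.reset_option("compute.ops_on_diff_frames")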
def append(self, other: 'DataFrame', ignore_index: bool=False, verify_integrity: bool=False, sort: bool=False) -> 'DataFrame': "\n Append rows of other to the end of caller, returning a new object.\n\n Columns in other that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n\n ignore_index : boolean, default False\n If True, do not use the index labels.\n\n verify_integrity : boolean, default False\n If True, raise ValueError on creating index with duplicates.\n\n sort : boolean, default False\n Currently not supported.\n\n Returns\n -------\n appended : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n\n >>> df.append(df)\n A B\n 0 1 2\n 1 3 4\n 0 1 2\n 1 3 4\n\n >>> df.append(df, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 1 2\n 3 3 4\n " if isinstance(other, ps.Series): raise TypeError('DataFrames.append() does not support appending Series to DataFrames') if sort: raise NotImplementedError("The 'sort' parameter is currently not supported") if (not ignore_index): index_scols = self._internal.index_spark_columns if (len(index_scols) != other._internal.index_level): raise ValueError('Both DataFrames have to have the same number of index levels') if (verify_integrity and (len(index_scols) > 0)): if (self._internal.spark_frame.select(index_scols).intersect(other._internal.spark_frame.select(other._internal.index_spark_columns)).count() > 0): raise ValueError('Indices have overlapping values') from pyspark.pandas.namespace import concat return cast(DataFrame, concat([self, other], ignore_index=ignore_index))
-4,467,937,677,897,888,000
Append rows of other to the end of caller, returning a new object. Columns in other that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default False Currently not supported. Returns ------- appended : DataFrame Examples -------- >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df.append(df) A B 0 1 2 1 3 4 0 1 2 1 3 4 >>> df.append(df, ignore_index=True) A B 0 1 2 1 3 4 2 1 2 3 3 4
python/pyspark/pandas/frame.py
append
Flyangz/spark
python
def append(self, other: 'DataFrame', ignore_index: bool=False, verify_integrity: bool=False, sort: bool=False) -> 'DataFrame': "\n Append rows of other to the end of caller, returning a new object.\n\n Columns in other that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n\n ignore_index : boolean, default False\n If True, do not use the index labels.\n\n verify_integrity : boolean, default False\n If True, raise ValueError on creating index with duplicates.\n\n sort : boolean, default False\n Currently not supported.\n\n Returns\n -------\n appended : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n\n >>> df.append(df)\n A B\n 0 1 2\n 1 3 4\n 0 1 2\n 1 3 4\n\n >>> df.append(df, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 1 2\n 3 3 4\n " if isinstance(other, ps.Series): raise TypeError('DataFrames.append() does not support appending Series to DataFrames') if sort: raise NotImplementedError("The 'sort' parameter is currently not supported") if (not ignore_index): index_scols = self._internal.index_spark_columns if (len(index_scols) != other._internal.index_level): raise ValueError('Both DataFrames have to have the same number of index levels') if (verify_integrity and (len(index_scols) > 0)): if (self._internal.spark_frame.select(index_scols).intersect(other._internal.spark_frame.select(other._internal.index_spark_columns)).count() > 0): raise ValueError('Indices have overlapping values') from pyspark.pandas.namespace import concat return cast(DataFrame, concat([self, other], ignore_index=ignore_index))
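A small sketch for the append record above, mirroring the docstring's self-append example; the comment on verify_integrity only restates the documented ValueError condition.

import pyspark.pandas as ps

df = ps.DataFrame([[1, 2], [3, 4]], columns=list("AB"))

# Appending the frame to itself duplicates index labels 0 and 1;
# ignore_index=True renumbers the result as 0..3 instead.
stacked = df.append(df, ignore_index=True)

# With ignore_index=False, verify_integrity=True would raise ValueError
# here because the index values overlap.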
def update(self, other: 'DataFrame', join: str='left', overwrite: bool=True) -> None: "\n Modify in place using non-NA values from another DataFrame.\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or Series\n join : 'left', default 'left'\n Only left join is implemented, keeping the index and columns of the original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values with values from `other`.\n * False: only update values that are NA in the original DataFrame.\n\n Returns\n -------\n None : method directly changes calling object\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-columns(s) operations.\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, it's name attribute must be set.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b y\n 2 c e\n\n If `other` contains None the corresponding values are not updated in the original dataframe.\n\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n " if (join != 'left'): raise NotImplementedError('Only left join is supported') if isinstance(other, ps.Series): other = other.to_frame() update_columns = list(set(self._internal.column_labels).intersection(set(other._internal.column_labels))) update_sdf = self.join(other[update_columns], rsuffix='_new')._internal.resolved_copy.spark_frame data_fields = self._internal.data_fields.copy() for column_labels in update_columns: column_name = self._internal.spark_column_name_for(column_labels) old_col = scol_for(update_sdf, column_name) new_col = scol_for(update_sdf, (other._internal.spark_column_name_for(column_labels) + '_new')) if overwrite: update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col).otherwise(new_col)) else: update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col).otherwise(old_col)) data_fields[self._internal.column_labels.index(column_labels)] = None sdf = update_sdf.select(*[scol_for(update_sdf, col) for col in self._internal.spark_column_names], *HIDDEN_COLUMNS) internal = self._internal.with_new_sdf(sdf, data_fields=data_fields) self._update_internal_frame(internal, requires_same_anchor=False)
-386,408,235,323,198,900
Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or Series join : 'left', default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. Returns ------- None : method directly changes calling object See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. DataFrame.join : Join columns of another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df.sort_index() A B 0 a d 1 b y 2 c e If `other` contains None the corresponding values are not updated in the original dataframe. >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4.0 1 2 500.0 2 3 6.0
python/pyspark/pandas/frame.py
update
Flyangz/spark
python
def update(self, other: 'DataFrame', join: str='left', overwrite: bool=True) -> None: "\n Modify in place using non-NA values from another DataFrame.\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or Series\n join : 'left', default 'left'\n Only left join is implemented, keeping the index and columns of the original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values with values from `other`.\n * False: only update values that are NA in the original DataFrame.\n\n Returns\n -------\n None : method directly changes calling object\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-columns(s) operations.\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, it's name attribute must be set.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b y\n 2 c e\n\n If `other` contains None the corresponding values are not updated in the original dataframe.\n\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n " if (join != 'left'): raise NotImplementedError('Only left join is supported') if isinstance(other, ps.Series): other = other.to_frame() update_columns = list(set(self._internal.column_labels).intersection(set(other._internal.column_labels))) update_sdf = self.join(other[update_columns], rsuffix='_new')._internal.resolved_copy.spark_frame data_fields = self._internal.data_fields.copy() for column_labels in update_columns: column_name = self._internal.spark_column_name_for(column_labels) old_col = scol_for(update_sdf, column_name) new_col = scol_for(update_sdf, (other._internal.spark_column_name_for(column_labels) + '_new')) if overwrite: update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col).otherwise(new_col)) else: update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col).otherwise(old_col)) data_fields[self._internal.column_labels.index(column_labels)] = None sdf = update_sdf.select(*[scol_for(update_sdf, col) for col in self._internal.spark_column_names], *HIDDEN_COLUMNS) internal = self._internal.with_new_sdf(sdf, data_fields=data_fields) self._update_internal_frame(internal, requires_same_anchor=False)
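A short sketch for the update record above, showing the overwrite=False path; the frame contents are illustrative assumptions.

import pyspark.pandas as ps

df = ps.DataFrame({"A": [1, 2, 3], "B": [400.0, None, 600.0]})
new_df = ps.DataFrame({"B": [4, 5, 6]})

# Left-aligned, in-place update: with overwrite=False only the NA slot
# (row 1 of column B) is filled from new_df; existing values are kept.
df.update(new_df, overwrite=False)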
def cov(self, min_periods: Optional[int]=None) -> 'DataFrame': "\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n .. versionadded:: 3.3.0\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = ps.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... 
columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> sdf = ps.from_pandas(df)\n >>> sdf.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n " min_periods = (1 if (min_periods is None) else min_periods) psdf = self[[col for col in self.columns if (isinstance(self[col].spark.data_type, BooleanType) or (isinstance(self[col].spark.data_type, NumericType) and (not isinstance(self[col].spark.data_type, DecimalType))))]] num_cols = len(psdf.columns) cov = np.zeros([num_cols, num_cols]) if (num_cols == 0): return DataFrame() if (len(psdf) < min_periods): cov.fill(np.nan) return DataFrame(cov, columns=psdf.columns, index=psdf.columns) data_cols = psdf._internal.data_spark_column_names cov_scols = [] count_not_null_scols = [] for r in range(0, num_cols): for c in range(r, num_cols): count_not_null_scols.append(F.count(F.when((F.col(data_cols[r]).isNotNull() & F.col(data_cols[c]).isNotNull()), 1))) count_not_null = psdf._internal.spark_frame.replace(float('nan'), None).select(*count_not_null_scols).head(1)[0] step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov_scols.append((F.covar_samp(F.col(data_cols[r]).cast('double'), F.col(data_cols[c]).cast('double')) if (count_not_null[(((r * num_cols) + c) - step)] >= min_periods) else F.lit(None))) pair_cov = psdf._internal.spark_frame.select(*cov_scols).head(1)[0] step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov[r][c] = pair_cov[(((r * num_cols) + c) - step)] cov = ((cov + cov.T) - np.diag(np.diag(cov))) return DataFrame(cov, columns=psdf.columns, index=psdf.columns)
2,049,463,742,787,191,000
Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. .. versionadded:: 3.3.0 Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. Examples -------- >>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = ps.DataFrame(np.random.randn(1000, 5), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> sdf = ps.from_pandas(df) >>> sdf.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202
python/pyspark/pandas/frame.py
cov
Flyangz/spark
python
def cov(self, min_periods: Optional[int]=None) -> 'DataFrame': "\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n .. versionadded:: 3.3.0\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = ps.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... 
columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> sdf = ps.from_pandas(df)\n >>> sdf.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n " min_periods = (1 if (min_periods is None) else min_periods) psdf = self[[col for col in self.columns if (isinstance(self[col].spark.data_type, BooleanType) or (isinstance(self[col].spark.data_type, NumericType) and (not isinstance(self[col].spark.data_type, DecimalType))))]] num_cols = len(psdf.columns) cov = np.zeros([num_cols, num_cols]) if (num_cols == 0): return DataFrame() if (len(psdf) < min_periods): cov.fill(np.nan) return DataFrame(cov, columns=psdf.columns, index=psdf.columns) data_cols = psdf._internal.data_spark_column_names cov_scols = [] count_not_null_scols = [] for r in range(0, num_cols): for c in range(r, num_cols): count_not_null_scols.append(F.count(F.when((F.col(data_cols[r]).isNotNull() & F.col(data_cols[c]).isNotNull()), 1))) count_not_null = psdf._internal.spark_frame.replace(float('nan'), None).select(*count_not_null_scols).head(1)[0] step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov_scols.append((F.covar_samp(F.col(data_cols[r]).cast('double'), F.col(data_cols[c]).cast('double')) if (count_not_null[(((r * num_cols) + c) - step)] >= min_periods) else F.lit(None))) pair_cov = psdf._internal.spark_frame.select(*cov_scols).head(1)[0] step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov[r][c] = pair_cov[(((r * num_cols) + c) - step)] cov = ((cov + cov.T) - np.diag(np.diag(cov))) return DataFrame(cov, columns=psdf.columns, index=psdf.columns)
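The covariance record above can be exercised with the short sketch below, grounded in its first doctest and in the min_periods branch of the implementation; it assumes a pandas-on-Spark build that already ships DataFrame.cov (versionadded 3.3.0 per the record).

import pyspark.pandas as ps

# Sample covariance matrix of the two numeric columns, as in the doctest.
df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], columns=['dogs', 'cats'])
print(df.cov())
#           dogs      cats
# dogs  0.666667 -1.000000
# cats -1.000000  1.666667

# With min_periods larger than the row count, the implementation short-circuits
# and returns a matrix filled entirely with NaN.
print(df.cov(min_periods=5))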
def sample(self, n: Optional[int]=None, frac: Optional[float]=None, replace: bool=False, random_state: Optional[int]=None) -> 'DataFrame': "\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifying the ``frac`` argument.\n\n You can use `random_state` for reproducibility. However, note that different from pandas,\n specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will\n be fixed. The result set depends on not only the seed, but also how the data is distributed\n across machines and to some extent network randomness when shuffle operations are involved.\n Even in the simplest case, the result set will depend on the system's CPU core count.\n\n Parameters\n ----------\n n : int, optional\n Number of items to return. This is currently NOT supported. Use frac instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : bool, default False\n Sample with or without replacement.\n random_state : int, optional\n Seed for the random number generator (if int).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing the sampled items.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'],\n ... columns=['num_legs', 'num_wings', 'num_specimen_seen'])\n >>> df # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n A random 25% sample of the ``DataFrame``.\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n\n Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,\n so the same items could appear more than once.\n\n >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP\n falcon 2\n spider 8\n spider 8\n Name: num_legs, dtype: int64\n\n Specifying the exact number of items to return is not supported at the moment.\n\n >>> df.sample(n=5) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n NotImplementedError: Function sample currently does not support specifying ...\n " if (n is not None): raise NotImplementedError('Function sample currently does not support specifying exact number of items to return. Use frac instead.') if (frac is None): raise ValueError('frac must be specified.') sdf = self._internal.resolved_copy.spark_frame.sample(withReplacement=replace, fraction=frac, seed=random_state) return DataFrame(self._internal.with_new_sdf(sdf))
-4,820,223,393,553,322,000
Return a random sample of items from an axis of object. Please call this function using named argument by specifying the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will be fixed. The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. >>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ...
python/pyspark/pandas/frame.py
sample
Flyangz/spark
python
def sample(self, n: Optional[int]=None, frac: Optional[float]=None, replace: bool=False, random_state: Optional[int]=None) -> 'DataFrame': "\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifying the ``frac`` argument.\n\n You can use `random_state` for reproducibility. However, note that different from pandas,\n specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will\n be fixed. The result set depends on not only the seed, but also how the data is distributed\n across machines and to some extent network randomness when shuffle operations are involved.\n Even in the simplest case, the result set will depend on the system's CPU core count.\n\n Parameters\n ----------\n n : int, optional\n Number of items to return. This is currently NOT supported. Use frac instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : bool, default False\n Sample with or without replacement.\n random_state : int, optional\n Seed for the random number generator (if int).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing the sampled items.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'],\n ... columns=['num_legs', 'num_wings', 'num_specimen_seen'])\n >>> df # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n A random 25% sample of the ``DataFrame``.\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n\n Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,\n so the same items could appear more than once.\n\n >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP\n falcon 2\n spider 8\n spider 8\n Name: num_legs, dtype: int64\n\n Specifying the exact number of items to return is not supported at the moment.\n\n >>> df.sample(n=5) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n NotImplementedError: Function sample currently does not support specifying ...\n " if (n is not None): raise NotImplementedError('Function sample currently does not support specifying exact number of items to return. Use frac instead.') if (frac is None): raise ValueError('frac must be specified.') sdf = self._internal.resolved_copy.spark_frame.sample(withReplacement=replace, fraction=frac, seed=random_state) return DataFrame(self._internal.with_new_sdf(sdf))
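A brief sketch of `sample` as documented above; only `frac` is supported, and even with `random_state` the selected rows are not reproducible in the pandas sense, so no output is asserted. Assumes a local pyspark installation.

import pyspark.pandas as ps

df = ps.DataFrame({'num_legs': [2, 4, 8, 0], 'num_wings': [2, 0, 0, 0]},
                  index=['falcon', 'dog', 'spider', 'fish'])

# Roughly a quarter of the rows; which rows (and how many) varies between runs.
print(df.sample(frac=0.25, random_state=1))

# Specifying n is explicitly unsupported and raises NotImplementedError.
try:
    df.sample(n=2)
except NotImplementedError as error:
    print(error)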
def astype(self, dtype: Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]) -> 'DataFrame': "\n Cast a pandas-on-Spark object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')\n >>> df\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert to float type:\n\n >>> df.astype('float')\n a b\n 0 1.0 1.0\n 1 2.0 2.0\n 2 3.0 3.0\n\n Convert to int64 type back:\n\n >>> df.astype('int64')\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert column a to float type:\n\n >>> df.astype({'a': float})\n a b\n 0 1.0 1\n 1 2.0 2\n 2 3.0 3\n\n " applied = [] if is_dict_like(dtype): dtype_dict = cast(Dict[(Name, Union[(str, Dtype)])], dtype) for col_name in dtype_dict.keys(): if (col_name not in self.columns): raise KeyError('Only a column name can be used for the key in a dtype mappings argument.') for (col_name, col) in self.items(): if (col_name in dtype_dict): applied.append(col.astype(dtype=dtype_dict[col_name])) else: applied.append(col) else: for (col_name, col) in self.items(): applied.append(col.astype(dtype=cast(Union[(str, Dtype)], dtype))) return DataFrame(self._internal.with_new_columns(applied))
-1,261,696,734,745,360,000
Cast a pandas-on-Spark object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3
python/pyspark/pandas/frame.py
astype
Flyangz/spark
python
def astype(self, dtype: Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]) -> 'DataFrame': "\n Cast a pandas-on-Spark object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')\n >>> df\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert to float type:\n\n >>> df.astype('float')\n a b\n 0 1.0 1.0\n 1 2.0 2.0\n 2 3.0 3.0\n\n Convert to int64 type back:\n\n >>> df.astype('int64')\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert column a to float type:\n\n >>> df.astype({'a': float})\n a b\n 0 1.0 1\n 1 2.0 2\n 2 3.0 3\n\n " applied = [] if is_dict_like(dtype): dtype_dict = cast(Dict[(Name, Union[(str, Dtype)])], dtype) for col_name in dtype_dict.keys(): if (col_name not in self.columns): raise KeyError('Only a column name can be used for the key in a dtype mappings argument.') for (col_name, col) in self.items(): if (col_name in dtype_dict): applied.append(col.astype(dtype=dtype_dict[col_name])) else: applied.append(col) else: for (col_name, col) in self.items(): applied.append(col.astype(dtype=cast(Union[(str, Dtype)], dtype))) return DataFrame(self._internal.with_new_columns(applied))
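The astype record maps directly onto the sketch below, which reuses its doctests; the dict form casts one column and leaves the rest untouched, while an unknown key in the mapping raises KeyError per the implementation above.

import pyspark.pandas as ps

df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')

# Whole-frame cast: every column becomes float64.
print(df.astype('float').dtypes)

# Per-column cast via a {column: dtype} mapping.
print(df.astype({'a': float}).dtypes)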
def add_prefix(self, prefix: str) -> 'DataFrame': "\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " assert isinstance(prefix, str) return self._apply_series_op((lambda psser: psser.rename(tuple([(prefix + i) for i in psser._column_label]))))
-6,918,965,661,863,779,000
Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6
python/pyspark/pandas/frame.py
add_prefix
Flyangz/spark
python
def add_prefix(self, prefix: str) -> 'DataFrame': "\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " assert isinstance(prefix, str) return self._apply_series_op((lambda psser: psser.rename(tuple([(prefix + i) for i in psser._column_label]))))
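A minimal sketch of `add_prefix`, following the doctest above; for a DataFrame only the column labels change and the row index is untouched.

import pyspark.pandas as ps

df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])

# Column labels become 'col_A' and 'col_B'; values and index are unchanged.
print(df.add_prefix('col_'))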
def add_suffix(self, suffix: str) -> 'DataFrame': "\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " assert isinstance(suffix, str) return self._apply_series_op((lambda psser: psser.rename(tuple([(i + suffix) for i in psser._column_label]))))
4,785,632,729,825,555,000
Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6
python/pyspark/pandas/frame.py
add_suffix
Flyangz/spark
python
def add_suffix(self, suffix: str) -> 'DataFrame': "\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " assert isinstance(suffix, str) return self._apply_series_op((lambda psser: psser.rename(tuple([(i + suffix) for i in psser._column_label]))))
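The matching sketch for `add_suffix`; the string is appended after each column label (the doctest output shows 'A_col' and 'B_col'), and a non-string argument trips the assert in the implementation above.

import pyspark.pandas as ps

df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])

# Column labels become 'A_col' and 'B_col'.
print(df.add_suffix('_col'))

# df.add_suffix(1) would raise AssertionError rather than TypeError, per the assert above.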
def describe(self, percentiles: Optional[List[float]]=None) -> 'DataFrame': "\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]\n A list of percentiles to be computed.\n\n Returns\n -------\n DataFrame\n Summary statistics of the Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.\n\n For object data (e.g. strings or timestamps), the result’s index will include\n ``count``, ``unique``, ``top``, and ``freq``.\n The ``top`` is the most common value. The ``freq`` is the most common value’s frequency.\n Timestamps also include the ``first`` and ``last`` items.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = ps.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n dtype: float64\n\n Describing a ``DataFrame``. Only numeric fields are returned.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0],\n ... 'object': ['a', 'b', 'c']\n ... },\n ... columns=['numeric1', 'numeric2', 'object'])\n >>> df.describe()\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n For multi-index columns:\n\n >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]\n >>> df.describe() # doctest: +NORMALIZE_WHITESPACE\n num\n a b\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n >>> df[('num', 'b')].describe()\n count 3.0\n mean 5.0\n std 1.0\n min 4.0\n 25% 4.0\n 50% 5.0\n 75% 6.0\n max 6.0\n Name: (num, b), dtype: float64\n\n Describing a ``DataFrame`` and selecting custom percentiles.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0]\n ... },\n ... 
columns=['numeric1', 'numeric2'])\n >>> df.describe(percentiles = [0.85, 0.15])\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 15% 1.0 4.0\n 50% 2.0 5.0\n 85% 3.0 6.0\n max 3.0 6.0\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric1.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute and selecting custom percentiles.\n\n >>> df.numeric1.describe(percentiles = [0.85, 0.15])\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 15% 1.0\n 50% 2.0\n 85% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n " psser_numeric: List[Series] = [] psser_string: List[Series] = [] psser_timestamp: List[Series] = [] spark_data_types: List[DataType] = [] column_labels: Optional[List[Label]] = [] column_names: List[str] = [] for label in self._internal.column_labels: psser = self._psser_for(label) spark_data_type = psser.spark.data_type if isinstance(spark_data_type, NumericType): psser_numeric.append(psser) column_labels.append(label) spark_data_types.append(spark_data_type) elif isinstance(spark_data_type, (TimestampType, TimestampNTZType)): psser_timestamp.append(psser) column_labels.append(label) spark_data_types.append(spark_data_type) else: psser_string.append(psser) column_names.append(self._internal.spark_column_name_for(label)) if (percentiles is not None): if any((((p < 0.0) or (p > 1.0)) for p in percentiles)): raise ValueError('Percentiles should all be in the interval [0, 1]') percentiles = ((percentiles + [0.5]) if (0.5 not in percentiles) else percentiles) else: percentiles = [0.25, 0.5, 0.75] is_all_string_type = ((len(psser_numeric) == 0) and (len(psser_timestamp) == 0) and (len(psser_string) > 0)) is_all_numeric_type = ((len(psser_numeric) > 0) and (len(psser_timestamp) == 0)) has_timestamp_type = (len(psser_timestamp) > 0) has_numeric_type = (len(psser_numeric) > 0) if is_all_string_type: internal = self._internal.resolved_copy exprs_string = [internal.spark_column_for(psser._column_label) for psser in psser_string] sdf = internal.spark_frame.select(*exprs_string) (counts, uniques) = map((lambda x: x[1:]), sdf.summary('count', 'count_distinct').take(2)) if ((len(counts) == 0) or (counts[0] == '0')): data = dict() for psser in psser_string: data[psser.name] = [0, 0, np.nan, np.nan] return DataFrame(data, index=['count', 'unique', 'top', 'freq']) tops = [] freqs = [] for column in exprs_string: (top, freq) = sdf.groupby(column).count().sort('count', ascending=False).first() tops.append(str(top)) freqs.append(str(freq)) stats = [counts, uniques, tops, freqs] stats_names = ['count', 'unique', 'top', 'freq'] result: DataFrame = DataFrame(data=stats, index=stats_names, columns=column_names) elif is_all_numeric_type: exprs_numeric = [psser._dtype_op.nan_to_null(psser).spark.column for psser in psser_numeric] formatted_perc = ['{:.0%}'.format(p) for p in sorted(percentiles)] stats = ['count', 'mean', 'stddev', 'min', *formatted_perc, 'max'] sdf = self._internal.spark_frame.select(*exprs_numeric).summary(*stats) sdf = sdf.replace('stddev', 'std', subset=['summary']) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, 'summary')], column_labels=column_labels, data_spark_columns=[scol_for(sdf, self._internal.spark_column_name_for(label)) for label in column_labels]) result = DataFrame(internal).astype('float64') elif has_timestamp_type: internal = 
self._internal.resolved_copy column_names = [internal.spark_column_name_for(column_label) for column_label in column_labels] column_length = len(column_labels) count_exprs = map(F.count, column_names) min_exprs = map(F.min, column_names) perc_exprs = chain(*[map(F.percentile_approx, column_names, ([percentile] * column_length)) for percentile in percentiles]) max_exprs = map(F.max, column_names) mean_exprs = [] for (column_name, spark_data_type) in zip(column_names, spark_data_types): mean_exprs.append(F.mean(column_name).astype(spark_data_type)) exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs] formatted_perc = ['{:.0%}'.format(p) for p in sorted(percentiles)] stats_names = ['count', 'mean', 'min', *formatted_perc, 'max'] if has_numeric_type: std_exprs = [] for (label, spark_data_type) in zip(column_labels, spark_data_types): column_name = label[0] if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): std_exprs.append(F.lit(None).alias('stddev_samp({})'.format(column_name))) else: std_exprs.append(F.stddev(column_name)) exprs.extend(std_exprs) stats_names.append('std') sdf = internal.spark_frame.select(exprs) stat_values = sdf.first() num_stats = int((len(exprs) / column_length)) column_name_stats_kv: Dict[(str, List[str])] = defaultdict(list) for (i, column_name) in enumerate(column_names): for first_stat_idx in range(num_stats): column_name_stats_kv[column_name].append(stat_values[((first_stat_idx * column_length) + i)]) for (key, spark_data_type) in zip(column_name_stats_kv, spark_data_types): if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): column_name_stats_kv[key] = [str(value) for value in column_name_stats_kv[key]] result: DataFrame = DataFrame(data=column_name_stats_kv, index=stats_names, columns=column_names) else: raise ValueError('Cannot describe a DataFrame without columns') return result
-3,598,272,818,452,397,000
Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75] A list of percentiles to be computed. Returns ------- DataFrame Summary statistics of the Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``. For object data (e.g. strings or timestamps), the result’s index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value’s frequency. Timestamps also include the ``first`` and ``last`` items. Examples -------- Describing a numeric ``Series``. >>> s = ps.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 dtype: float64 Describing a ``DataFrame``. Only numeric fields are returned. >>> df = ps.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0], ... 'object': ['a', 'b', 'c'] ... }, ... columns=['numeric1', 'numeric2', 'object']) >>> df.describe() numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 For multi-index columns: >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')] >>> df.describe() # doctest: +NORMALIZE_WHITESPACE num a b count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 >>> df[('num', 'b')].describe() count 3.0 mean 5.0 std 1.0 min 4.0 25% 4.0 50% 5.0 75% 6.0 max 6.0 Name: (num, b), dtype: float64 Describing a ``DataFrame`` and selecting custom percentiles. >>> df = ps.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0] ... }, ... columns=['numeric1', 'numeric2']) >>> df.describe(percentiles = [0.85, 0.15]) numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 15% 1.0 4.0 50% 2.0 5.0 85% 3.0 6.0 max 3.0 6.0 Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric1.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: numeric1, dtype: float64 Describing a column from a ``DataFrame`` by accessing it as an attribute and selecting custom percentiles. >>> df.numeric1.describe(percentiles = [0.85, 0.15]) count 3.0 mean 2.0 std 1.0 min 1.0 15% 1.0 50% 2.0 85% 3.0 max 3.0 Name: numeric1, dtype: float64
python/pyspark/pandas/frame.py
describe
Flyangz/spark
python
def describe(self, percentiles: Optional[List[float]]=None) -> 'DataFrame': "\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]\n A list of percentiles to be computed.\n\n Returns\n -------\n DataFrame\n Summary statistics of the Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.\n\n For object data (e.g. strings or timestamps), the result’s index will include\n ``count``, ``unique``, ``top``, and ``freq``.\n The ``top`` is the most common value. The ``freq`` is the most common value’s frequency.\n Timestamps also include the ``first`` and ``last`` items.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = ps.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n dtype: float64\n\n Describing a ``DataFrame``. Only numeric fields are returned.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0],\n ... 'object': ['a', 'b', 'c']\n ... },\n ... columns=['numeric1', 'numeric2', 'object'])\n >>> df.describe()\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n For multi-index columns:\n\n >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]\n >>> df.describe() # doctest: +NORMALIZE_WHITESPACE\n num\n a b\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n >>> df[('num', 'b')].describe()\n count 3.0\n mean 5.0\n std 1.0\n min 4.0\n 25% 4.0\n 50% 5.0\n 75% 6.0\n max 6.0\n Name: (num, b), dtype: float64\n\n Describing a ``DataFrame`` and selecting custom percentiles.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0]\n ... },\n ... 
columns=['numeric1', 'numeric2'])\n >>> df.describe(percentiles = [0.85, 0.15])\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 15% 1.0 4.0\n 50% 2.0 5.0\n 85% 3.0 6.0\n max 3.0 6.0\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric1.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute and selecting custom percentiles.\n\n >>> df.numeric1.describe(percentiles = [0.85, 0.15])\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 15% 1.0\n 50% 2.0\n 85% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n " psser_numeric: List[Series] = [] psser_string: List[Series] = [] psser_timestamp: List[Series] = [] spark_data_types: List[DataType] = [] column_labels: Optional[List[Label]] = [] column_names: List[str] = [] for label in self._internal.column_labels: psser = self._psser_for(label) spark_data_type = psser.spark.data_type if isinstance(spark_data_type, NumericType): psser_numeric.append(psser) column_labels.append(label) spark_data_types.append(spark_data_type) elif isinstance(spark_data_type, (TimestampType, TimestampNTZType)): psser_timestamp.append(psser) column_labels.append(label) spark_data_types.append(spark_data_type) else: psser_string.append(psser) column_names.append(self._internal.spark_column_name_for(label)) if (percentiles is not None): if any((((p < 0.0) or (p > 1.0)) for p in percentiles)): raise ValueError('Percentiles should all be in the interval [0, 1]') percentiles = ((percentiles + [0.5]) if (0.5 not in percentiles) else percentiles) else: percentiles = [0.25, 0.5, 0.75] is_all_string_type = ((len(psser_numeric) == 0) and (len(psser_timestamp) == 0) and (len(psser_string) > 0)) is_all_numeric_type = ((len(psser_numeric) > 0) and (len(psser_timestamp) == 0)) has_timestamp_type = (len(psser_timestamp) > 0) has_numeric_type = (len(psser_numeric) > 0) if is_all_string_type: internal = self._internal.resolved_copy exprs_string = [internal.spark_column_for(psser._column_label) for psser in psser_string] sdf = internal.spark_frame.select(*exprs_string) (counts, uniques) = map((lambda x: x[1:]), sdf.summary('count', 'count_distinct').take(2)) if ((len(counts) == 0) or (counts[0] == '0')): data = dict() for psser in psser_string: data[psser.name] = [0, 0, np.nan, np.nan] return DataFrame(data, index=['count', 'unique', 'top', 'freq']) tops = [] freqs = [] for column in exprs_string: (top, freq) = sdf.groupby(column).count().sort('count', ascending=False).first() tops.append(str(top)) freqs.append(str(freq)) stats = [counts, uniques, tops, freqs] stats_names = ['count', 'unique', 'top', 'freq'] result: DataFrame = DataFrame(data=stats, index=stats_names, columns=column_names) elif is_all_numeric_type: exprs_numeric = [psser._dtype_op.nan_to_null(psser).spark.column for psser in psser_numeric] formatted_perc = ['{:.0%}'.format(p) for p in sorted(percentiles)] stats = ['count', 'mean', 'stddev', 'min', *formatted_perc, 'max'] sdf = self._internal.spark_frame.select(*exprs_numeric).summary(*stats) sdf = sdf.replace('stddev', 'std', subset=['summary']) internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, 'summary')], column_labels=column_labels, data_spark_columns=[scol_for(sdf, self._internal.spark_column_name_for(label)) for label in column_labels]) result = DataFrame(internal).astype('float64') elif has_timestamp_type: internal = 
self._internal.resolved_copy column_names = [internal.spark_column_name_for(column_label) for column_label in column_labels] column_length = len(column_labels) count_exprs = map(F.count, column_names) min_exprs = map(F.min, column_names) perc_exprs = chain(*[map(F.percentile_approx, column_names, ([percentile] * column_length)) for percentile in percentiles]) max_exprs = map(F.max, column_names) mean_exprs = [] for (column_name, spark_data_type) in zip(column_names, spark_data_types): mean_exprs.append(F.mean(column_name).astype(spark_data_type)) exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs] formatted_perc = ['{:.0%}'.format(p) for p in sorted(percentiles)] stats_names = ['count', 'mean', 'min', *formatted_perc, 'max'] if has_numeric_type: std_exprs = [] for (label, spark_data_type) in zip(column_labels, spark_data_types): column_name = label[0] if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): std_exprs.append(F.lit(None).alias('stddev_samp({})'.format(column_name))) else: std_exprs.append(F.stddev(column_name)) exprs.extend(std_exprs) stats_names.append('std') sdf = internal.spark_frame.select(exprs) stat_values = sdf.first() num_stats = int((len(exprs) / column_length)) column_name_stats_kv: Dict[(str, List[str])] = defaultdict(list) for (i, column_name) in enumerate(column_names): for first_stat_idx in range(num_stats): column_name_stats_kv[column_name].append(stat_values[((first_stat_idx * column_length) + i)]) for (key, spark_data_type) in zip(column_name_stats_kv, spark_data_types): if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): column_name_stats_kv[key] = [str(value) for value in column_name_stats_kv[key]] result: DataFrame = DataFrame(data=column_name_stats_kv, index=stats_names, columns=column_names) else: raise ValueError('Cannot describe a DataFrame without columns') return result
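To round off the section, a small sketch of `describe` grounded in the doctests and code above; with a mix of numeric and object columns only the numeric ones are summarised, and 0.5 is appended to a custom percentile list when missing, so 50% appears alongside 15% and 85%. Assumes a local pyspark installation.

import pyspark.pandas as ps

df = ps.DataFrame({'numeric1': [1, 2, 3],
                   'numeric2': [4.0, 5.0, 6.0],
                   'object': ['a', 'b', 'c']},
                  columns=['numeric1', 'numeric2', 'object'])

# count, mean, std, min, 15%, 50%, 85%, max for the numeric columns only.
print(df.describe(percentiles=[0.85, 0.15]))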